From 43b12ef791ed6f72a8b0854770e1f2d3f28a0c70 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 11 Oct 2023 10:09:46 +1300 Subject: [PATCH 01/71] Move Broadcast message objects to separate library --- arbnode/inbox_tracker.go | 3 +- arbnode/transaction_streamer.go | 3 +- broadcastclient/broadcastclient.go | 8 +-- broadcastclient/broadcastclient_test.go | 7 +-- broadcaster/broadcaster.go | 53 ++++--------------- broadcaster/message/message.go | 42 +++++++++++++++ .../message_serialization_test.go} | 2 +- broadcaster/message/message_test_utils.go | 29 ++++++++++ broadcaster/sequencenumbercatchupbuffer.go | 11 ++-- .../sequencenumbercatchupbuffer_test.go | 42 +++++---------- relay/relay.go | 9 ++-- 11 files changed, 117 insertions(+), 92 deletions(-) create mode 100644 broadcaster/message/message.go rename broadcaster/{broadcaster_serialization_test.go => message/message_serialization_test.go} (98%) create mode 100644 broadcaster/message/message_test_utils.go diff --git a/arbnode/inbox_tracker.go b/arbnode/inbox_tracker.go index 72e4ba2887..51f74cbeb4 100644 --- a/arbnode/inbox_tracker.go +++ b/arbnode/inbox_tracker.go @@ -22,6 +22,7 @@ import ( "github.com/offchainlabs/nitro/arbstate" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/containers" ) @@ -240,7 +241,7 @@ func (t *InboxTracker) PopulateFeedBacklog(broadcastServer *broadcaster.Broadcas if err != nil { return fmt.Errorf("error getting tx streamer message count: %w", err) } - var feedMessages []*broadcaster.BroadcastFeedMessage + var feedMessages []*m.BroadcastFeedMessage for seqNum := startMessage; seqNum < messageCount; seqNum++ { message, err := t.txStreamer.GetMessage(seqNum) if err != nil { diff --git a/arbnode/transaction_streamer.go b/arbnode/transaction_streamer.go index 
2ee1526ee9..1761b59a26 100644 --- a/arbnode/transaction_streamer.go +++ b/arbnode/transaction_streamer.go @@ -33,6 +33,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/util/sharedmetrics" @@ -426,7 +427,7 @@ func (s *TransactionStreamer) AddMessages(pos arbutil.MessageIndex, messagesAreC return s.AddMessagesAndEndBatch(pos, messagesAreConfirmed, messages, nil) } -func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*broadcaster.BroadcastFeedMessage) error { +func (s *TransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFeedMessage) error { if len(feedMessages) == 0 { return nil } diff --git a/broadcastclient/broadcastclient.go b/broadcastclient/broadcastclient.go index e94daa463c..f27fc28fa0 100644 --- a/broadcastclient/broadcastclient.go +++ b/broadcastclient/broadcastclient.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/arbutil" - "github.com/offchainlabs/nitro/broadcaster" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -117,7 +117,7 @@ var DefaultTestConfig = Config{ } type TransactionStreamerInterface interface { - AddBroadcastMessages(feedMessages []*broadcaster.BroadcastFeedMessage) error + AddBroadcastMessages(feedMessages []*m.BroadcastFeedMessage) error } type BroadcastClient struct { @@ -381,7 +381,7 @@ func (bc *BroadcastClient) startBackgroundReader(earlyFrameData io.Reader) { backoffDuration = bc.config().ReconnectInitialBackoff if msg != nil { - res := broadcaster.BroadcastMessage{} + res := m.BroadcastMessage{} err = json.Unmarshal(msg, &res) if err != 
nil { log.Error("error unmarshalling message", "msg", msg, "err", err) @@ -483,7 +483,7 @@ func (bc *BroadcastClient) StopAndWait() { } } -func (bc *BroadcastClient) isValidSignature(ctx context.Context, message *broadcaster.BroadcastFeedMessage) error { +func (bc *BroadcastClient) isValidSignature(ctx context.Context, message *m.BroadcastFeedMessage) error { if bc.config().Verify.Dangerous.AcceptMissing && bc.sigVerifier == nil { // Verifier disabled return nil diff --git a/broadcastclient/broadcastclient_test.go b/broadcastclient/broadcastclient_test.go index fa743d4229..b75bc44c11 100644 --- a/broadcastclient/broadcastclient_test.go +++ b/broadcastclient/broadcastclient_test.go @@ -23,6 +23,7 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/util/testhelpers" @@ -178,20 +179,20 @@ func TestInvalidSignature(t *testing.T) { } type dummyTransactionStreamer struct { - messageReceiver chan broadcaster.BroadcastFeedMessage + messageReceiver chan m.BroadcastFeedMessage chainId uint64 sequencerAddr *common.Address } func NewDummyTransactionStreamer(chainId uint64, sequencerAddr *common.Address) *dummyTransactionStreamer { return &dummyTransactionStreamer{ - messageReceiver: make(chan broadcaster.BroadcastFeedMessage), + messageReceiver: make(chan m.BroadcastFeedMessage), chainId: chainId, sequencerAddr: sequencerAddr, } } -func (ts *dummyTransactionStreamer) AddBroadcastMessages(feedMessages []*broadcaster.BroadcastFeedMessage) error { +func (ts *dummyTransactionStreamer) AddBroadcastMessages(feedMessages []*m.BroadcastFeedMessage) error { for _, feedMessage := range feedMessages { ts.messageReceiver <- *feedMessage } diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 
c3f4c62ce0..bd7a38289a 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -9,11 +9,11 @@ import ( "github.com/gobwas/ws" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/wsbroadcastserver" ) @@ -25,41 +25,6 @@ type Broadcaster struct { dataSigner signature.DataSignerFunc } -// BroadcastMessage is the base message type for messages to send over the network. -// -// Acts as a variant holding the message types. The type of the message is -// indicated by whichever of the fields is non-empty. The fields holding the message -// types are annotated with omitempty so only the populated message is sent as -// json. The message fields should be pointers or slices and end with -// "Messages" or "Message". -// -// The format is forwards compatible, ie if a json BroadcastMessage is received that -// has fields that are not in the Go struct then deserialization will succeed -// skip the unknown field [1] -// -// References: -// [1] https://pkg.go.dev/encoding/json#Unmarshal -type BroadcastMessage struct { - Version int `json:"version"` - // TODO better name than messages since there are different types of messages - Messages []*BroadcastFeedMessage `json:"messages,omitempty"` - ConfirmedSequenceNumberMessage *ConfirmedSequenceNumberMessage `json:"confirmedSequenceNumberMessage,omitempty"` -} - -type BroadcastFeedMessage struct { - SequenceNumber arbutil.MessageIndex `json:"sequenceNumber"` - Message arbostypes.MessageWithMetadata `json:"message"` - Signature []byte `json:"signature"` -} - -func (m *BroadcastFeedMessage) Hash(chainId uint64) (common.Hash, error) { - return m.Message.Hash(m.SequenceNumber, chainId) -} - -type ConfirmedSequenceNumberMessage struct { - SequenceNumber arbutil.MessageIndex 
`json:"sequenceNumber"` -} - func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId uint64, feedErrChan chan error, dataSigner signature.DataSignerFunc) *Broadcaster { catchupBuffer := NewSequenceNumberCatchupBuffer(func() bool { return config().LimitCatchup }, func() int { return config().MaxCatchup }) return &Broadcaster{ @@ -70,7 +35,7 @@ func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId u } } -func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMetadata, sequenceNumber arbutil.MessageIndex) (*BroadcastFeedMessage, error) { +func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMetadata, sequenceNumber arbutil.MessageIndex) (*m.BroadcastFeedMessage, error) { var messageSignature []byte if b.dataSigner != nil { hash, err := message.Hash(sequenceNumber, b.chainId) @@ -83,7 +48,7 @@ func (b *Broadcaster) NewBroadcastFeedMessage(message arbostypes.MessageWithMeta } } - return &BroadcastFeedMessage{ + return &m.BroadcastFeedMessage{ SequenceNumber: sequenceNumber, Message: message, Signature: messageSignature, @@ -105,17 +70,17 @@ func (b *Broadcaster) BroadcastSingle(msg arbostypes.MessageWithMetadata, seq ar return nil } -func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *BroadcastFeedMessage) { - broadcastFeedMessages := make([]*BroadcastFeedMessage, 0, 1) +func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { + broadcastFeedMessages := make([]*m.BroadcastFeedMessage, 0, 1) broadcastFeedMessages = append(broadcastFeedMessages, bfm) b.BroadcastFeedMessages(broadcastFeedMessages) } -func (b *Broadcaster) BroadcastFeedMessages(messages []*BroadcastFeedMessage) { +func (b *Broadcaster) BroadcastFeedMessages(messages []*m.BroadcastFeedMessage) { - bm := BroadcastMessage{ + bm := m.BroadcastMessage{ Version: 1, Messages: messages, } @@ -125,9 +90,9 @@ func (b *Broadcaster) BroadcastFeedMessages(messages []*BroadcastFeedMessage) { func (b 
*Broadcaster) Confirm(seq arbutil.MessageIndex) { log.Debug("confirming sequence number", "sequenceNumber", seq) - b.server.Broadcast(BroadcastMessage{ + b.server.Broadcast(m.BroadcastMessage{ Version: 1, - ConfirmedSequenceNumberMessage: &ConfirmedSequenceNumberMessage{seq}}) + ConfirmedSequenceNumberMessage: &m.ConfirmedSequenceNumberMessage{seq}}) } func (b *Broadcaster) ClientCount() int32 { diff --git a/broadcaster/message/message.go b/broadcaster/message/message.go new file mode 100644 index 0000000000..d42e0c8b60 --- /dev/null +++ b/broadcaster/message/message.go @@ -0,0 +1,42 @@ +package message + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbutil" +) + +// BroadcastMessage is the base message type for messages to send over the network. +// +// Acts as a variant holding the message types. The type of the message is +// indicated by whichever of the fields is non-empty. The fields holding the message +// types are annotated with omitempty so only the populated message is sent as +// json. The message fields should be pointers or slices and end with +// "Messages" or "Message". 
+// +// The format is forwards compatible, ie if a json BroadcastMessage is received that +// has fields that are not in the Go struct then deserialization will succeed +// skip the unknown field [1] +// +// References: +// [1] https://pkg.go.dev/encoding/json#Unmarshal +type BroadcastMessage struct { + Version int `json:"version"` + // TODO better name than messages since there are different types of messages + Messages []*BroadcastFeedMessage `json:"messages,omitempty"` + ConfirmedSequenceNumberMessage *ConfirmedSequenceNumberMessage `json:"confirmedSequenceNumberMessage,omitempty"` +} + +type BroadcastFeedMessage struct { + SequenceNumber arbutil.MessageIndex `json:"sequenceNumber"` + Message arbostypes.MessageWithMetadata `json:"message"` + Signature []byte `json:"signature"` +} + +func (m *BroadcastFeedMessage) Hash(chainId uint64) (common.Hash, error) { + return m.Message.Hash(m.SequenceNumber, chainId) +} + +type ConfirmedSequenceNumberMessage struct { + SequenceNumber arbutil.MessageIndex `json:"sequenceNumber"` +} diff --git a/broadcaster/broadcaster_serialization_test.go b/broadcaster/message/message_serialization_test.go similarity index 98% rename from broadcaster/broadcaster_serialization_test.go rename to broadcaster/message/message_serialization_test.go index 64adb49126..c3e14a86ae 100644 --- a/broadcaster/broadcaster_serialization_test.go +++ b/broadcaster/message/message_serialization_test.go @@ -1,7 +1,7 @@ // Copyright 2021-2022, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE -package broadcaster +package message import ( "bytes" diff --git a/broadcaster/message/message_test_utils.go b/broadcaster/message/message_test_utils.go new file mode 100644 index 0000000000..0943b49c60 --- /dev/null +++ b/broadcaster/message/message_test_utils.go @@ -0,0 +1,29 @@ +package message + +import ( + "github.com/offchainlabs/nitro/arbos/arbostypes" + "github.com/offchainlabs/nitro/arbutil" +) + +func CreateDummyBroadcastMessage(seqNums []arbutil.MessageIndex) *BroadcastMessage { + return &BroadcastMessage{ + Messages: CreateDummyBroadcastMessages(seqNums), + } +} + +func CreateDummyBroadcastMessages(seqNums []arbutil.MessageIndex) []*BroadcastFeedMessage { + return CreateDummyBroadcastMessagesImpl(seqNums, len(seqNums)) +} + +func CreateDummyBroadcastMessagesImpl(seqNums []arbutil.MessageIndex, length int) []*BroadcastFeedMessage { + broadcastMessages := make([]*BroadcastFeedMessage, 0, length) + for _, seqNum := range seqNums { + broadcastMessage := &BroadcastFeedMessage{ + SequenceNumber: seqNum, + Message: arbostypes.EmptyTestMessageWithMetadata, + } + broadcastMessages = append(broadcastMessages, broadcastMessage) + } + + return broadcastMessages +} diff --git a/broadcaster/sequencenumbercatchupbuffer.go b/broadcaster/sequencenumbercatchupbuffer.go index bdd3e60c5b..fb51a3ceaa 100644 --- a/broadcaster/sequencenumbercatchupbuffer.go +++ b/broadcaster/sequencenumbercatchupbuffer.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/arbutil" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/wsbroadcastserver" ) @@ -26,7 +27,7 @@ var ( ) type SequenceNumberCatchupBuffer struct { - messages []*BroadcastFeedMessage + messages []*m.BroadcastFeedMessage messageCount int32 limitCatchup func() bool maxCatchup func() int @@ -39,7 +40,7 @@ func NewSequenceNumberCatchupBuffer(limitCatchup func() bool, 
maxCatchup func() } } -func (b *SequenceNumberCatchupBuffer) getCacheMessages(requestedSeqNum arbutil.MessageIndex) *BroadcastMessage { +func (b *SequenceNumberCatchupBuffer) getCacheMessages(requestedSeqNum arbutil.MessageIndex) *m.BroadcastMessage { if len(b.messages) == 0 { return nil } @@ -68,7 +69,7 @@ func (b *SequenceNumberCatchupBuffer) getCacheMessages(requestedSeqNum arbutil.M messagesToSend := b.messages[startingIndex:] if len(messagesToSend) > 0 { - bm := BroadcastMessage{ + bm := m.BroadcastMessage{ Version: 1, Messages: messagesToSend, } @@ -105,7 +106,7 @@ func (b *SequenceNumberCatchupBuffer) pruneBufferToIndex(idx int) { b.messages = b.messages[idx:] if len(b.messages) > 10 && cap(b.messages) > len(b.messages)*10 { // Too much spare capacity, copy to fresh slice to reset memory usage - b.messages = append([]*BroadcastFeedMessage(nil), b.messages[:len(b.messages)]...) + b.messages = append([]*m.BroadcastFeedMessage(nil), b.messages[:len(b.messages)]...) } } @@ -141,7 +142,7 @@ func (b *SequenceNumberCatchupBuffer) deleteConfirmed(confirmedSequenceNumber ar } func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { - broadcastMessage, ok := bmi.(BroadcastMessage) + broadcastMessage, ok := bmi.(m.BroadcastMessage) if !ok { msg := "requested to broadcast message of unknown type" log.Error(msg) diff --git a/broadcaster/sequencenumbercatchupbuffer_test.go b/broadcaster/sequencenumbercatchupbuffer_test.go index fc6655057e..80e1efe5c1 100644 --- a/broadcaster/sequencenumbercatchupbuffer_test.go +++ b/broadcaster/sequencenumbercatchupbuffer_test.go @@ -20,8 +20,8 @@ import ( "strings" "testing" - "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -40,26 +40,10 @@ func TestGetEmptyCacheMessages(t *testing.T) { } } -func createDummyBroadcastMessages(seqNums []arbutil.MessageIndex) 
[]*BroadcastFeedMessage { - return createDummyBroadcastMessagesImpl(seqNums, len(seqNums)) -} -func createDummyBroadcastMessagesImpl(seqNums []arbutil.MessageIndex, length int) []*BroadcastFeedMessage { - broadcastMessages := make([]*BroadcastFeedMessage, 0, length) - for _, seqNum := range seqNums { - broadcastMessage := &BroadcastFeedMessage{ - SequenceNumber: seqNum, - Message: arbostypes.EmptyTestMessageWithMetadata, - } - broadcastMessages = append(broadcastMessages, broadcastMessage) - } - - return broadcastMessages -} - func TestGetCacheMessages(t *testing.T) { indexes := []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46} buffer := SequenceNumberCatchupBuffer{ - messages: createDummyBroadcastMessages(indexes), + messages: m.CreateDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, maxCatchup: func() int { return -1 }, @@ -125,7 +109,7 @@ func TestDeleteConfirmedNil(t *testing.T) { func TestDeleteConfirmInvalidOrder(t *testing.T) { indexes := []arbutil.MessageIndex{40, 42} buffer := SequenceNumberCatchupBuffer{ - messages: createDummyBroadcastMessages(indexes), + messages: m.CreateDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, maxCatchup: func() int { return -1 }, @@ -141,7 +125,7 @@ func TestDeleteConfirmInvalidOrder(t *testing.T) { func TestDeleteConfirmed(t *testing.T) { indexes := []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46} buffer := SequenceNumberCatchupBuffer{ - messages: createDummyBroadcastMessages(indexes), + messages: m.CreateDummyBroadcastMessages(indexes), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, maxCatchup: func() int { return -1 }, @@ -157,7 +141,7 @@ func TestDeleteConfirmed(t *testing.T) { func TestDeleteFreeMem(t *testing.T) { indexes := []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51} buffer := SequenceNumberCatchupBuffer{ - messages: 
createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), + messages: m.CreateDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, maxCatchup: func() int { return -1 }, @@ -192,14 +176,14 @@ func TestBroadcastBadMessage(t *testing.T) { func TestBroadcastPastSeqNum(t *testing.T) { indexes := []arbutil.MessageIndex{40} buffer := SequenceNumberCatchupBuffer{ - messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), + messages: m.CreateDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, maxCatchup: func() int { return -1 }, } - bm := BroadcastMessage{ - Messages: []*BroadcastFeedMessage{ + bm := m.BroadcastMessage{ + Messages: []*m.BroadcastFeedMessage{ { SequenceNumber: 39, }, @@ -215,14 +199,14 @@ func TestBroadcastPastSeqNum(t *testing.T) { func TestBroadcastFutureSeqNum(t *testing.T) { indexes := []arbutil.MessageIndex{40} buffer := SequenceNumberCatchupBuffer{ - messages: createDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), + messages: m.CreateDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), messageCount: int32(len(indexes)), limitCatchup: func() bool { return false }, maxCatchup: func() int { return -1 }, } - bm := BroadcastMessage{ - Messages: []*BroadcastFeedMessage{ + bm := m.BroadcastMessage{ + Messages: []*m.BroadcastFeedMessage{ { SequenceNumber: 42, }, @@ -246,8 +230,8 @@ func TestMaxCatchupBufferSize(t *testing.T) { firstMessage := 10 for i := firstMessage; i <= 20; i += 2 { - bm := BroadcastMessage{ - Messages: []*BroadcastFeedMessage{ + bm := m.BroadcastMessage{ + Messages: []*m.BroadcastFeedMessage{ { SequenceNumber: arbutil.MessageIndex(i), }, diff --git a/relay/relay.go b/relay/relay.go index 4288902865..8e29971384 100644 --- a/relay/relay.go +++ b/relay/relay.go @@ -16,6 +16,7 @@ import ( "github.com/offchainlabs/nitro/broadcastclient" 
"github.com/offchainlabs/nitro/broadcastclients" "github.com/offchainlabs/nitro/broadcaster" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/cmd/genericconf" "github.com/offchainlabs/nitro/cmd/util/confighelpers" "github.com/offchainlabs/nitro/util/sharedmetrics" @@ -28,14 +29,14 @@ type Relay struct { broadcastClients *broadcastclients.BroadcastClients broadcaster *broadcaster.Broadcaster confirmedSequenceNumberChan chan arbutil.MessageIndex - messageChan chan broadcaster.BroadcastFeedMessage + messageChan chan m.BroadcastFeedMessage } type MessageQueue struct { - queue chan broadcaster.BroadcastFeedMessage + queue chan m.BroadcastFeedMessage } -func (q *MessageQueue) AddBroadcastMessages(feedMessages []*broadcaster.BroadcastFeedMessage) error { +func (q *MessageQueue) AddBroadcastMessages(feedMessages []*m.BroadcastFeedMessage) error { for _, feedMessage := range feedMessages { q.queue <- *feedMessage } @@ -45,7 +46,7 @@ func (q *MessageQueue) AddBroadcastMessages(feedMessages []*broadcaster.Broadcas func NewRelay(config *Config, feedErrChan chan error) (*Relay, error) { - q := MessageQueue{make(chan broadcaster.BroadcastFeedMessage, config.Queue)} + q := MessageQueue{make(chan m.BroadcastFeedMessage, config.Queue)} confirmedSequenceNumberListener := make(chan arbutil.MessageIndex, config.Queue) From 3f119a49474f1bc1f4696737ebac2e81dc00506c Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 11 Oct 2023 11:25:19 +1300 Subject: [PATCH 02/71] Create backlog library --- broadcaster/backlog/backlog.go | 288 ++++++++++++++++++++++ broadcaster/backlog/backlog_test.go | 367 ++++++++++++++++++++++++++++ broadcaster/backlog/config.go | 24 ++ 3 files changed, 679 insertions(+) create mode 100644 broadcaster/backlog/backlog.go create mode 100644 broadcaster/backlog/backlog_test.go create mode 100644 broadcaster/backlog/config.go diff --git a/broadcaster/backlog/backlog.go 
b/broadcaster/backlog/backlog.go new file mode 100644 index 0000000000..499d0a5fa1 --- /dev/null +++ b/broadcaster/backlog/backlog.go @@ -0,0 +1,288 @@ +package backlog + +import ( + "errors" + "fmt" + "sync/atomic" + + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/arbutil" + m "github.com/offchainlabs/nitro/broadcaster/message" + "github.com/offchainlabs/nitro/util/arbmath" +) + +var ( + errDropSegments = errors.New("remove previous segments from backlog") + errSequenceNumberSeen = errors.New("sequence number already present in backlog") + errOutOfBounds = errors.New("message not found in backlog") +) + +type Backlog interface { + Append(bm *m.BroadcastMessage) error + Get(start, end arbutil.MessageIndex) (*m.BroadcastMessage, error) + MessageCount() int +} + +type backlog struct { + head atomic.Pointer[backlogSegment] + tail atomic.Pointer[backlogSegment] + lookupByIndex map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment] + config ConfigFetcher + messageCount atomic.Uint64 +} + +func NewBacklog(c ConfigFetcher) Backlog { + lookup := make(map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment]) + return &backlog{ + lookupByIndex: lookup, + config: c, + } +} + +// Append will add the given messages to the backlogSegment at head until +// that segment reaches its limit. If messages remain to be added a new segment +// will be created. +func (b *backlog) Append(bm *m.BroadcastMessage) error { + + if bm.ConfirmedSequenceNumberMessage != nil { + b.delete(bm.ConfirmedSequenceNumberMessage.SequenceNumber) + // add to metric? 
+ } + + for _, msg := range bm.Messages { + s := b.tail.Load() + if s == nil { + s = &backlogSegment{} + b.head.Store(s) + b.tail.Store(s) + } + + prevMsgIdx := s.end + if s.MessageCount() >= b.config().SegmentLimit { + nextS := &backlogSegment{} + s.nextSegment.Store(nextS) + prevMsgIdx = s.end + nextS.previousSegment.Store(s) + s = nextS + b.tail.Store(s) + } + + err := s.append(prevMsgIdx, msg) + if errors.Is(err, errDropSegments) { + head := b.head.Load() + b.removeFromLookup(head.start, msg.SequenceNumber) + b.head.Store(s) + b.tail.Store(s) + b.messageCount.Store(0) + log.Warn(err.Error()) + } else if errors.Is(err, errSequenceNumberSeen) { + log.Info("ignoring message sequence number (%s), already in backlog", msg.SequenceNumber) + continue + } else if err != nil { + return err + } + p := &atomic.Pointer[backlogSegment]{} + p.Store(s) + b.lookupByIndex[msg.SequenceNumber] = p + b.messageCount.Add(1) + } + + return nil +} + +// Get reads messages from the given start to end MessageIndex +func (b *backlog) Get(start, end arbutil.MessageIndex) (*m.BroadcastMessage, error) { + head := b.head.Load() + tail := b.tail.Load() + if head == nil && tail == nil { + return nil, errOutOfBounds + } + + if start < head.start { + start = head.start + } + + if end > tail.end { + return nil, errOutOfBounds + } + + s, err := b.lookup(start) + if err != nil { + return nil, err + } + + bm := &m.BroadcastMessage{Version: 1} + required := int(end-start) + 1 + for { + segMsgs, err := s.get(arbmath.MaxInt(start, s.start), arbmath.MinInt(end, s.end)) + if err != nil { + return nil, err + } + + bm.Messages = append(bm.Messages, segMsgs...) + s = s.nextSegment.Load() + if len(bm.Messages) == required { + break + } else if s == nil { + return nil, errOutOfBounds + } + } + return bm, nil +} + +// delete removes segments before the confirmed sequence number given. It will +// not remove the segment containing the confirmed sequence number. 
+func (b *backlog) delete(confirmed arbutil.MessageIndex) { + head := b.head.Load() + tail := b.tail.Load() + if head == nil && tail == nil { + return + } + + if confirmed < head.start { + return + } + + if confirmed > tail.end { + log.Error("confirmed sequence number is past the end of stored messages", "confirmed sequence number", confirmed, "last stored sequence number", tail.end) + b.reset() + // should this be returning an error? The other buffer does not and just continues + return + } + + // find the segment containing the confirmed message + s, err := b.lookup(confirmed) + if err != nil { + log.Error(fmt.Sprintf("%s: clearing backlog", err.Error())) + b.reset() + // should this be returning an error? The other buffer does not and just continues + return + } + + // check the segment actually contains that message + if found := s.contains(confirmed); !found { + log.Error("error message not found in backlog segment, clearing backlog", "confirmed sequence number", confirmed) + b.reset() + // should this be returning an error? 
The other buffer does not and just continues + return + } + + // remove all previous segments + previous := s.previousSegment.Load() + if previous == nil { + return + } + b.removeFromLookup(head.start, previous.end) + b.head.Store(s) + count := b.messageCount.Load() - uint64(previous.end-head.start) - uint64(1) + b.messageCount.Store(count) +} + +// removeFromLookup removes all entries from the head segment's start index to +// the given confirmed index +func (b *backlog) removeFromLookup(start arbutil.MessageIndex, end arbutil.MessageIndex) { + for i := start; i == end; i++ { + delete(b.lookupByIndex, i) + } +} + +func (b *backlog) lookup(i arbutil.MessageIndex) (*backlogSegment, error) { + pointer, ok := b.lookupByIndex[i] + if !ok { + return nil, fmt.Errorf("error finding backlog segment containing message with SequenceNumber %d", i) + } + + s := pointer.Load() + if s == nil { + return nil, fmt.Errorf("error loading backlog segment containing message with SequenceNumber %d", i) + } + + return s, nil +} + +func (s *backlog) MessageCount() int { + return int(s.messageCount.Load()) +} + +// reset removes all segments from the backlog +func (b *backlog) reset() { + b.head = atomic.Pointer[backlogSegment]{} + b.tail = atomic.Pointer[backlogSegment]{} + b.lookupByIndex = map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment]{} + b.messageCount.Store(0) +} + +type backlogSegment struct { + start arbutil.MessageIndex + end arbutil.MessageIndex + messages []*m.BroadcastFeedMessage + messageCount atomic.Uint64 + nextSegment atomic.Pointer[backlogSegment] + previousSegment atomic.Pointer[backlogSegment] +} + +// get reads messages from the given start to end MessageIndex +func (s *backlogSegment) get(start, end arbutil.MessageIndex) ([]*m.BroadcastFeedMessage, error) { + noMsgs := []*m.BroadcastFeedMessage{} + if start < s.start { + return noMsgs, errOutOfBounds + } + + if end > s.end { + return noMsgs, errOutOfBounds + } + + startIndex := int(start - s.start) + endIndex 
:= int(end-s.start) + 1 + return s.messages[startIndex:endIndex], nil +} + +// append appends the given BroadcastFeedMessage to messages if it is the first +// message in the sequence or the next in the sequence. If segment's end +// message is ahead of the given message append will do nothing. If the given +// message is ahead of the segment's end message append will return +// errDropSegments to ensure any messages before the given message are dropped. +func (s *backlogSegment) append(prevMsgIdx arbutil.MessageIndex, msg *m.BroadcastFeedMessage) error { + seen := false + defer s.updateSegment(&seen) + + if expSeqNum := prevMsgIdx + 1; prevMsgIdx == 0 || msg.SequenceNumber == expSeqNum { + s.messages = append(s.messages, msg) + } else if msg.SequenceNumber > expSeqNum { + s.messages = nil + s.messages = append(s.messages, msg) + return fmt.Errorf("new message sequence number (%d) is greater than the expected sequence number (%d): %w", msg.SequenceNumber, expSeqNum, errDropSegments) + } else { + seen = true + return errSequenceNumberSeen + } + return nil +} + +// contains confirms whether the segment contains a message with the given sequence number +func (s *backlogSegment) contains(i arbutil.MessageIndex) bool { + if i < s.start || i > s.end { + return false + } + + msgIndex := uint64(i - s.start) + msg := s.messages[msgIndex] + return msg.SequenceNumber == i +} + +// updateSegment updates the messageCount, start and end indices of the segment +// this should be called using defer whenever a method updates the messages. It +// will do nothing if the message has already been seen by the backlog. 
+func (s *backlogSegment) updateSegment(seen *bool) { + if !*seen { + c := len(s.messages) + s.messageCount.Store(uint64(c)) + s.start = s.messages[0].SequenceNumber + s.end = s.messages[c-1].SequenceNumber + } +} + +// MessageCount returns the number of messages stored in the backlog +func (s *backlogSegment) MessageCount() int { + return int(s.messageCount.Load()) +} diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go new file mode 100644 index 0000000000..346d825d37 --- /dev/null +++ b/broadcaster/backlog/backlog_test.go @@ -0,0 +1,367 @@ +package backlog + +import ( + "errors" + "sync/atomic" + "testing" + + "github.com/offchainlabs/nitro/arbutil" + m "github.com/offchainlabs/nitro/broadcaster/message" + "github.com/offchainlabs/nitro/util/arbmath" +) + +func validateBacklog(t *testing.T, b *backlog, count int, start, end arbutil.MessageIndex, lookupKeys []arbutil.MessageIndex) { + if b.MessageCount() != count { + t.Errorf("backlog message count (%d) does not equal expected message count (%d)", b.MessageCount(), count) + } + + head := b.head.Load() + if start != 0 && head.start != start { + t.Errorf("head of backlog (%d) does not equal expected head (%d)", head.start, start) + } + + tail := b.tail.Load() + if end != 0 && tail.end != end { + t.Errorf("tail of backlog (%d) does not equal expected tail (%d)", tail.end, end) + } + + for _, k := range lookupKeys { + if _, ok := b.lookupByIndex[k]; !ok { + t.Errorf("failed to find message (%d) in lookup", k) + } + } +} + +func createDummyBacklog(indexes []arbutil.MessageIndex) (*backlog, error) { + b := &backlog{ + lookupByIndex: map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment]{}, + config: func() *Config { return &DefaultTestConfig }, + } + bm := &m.BroadcastMessage{Messages: m.CreateDummyBroadcastMessages(indexes)} + err := b.Append(bm) + return b, err +} + +func TestAppend(t *testing.T) { + testcases := []struct { + name string + backlogIndexes []arbutil.MessageIndex + 
newIndexes []arbutil.MessageIndex + expectedCount int + expectedStart arbutil.MessageIndex + expectedEnd arbutil.MessageIndex + expectedLookupKeys []arbutil.MessageIndex + }{ + { + "EmptyBacklog", + []arbutil.MessageIndex{}, + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + 7, + 40, + 46, + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + }, + { + "NonEmptyBacklog", + []arbutil.MessageIndex{40, 41}, + []arbutil.MessageIndex{42, 43, 44, 45, 46}, + 7, + 40, + 46, + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + }, + { + "NonSequential", + []arbutil.MessageIndex{40, 41}, + []arbutil.MessageIndex{42, 43, 45, 46}, + 2, // Message 45 is non sequential, the previous messages will be dropped from the backlog + 45, + 46, + []arbutil.MessageIndex{45, 46}, + }, + { + "MessageSeen", + []arbutil.MessageIndex{40, 41}, + []arbutil.MessageIndex{42, 43, 44, 45, 46, 41}, + 7, // Message 41 is already present in the backlog, it will be ignored + 40, + 46, + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + }, + { + "NonSequentialFirstSegmentMessage", + []arbutil.MessageIndex{40, 41}, + []arbutil.MessageIndex{42, 44, 45, 46}, + 3, // Message 44 is non sequential and the first message in a new segment, the previous messages will be dropped from the backlog + 44, + 46, + []arbutil.MessageIndex{45, 46}, + }, + { + "MessageSeenFirstSegmentMessage", + []arbutil.MessageIndex{40, 41}, + []arbutil.MessageIndex{42, 43, 44, 45, 41, 46}, + 7, // Message 41 is already present in the backlog and the first message in a new segment, it will be ignored + 40, + 46, + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // The segment limit is 3, the above test cases have been created + // to include testing certain actions on the first message of a + // new segment. 
+ b, err := createDummyBacklog(tc.backlogIndexes) + if err != nil { + t.Fatalf("error creating dummy backlog: %s", err) + } + + bm := &m.BroadcastMessage{Messages: m.CreateDummyBroadcastMessages(tc.newIndexes)} + err = b.Append(bm) + if err != nil { + t.Fatalf("error appending BroadcastMessage: %s", err) + } + + validateBacklog(t, b, tc.expectedCount, tc.expectedStart, tc.expectedEnd, tc.expectedLookupKeys) + }) + } +} + +func TestDeleteInvalidBacklog(t *testing.T) { + // Create a backlog with an invalid sequence + s := &backlogSegment{ + start: 40, + end: 42, + messages: m.CreateDummyBroadcastMessages([]arbutil.MessageIndex{40, 42}), + } + s.messageCount.Store(2) + + p := &atomic.Pointer[backlogSegment]{} + p.Store(s) + lookup := make(map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment]) + lookup[40] = p + b := &backlog{ + lookupByIndex: lookup, + config: func() *Config { return &DefaultTestConfig }, + } + b.messageCount.Store(2) + b.head.Store(s) + b.tail.Store(s) + + bm := &m.BroadcastMessage{ + Messages: nil, + ConfirmedSequenceNumberMessage: &m.ConfirmedSequenceNumberMessage{ + SequenceNumber: 41, + }, + } + + err := b.Append(bm) + if err != nil { + t.Fatalf("error appending BroadcastMessage: %s", err) + } + + validateBacklog(t, b, 0, 0, 0, []arbutil.MessageIndex{}) +} + +func TestDelete(t *testing.T) { + testcases := []struct { + name string + backlogIndexes []arbutil.MessageIndex + confirmed arbutil.MessageIndex + expectedCount int + expectedStart arbutil.MessageIndex + expectedEnd arbutil.MessageIndex + expectedLookupKeys []arbutil.MessageIndex + }{ + { + "EmptyBacklog", + []arbutil.MessageIndex{}, + 0, // no segements in backlog so nothing to delete + 0, + 0, + 0, + []arbutil.MessageIndex{}, + }, + { + "MsgBeforeBacklog", + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + 39, // no segments will be deleted + 7, + 40, + 46, + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + }, + { + "MsgInBacklog", + []arbutil.MessageIndex{40, 41, 42, 43, 44, 
45, 46}, + 43, // only the first segment will be deleted + 4, + 43, + 46, + []arbutil.MessageIndex{43, 44, 45, 46}, + }, + { + "MsgInFirstSegmentInBacklog", + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + 42, // first segment will not be deleted as confirmed message is there + 7, + 40, + 46, + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + }, + { + "MsgAfterBacklog", + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + 47, // all segments will be deleted + 0, + 0, + 0, + []arbutil.MessageIndex{}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + b, err := createDummyBacklog(tc.backlogIndexes) + if err != nil { + t.Fatalf("error creating dummy backlog: %s", err) + } + + bm := &m.BroadcastMessage{ + Messages: nil, + ConfirmedSequenceNumberMessage: &m.ConfirmedSequenceNumberMessage{ + SequenceNumber: tc.confirmed, + }, + } + + err = b.Append(bm) + if err != nil { + t.Fatalf("error appending BroadcastMessage: %s", err) + } + + validateBacklog(t, b, tc.expectedCount, tc.expectedStart, tc.expectedEnd, tc.expectedLookupKeys) + }) + } +} + +// make sure that an append, then delete, then append ends up with the correct messageCounts + +func TestGetEmptyBacklog(t *testing.T) { + b, err := createDummyBacklog([]arbutil.MessageIndex{}) + if err != nil { + t.Fatalf("error creating dummy backlog: %s", err) + } + + _, err = b.Get(1, 2) + if !errors.Is(err, errOutOfBounds) { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestGet(t *testing.T) { + indexes := []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46} + b, err := createDummyBacklog(indexes) + if err != nil { + t.Fatalf("error creating dummy backlog: %s", err) + } + + testcases := []struct { + name string + start arbutil.MessageIndex + end arbutil.MessageIndex + expectedErr error + expectedCount int + }{ + { + "LowerBoundFar", + 0, + 43, + nil, + 4, + }, + { + "LowerBoundClose", + 39, + 43, + nil, + 4, + }, + { + "UpperBoundFar", + 43, + 18446744073709551615, + 
errOutOfBounds, + 0, + }, + { + "UpperBoundClose", + 0, + 47, + errOutOfBounds, + 0, + }, + { + "AllMessages", + 40, + 46, + nil, + 7, + }, + { + "SomeMessages", + 42, + 44, + nil, + 3, + }, + { + "FirstMessage", + 40, + 40, + nil, + 1, + }, + { + "LastMessage", + 46, + 46, + nil, + 1, + }, + { + "SingleMessage", + 43, + 43, + nil, + 1, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + bm, err := b.Get(tc.start, tc.end) + if !errors.Is(err, tc.expectedErr) { + t.Fatalf("unexpected error: %s", err) + } + + // Some of the tests are checking the correct error is returned + // Do not check bm if an error should be returned + if err == nil { + actualCount := len(bm.Messages) + if actualCount != tc.expectedCount { + t.Fatalf("number of messages returned (%d) does not equal the expected number of messages (%d)", actualCount, tc.expectedCount) + } + + start := arbmath.MaxInt(tc.start, 40) + for i := start; i <= tc.end; i++ { + msg := bm.Messages[i-start] + if msg.SequenceNumber != i { + t.Fatalf("unexpected sequence number (%d) in %d returned message", i, i-tc.start) + } + } + } + }) + } +} diff --git a/broadcaster/backlog/config.go b/broadcaster/backlog/config.go new file mode 100644 index 0000000000..0e760cd0cf --- /dev/null +++ b/broadcaster/backlog/config.go @@ -0,0 +1,24 @@ +package backlog + +import ( + flag "github.com/spf13/pflag" +) + +type ConfigFetcher func() *Config + +type Config struct { + SegmentLimit int `koanf:"segment-limit" reload:"hot"` +} + +func AddOptions(prefix string, f *flag.FlagSet) { + f.Int(prefix+".segment-limit", DefaultConfig.SegmentLimit, "the maximum number of messages each segment within the backlog can contain") +} + +var ( + DefaultConfig = Config{ + SegmentLimit: 240, + } + DefaultTestConfig = Config{ + SegmentLimit: 3, + } +) From 342e3c9e29d374c0674e19c2fb4148d6490e096a Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 11 Oct 2023 16:29:17 +1300 
Subject: [PATCH 03/71] create a race condition test for backlog library --- broadcaster/backlog/backlog_test.go | 79 ++++++++++++++++++++++++----- 1 file changed, 66 insertions(+), 13 deletions(-) diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index 346d825d37..d22f422022 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -2,8 +2,10 @@ package backlog import ( "errors" + "sync" "sync/atomic" "testing" + "time" "github.com/offchainlabs/nitro/arbutil" m "github.com/offchainlabs/nitro/broadcaster/message" @@ -32,6 +34,21 @@ func validateBacklog(t *testing.T, b *backlog, count int, start, end arbutil.Mes } } +func validateBroadcastMessage(t *testing.T, bm *m.BroadcastMessage, expectedCount int, start, end arbutil.MessageIndex) { + actualCount := len(bm.Messages) + if actualCount != expectedCount { + t.Errorf("number of messages returned (%d) does not equal the expected number of messages (%d)", actualCount, expectedCount) + } + + s := arbmath.MaxInt(start, 40) + for i := s; i <= end; i++ { + msg := bm.Messages[i-s] + if msg.SequenceNumber != i { + t.Errorf("unexpected sequence number (%d) in %d returned message", i, i-s) + } + } +} + func createDummyBacklog(indexes []arbutil.MessageIndex) (*backlog, error) { b := &backlog{ lookupByIndex: map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment]{}, @@ -348,20 +365,56 @@ func TestGet(t *testing.T) { // Some of the tests are checking the correct error is returned // Do not check bm if an error should be returned - if err == nil { - actualCount := len(bm.Messages) - if actualCount != tc.expectedCount { - t.Fatalf("number of messages returned (%d) does not equal the expected number of messages (%d)", actualCount, tc.expectedCount) - } - - start := arbmath.MaxInt(tc.start, 40) - for i := start; i <= tc.end; i++ { - msg := bm.Messages[i-start] - if msg.SequenceNumber != i { - t.Fatalf("unexpected sequence number (%d) in %d returned message", i, 
i-tc.start) - } - } + if tc.expectedErr == nil { + validateBroadcastMessage(t, bm, tc.expectedCount, tc.start, tc.end) } }) } } + +// TestBacklogRaceCondition performs read & write operations in separate +// goroutines to ensure that the backlog does not have race conditions. The +// `go test -race` command can be used to test this. +func TestBacklogRaceCondition(t *testing.T) { + indexes := []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46} + b, err := createDummyBacklog(indexes) + if err != nil { + t.Fatalf("error creating dummy backlog: %s", err) + } + + wg := sync.WaitGroup{} + newIndexes := []arbutil.MessageIndex{47, 48, 49, 50, 51, 52, 53, 54, 55} + + // Write to backlog in goroutine + wg.Add(1) + go func(t *testing.T, b *backlog) { + defer wg.Done() + for _, i := range newIndexes { + bm := m.CreateDummyBroadcastMessage([]arbutil.MessageIndex{i}) + err := b.Append(bm) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + time.Sleep(time.Millisecond) + } + }(t, b) + + // Read from backlog in goroutine + wg.Add(1) + go func(t *testing.T, b *backlog) { + defer wg.Done() + for _, i := range []arbutil.MessageIndex{42, 43, 44, 45, 46, 47} { + bm, err := b.Get(i, i+1) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } else { + validateBroadcastMessage(t, bm, 2, i, i+1) + } + time.Sleep(2 * time.Millisecond) + } + }(t, b) + + // Wait for both goroutines to finish + wg.Wait() + validateBacklog(t, b, 16, 40, 55, append(indexes, newIndexes...)) +} From b55d38ee2bf537ae1c52af96652f098f765c1f05 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 12 Oct 2023 09:30:28 +1300 Subject: [PATCH 04/71] fix backlog race conditions --- broadcaster/backlog/backlog.go | 87 ++++++++++++++++------------- broadcaster/backlog/backlog_test.go | 38 ++++++------- 2 files changed, 67 insertions(+), 58 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 499d0a5fa1..6feb7576f1 
100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -3,10 +3,10 @@ package backlog import ( "errors" "fmt" + "sync" "sync/atomic" "github.com/ethereum/go-ethereum/log" - "github.com/offchainlabs/nitro/arbutil" m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/arbmath" ) @@ -19,20 +19,21 @@ var ( type Backlog interface { Append(bm *m.BroadcastMessage) error - Get(start, end arbutil.MessageIndex) (*m.BroadcastMessage, error) + Get(start, end uint64) (*m.BroadcastMessage, error) MessageCount() int } type backlog struct { head atomic.Pointer[backlogSegment] tail atomic.Pointer[backlogSegment] - lookupByIndex map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment] + lookupLock sync.RWMutex + lookupByIndex map[uint64]*atomic.Pointer[backlogSegment] config ConfigFetcher messageCount atomic.Uint64 } func NewBacklog(c ConfigFetcher) Backlog { - lookup := make(map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment]) + lookup := make(map[uint64]*atomic.Pointer[backlogSegment]) return &backlog{ lookupByIndex: lookup, config: c, @@ -45,7 +46,7 @@ func NewBacklog(c ConfigFetcher) Backlog { func (b *backlog) Append(bm *m.BroadcastMessage) error { if bm.ConfirmedSequenceNumberMessage != nil { - b.delete(bm.ConfirmedSequenceNumberMessage.SequenceNumber) + b.delete(uint64(bm.ConfirmedSequenceNumberMessage.SequenceNumber)) // add to metric? 
} @@ -57,11 +58,11 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { b.tail.Store(s) } - prevMsgIdx := s.end + prevMsgIdx := s.end.Load() if s.MessageCount() >= b.config().SegmentLimit { nextS := &backlogSegment{} s.nextSegment.Store(nextS) - prevMsgIdx = s.end + prevMsgIdx = s.end.Load() nextS.previousSegment.Store(s) s = nextS b.tail.Store(s) @@ -70,7 +71,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { err := s.append(prevMsgIdx, msg) if errors.Is(err, errDropSegments) { head := b.head.Load() - b.removeFromLookup(head.start, msg.SequenceNumber) + b.removeFromLookup(head.start.Load(), uint64(msg.SequenceNumber)) b.head.Store(s) b.tail.Store(s) b.messageCount.Store(0) @@ -83,7 +84,9 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { } p := &atomic.Pointer[backlogSegment]{} p.Store(s) - b.lookupByIndex[msg.SequenceNumber] = p + b.lookupLock.Lock() + b.lookupByIndex[uint64(msg.SequenceNumber)] = p + b.lookupLock.Unlock() b.messageCount.Add(1) } @@ -91,18 +94,18 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { } // Get reads messages from the given start to end MessageIndex -func (b *backlog) Get(start, end arbutil.MessageIndex) (*m.BroadcastMessage, error) { +func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { head := b.head.Load() tail := b.tail.Load() if head == nil && tail == nil { return nil, errOutOfBounds } - if start < head.start { - start = head.start + if start < head.start.Load() { + start = head.start.Load() } - if end > tail.end { + if end > tail.end.Load() { return nil, errOutOfBounds } @@ -114,7 +117,7 @@ func (b *backlog) Get(start, end arbutil.MessageIndex) (*m.BroadcastMessage, err bm := &m.BroadcastMessage{Version: 1} required := int(end-start) + 1 for { - segMsgs, err := s.get(arbmath.MaxInt(start, s.start), arbmath.MinInt(end, s.end)) + segMsgs, err := s.get(arbmath.MaxInt(start, s.start.Load()), arbmath.MinInt(end, s.end.Load())) if err != nil { return nil, err } @@ -132,19 +135,19 
@@ func (b *backlog) Get(start, end arbutil.MessageIndex) (*m.BroadcastMessage, err // delete removes segments before the confirmed sequence number given. It will // not remove the segment containing the confirmed sequence number. -func (b *backlog) delete(confirmed arbutil.MessageIndex) { +func (b *backlog) delete(confirmed uint64) { head := b.head.Load() tail := b.tail.Load() if head == nil && tail == nil { return } - if confirmed < head.start { + if confirmed < head.start.Load() { return } - if confirmed > tail.end { - log.Error("confirmed sequence number is past the end of stored messages", "confirmed sequence number", confirmed, "last stored sequence number", tail.end) + if confirmed > tail.end.Load() { + log.Error("confirmed sequence number is past the end of stored messages", "confirmed sequence number", confirmed, "last stored sequence number", tail.end.Load()) b.reset() // should this be returning an error? The other buffer does not and just continues return @@ -172,22 +175,26 @@ func (b *backlog) delete(confirmed arbutil.MessageIndex) { if previous == nil { return } - b.removeFromLookup(head.start, previous.end) + b.removeFromLookup(head.start.Load(), previous.end.Load()) b.head.Store(s) - count := b.messageCount.Load() - uint64(previous.end-head.start) - uint64(1) + count := b.messageCount.Load() + head.start.Load() - previous.end.Load() - uint64(1) b.messageCount.Store(count) } // removeFromLookup removes all entries from the head segment's start index to // the given confirmed index -func (b *backlog) removeFromLookup(start arbutil.MessageIndex, end arbutil.MessageIndex) { +func (b *backlog) removeFromLookup(start, end uint64) { + b.lookupLock.Lock() + defer b.lookupLock.Unlock() - for i := start; i == end; i++ { + for i := start; i <= end; i++ { delete(b.lookupByIndex, i) } } -func (b *backlog) lookup(i arbutil.MessageIndex) (*backlogSegment, error) { +func (b *backlog) lookup(i uint64) (*backlogSegment, error) { + b.lookupLock.RLock() pointer, ok := b.lookupByIndex[i] + 
b.lookupLock.RUnlock() if !ok { return nil, fmt.Errorf("error finding backlog segment containing message with SequenceNumber %d", i) } @@ -206,15 +213,17 @@ func (s *backlog) MessageCount() int { // reset removes all segments from the backlog func (b *backlog) reset() { + b.lookupLock.Lock() + defer b.lookupLock.Unlock() b.head = atomic.Pointer[backlogSegment]{} b.tail = atomic.Pointer[backlogSegment]{} - b.lookupByIndex = map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment]{} + b.lookupByIndex = map[uint64]*atomic.Pointer[backlogSegment]{} b.messageCount.Store(0) } type backlogSegment struct { - start arbutil.MessageIndex - end arbutil.MessageIndex + start atomic.Uint64 + end atomic.Uint64 messages []*m.BroadcastFeedMessage messageCount atomic.Uint64 nextSegment atomic.Pointer[backlogSegment] @@ -222,18 +231,18 @@ type backlogSegment struct { } // get reads messages from the given start to end MessageIndex -func (s *backlogSegment) get(start, end arbutil.MessageIndex) ([]*m.BroadcastFeedMessage, error) { +func (s *backlogSegment) get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { noMsgs := []*m.BroadcastFeedMessage{} - if start < s.start { + if start < s.start.Load() { return noMsgs, errOutOfBounds } - if end > s.end { + if end > s.end.Load() { return noMsgs, errOutOfBounds } - startIndex := int(start - s.start) - endIndex := int(end-s.start) + 1 + startIndex := start - s.start.Load() + endIndex := end - s.start.Load() + 1 return s.messages[startIndex:endIndex], nil } @@ -242,13 +251,13 @@ func (s *backlogSegment) get(start, end arbutil.MessageIndex) ([]*m.BroadcastFee // message is ahead of the given message append will do nothing. If the given // message is ahead of the segment's end message append will return // errDropSegments to ensure any messages before the given message are dropped. 
-func (s *backlogSegment) append(prevMsgIdx arbutil.MessageIndex, msg *m.BroadcastFeedMessage) error { +func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) error { seen := false defer s.updateSegment(&seen) - if expSeqNum := prevMsgIdx + 1; prevMsgIdx == 0 || msg.SequenceNumber == expSeqNum { + if expSeqNum := prevMsgIdx + 1; prevMsgIdx == 0 || uint64(msg.SequenceNumber) == expSeqNum { s.messages = append(s.messages, msg) - } else if msg.SequenceNumber > expSeqNum { + } else if uint64(msg.SequenceNumber) > expSeqNum { s.messages = nil s.messages = append(s.messages, msg) return fmt.Errorf("new message sequence number (%d) is greater than the expected sequence number (%d): %w", msg.SequenceNumber, expSeqNum, errDropSegments) @@ -260,14 +269,14 @@ func (s *backlogSegment) append(prevMsgIdx arbutil.MessageIndex, msg *m.Broadcas } // contains confirms whether the segment contains a message with the given sequence number -func (s *backlogSegment) contains(i arbutil.MessageIndex) bool { - if i < s.start || i > s.end { +func (s *backlogSegment) contains(i uint64) bool { + if i < s.start.Load() || i > s.end.Load() { return false } - msgIndex := uint64(i - s.start) + msgIndex := i - s.start.Load() msg := s.messages[msgIndex] - return msg.SequenceNumber == i + return uint64(msg.SequenceNumber) == i } // updateSegment updates the messageCount, start and end indices of the segment @@ -277,8 +286,8 @@ func (s *backlogSegment) updateSegment(seen *bool) { if !*seen { c := len(s.messages) s.messageCount.Store(uint64(c)) - s.start = s.messages[0].SequenceNumber - s.end = s.messages[c-1].SequenceNumber + s.start.Store(uint64(s.messages[0].SequenceNumber)) + s.end.Store(uint64(s.messages[c-1].SequenceNumber)) } } diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index d22f422022..fe062aee1f 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -12,29 +12,29 @@ import ( 
"github.com/offchainlabs/nitro/util/arbmath" ) -func validateBacklog(t *testing.T, b *backlog, count int, start, end arbutil.MessageIndex, lookupKeys []arbutil.MessageIndex) { +func validateBacklog(t *testing.T, b *backlog, count int, start, end uint64, lookupKeys []arbutil.MessageIndex) { if b.MessageCount() != count { t.Errorf("backlog message count (%d) does not equal expected message count (%d)", b.MessageCount(), count) } head := b.head.Load() - if start != 0 && head.start != start { - t.Errorf("head of backlog (%d) does not equal expected head (%d)", head.start, start) + if start != 0 && head.start.Load() != start { + t.Errorf("head of backlog (%d) does not equal expected head (%d)", head.start.Load(), start) } tail := b.tail.Load() - if end != 0 && tail.end != end { - t.Errorf("tail of backlog (%d) does not equal expected tail (%d)", tail.end, end) + if end != 0 && tail.end.Load() != end { + t.Errorf("tail of backlog (%d) does not equal expected tail (%d)", tail.end.Load(), end) } for _, k := range lookupKeys { - if _, ok := b.lookupByIndex[k]; !ok { + if _, err := b.lookup(uint64(k)); err != nil { t.Errorf("failed to find message (%d) in lookup", k) } } } -func validateBroadcastMessage(t *testing.T, bm *m.BroadcastMessage, expectedCount int, start, end arbutil.MessageIndex) { +func validateBroadcastMessage(t *testing.T, bm *m.BroadcastMessage, expectedCount int, start, end uint64) { actualCount := len(bm.Messages) if actualCount != expectedCount { t.Errorf("number of messages returned (%d) does not equal the expected number of messages (%d)", actualCount, expectedCount) @@ -43,7 +43,7 @@ func validateBroadcastMessage(t *testing.T, bm *m.BroadcastMessage, expectedCoun s := arbmath.MaxInt(start, 40) for i := s; i <= end; i++ { msg := bm.Messages[i-s] - if msg.SequenceNumber != i { + if uint64(msg.SequenceNumber) != i { t.Errorf("unexpected sequence number (%d) in %d returned message", i, i-s) } } @@ -51,7 +51,7 @@ func validateBroadcastMessage(t *testing.T, 
bm *m.BroadcastMessage, expectedCoun func createDummyBacklog(indexes []arbutil.MessageIndex) (*backlog, error) { b := &backlog{ - lookupByIndex: map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment]{}, + lookupByIndex: map[uint64]*atomic.Pointer[backlogSegment]{}, config: func() *Config { return &DefaultTestConfig }, } bm := &m.BroadcastMessage{Messages: m.CreateDummyBroadcastMessages(indexes)} @@ -65,8 +65,8 @@ func TestAppend(t *testing.T) { backlogIndexes []arbutil.MessageIndex newIndexes []arbutil.MessageIndex expectedCount int - expectedStart arbutil.MessageIndex - expectedEnd arbutil.MessageIndex + expectedStart uint64 + expectedEnd uint64 expectedLookupKeys []arbutil.MessageIndex }{ { @@ -149,15 +149,15 @@ func TestAppend(t *testing.T) { func TestDeleteInvalidBacklog(t *testing.T) { // Create a backlog with an invalid sequence s := &backlogSegment{ - start: 40, - end: 42, messages: m.CreateDummyBroadcastMessages([]arbutil.MessageIndex{40, 42}), } + s.start.Store(40) + s.end.Store(42) s.messageCount.Store(2) p := &atomic.Pointer[backlogSegment]{} p.Store(s) - lookup := make(map[arbutil.MessageIndex]*atomic.Pointer[backlogSegment]) + lookup := make(map[uint64]*atomic.Pointer[backlogSegment]) lookup[40] = p b := &backlog{ lookupByIndex: lookup, @@ -188,8 +188,8 @@ func TestDelete(t *testing.T) { backlogIndexes []arbutil.MessageIndex confirmed arbutil.MessageIndex expectedCount int - expectedStart arbutil.MessageIndex - expectedEnd arbutil.MessageIndex + expectedStart uint64 + expectedEnd uint64 expectedLookupKeys []arbutil.MessageIndex }{ { @@ -286,8 +286,8 @@ func TestGet(t *testing.T) { testcases := []struct { name string - start arbutil.MessageIndex - end arbutil.MessageIndex + start uint64 + end uint64 expectedErr error expectedCount int }{ @@ -403,7 +403,7 @@ func TestBacklogRaceCondition(t *testing.T) { wg.Add(1) go func(t *testing.T, b *backlog) { defer wg.Done() - for _, i := range []arbutil.MessageIndex{42, 43, 44, 45, 46, 47} { + for _, i := range 
[]uint64{42, 43, 44, 45, 46, 47} { bm, err := b.Get(i, i+1) if err != nil { t.Fatalf("unexpected error: %s", err) From fc4db62ca4eb96a91f876da1dddc3b19c010649e Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 12 Oct 2023 10:54:25 +1300 Subject: [PATCH 05/71] add delete method to race condition test --- broadcaster/backlog/backlog_test.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index fe062aee1f..65b5d7095b 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -414,7 +414,20 @@ func TestBacklogRaceCondition(t *testing.T) { } }(t, b) - // Wait for both goroutines to finish + // Delete from backlog in goroutine. This is normally done via Append with + // a confirmed sequence number, using delete method for simplicity in test. + wg.Add(1) + go func(t *testing.T, b *backlog) { + defer wg.Done() + for _, i := range []uint64{40, 43, 47} { + b.delete(i) + time.Sleep(5 * time.Millisecond) + } + }(t, b) + + // Wait for all goroutines to finish wg.Wait() - validateBacklog(t, b, 16, 40, 55, append(indexes, newIndexes...)) + // Messages up to 47 were deleted. However the segment that 47 was in is + // kept, which is why the backlog starts at 46. 
+ validateBacklog(t, b, 10, 46, 55, append(indexes, newIndexes...)) } From d805660793812b835b43209a0ceabf49fcf60677 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 18 Oct 2023 17:18:42 +1300 Subject: [PATCH 06/71] swap catchupbuffer for backlog in wsbroadcastserver --- broadcaster/backlog/backlog.go | 30 +++++- broadcaster/broadcaster.go | 4 +- wsbroadcastserver/clientconnection.go | 123 ++++++++++++++++++++----- wsbroadcastserver/clientmanager.go | 59 ++++-------- wsbroadcastserver/wsbroadcastserver.go | 22 +++-- 5 files changed, 157 insertions(+), 81 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 6feb7576f1..1ea46ee5a9 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -18,9 +18,10 @@ var ( ) type Backlog interface { + Head() BacklogSegment Append(bm *m.BroadcastMessage) error Get(start, end uint64) (*m.BroadcastMessage, error) - MessageCount() int + Count() int } type backlog struct { @@ -40,6 +41,10 @@ func NewBacklog(c ConfigFetcher) Backlog { } } +func (b *backlog) Head() BacklogSegment { + return b.head.Load() +} + // Append will add the given messages to the backlogSegment at head until // that segment reaches its limit. If messages remain to be added a new segment // will be created. @@ -50,6 +55,8 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { // add to metric? } + // TODO(clamb): Do I need a max catchup config for the backlog? 
Similar to catchup buffer + for _, msg := range bm.Messages { s := b.tail.Load() if s == nil { @@ -59,7 +66,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { } prevMsgIdx := s.end.Load() - if s.MessageCount() >= b.config().SegmentLimit { + if s.count() >= b.config().SegmentLimit { nextS := &backlogSegment{} s.nextSegment.Store(nextS) prevMsgIdx = s.end.Load() @@ -207,7 +214,7 @@ func (b *backlog) lookup(i uint64) (*backlogSegment, error) { return s, nil } -func (s *backlog) MessageCount() int { +func (s *backlog) Count() int { return int(s.messageCount.Load()) } @@ -221,6 +228,11 @@ func (b *backlog) reset() { b.messageCount.Store(0) } +type BacklogSegment interface { + Next() BacklogSegment + Messages() []*m.BroadcastFeedMessage +} + type backlogSegment struct { start atomic.Uint64 end atomic.Uint64 @@ -230,6 +242,14 @@ type backlogSegment struct { previousSegment atomic.Pointer[backlogSegment] } +func (s *backlogSegment) Next() BacklogSegment { + return s.nextSegment.Load() +} + +func (s *backlogSegment) Messages() []*m.BroadcastFeedMessage { + return s.messages +} + // get reads messages from the given start to end MessageIndex func (s *backlogSegment) get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { noMsgs := []*m.BroadcastFeedMessage{} @@ -291,7 +311,7 @@ func (s *backlogSegment) updateSegment(seen *bool) { } } -// MessageCount returns the number of messages stored in the backlog -func (s *backlogSegment) MessageCount() int { +// count returns the number of messages stored in the backlog segment +func (s *backlogSegment) count() int { return int(s.messageCount.Load()) } diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index bd7a38289a..b96ff1606b 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -80,7 +80,7 @@ func (b *Broadcaster) BroadcastSingleFeedMessage(bfm *m.BroadcastFeedMessage) { func (b *Broadcaster) BroadcastFeedMessages(messages []*m.BroadcastFeedMessage) { - bm := 
m.BroadcastMessage{ + bm := &m.BroadcastMessage{ Version: 1, Messages: messages, } @@ -90,7 +90,7 @@ func (b *Broadcaster) BroadcastFeedMessages(messages []*m.BroadcastFeedMessage) func (b *Broadcaster) Confirm(seq arbutil.MessageIndex) { log.Debug("confirming sequence number", "sequenceNumber", seq) - b.server.Broadcast(m.BroadcastMessage{ + b.server.Broadcast(&m.BroadcastMessage{ Version: 1, ConfirmedSequenceNumberMessage: &m.ConfirmedSequenceNumberMessage{seq}}) } diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index bdbeccfd23..fff07cf896 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -5,6 +5,7 @@ package wsbroadcastserver import ( "context" + "errors" "fmt" "math/rand" "net" @@ -12,7 +13,9 @@ import ( "sync/atomic" "time" + "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/broadcaster/backlog" "github.com/gobwas/ws" "github.com/gobwas/ws/wsflate" @@ -20,6 +23,8 @@ import ( "github.com/offchainlabs/nitro/util/stopwaiter" ) +var errContextDone = errors.New("context done") + // ClientConnection represents client connection. 
type ClientConnection struct { stopwaiter.StopWaiter @@ -36,6 +41,8 @@ type ClientConnection struct { lastHeardUnix int64 out chan []byte + backlog backlog.Backlog + registered chan bool compression bool flateReader *wsflate.Reader @@ -51,6 +58,7 @@ func NewClientConnection( connectingIP net.IP, compression bool, delay time.Duration, + bklg backlog.Backlog, ) *ClientConnection { return &ClientConnection{ conn: conn, @@ -65,6 +73,8 @@ func NewClientConnection( compression: compression, flateReader: NewFlateReader(), delay: delay, + backlog: bklg, + registered: make(chan bool, 1), } } @@ -76,33 +86,98 @@ func (cc *ClientConnection) Compression() bool { return cc.compression } +func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.BacklogSegment) (backlog.BacklogSegment, error) { + for segment != nil { + select { + case <-ctx.Done(): + return nil, errContextDone + default: + } + + bm := &m.BroadcastMessage{ + Messages: segment.Messages(), + } + notCompressed, compressed, err := serializeMessage(cc.clientManager, bm, !cc.compression, cc.compression) + if err != nil { + return nil, err + } + + data := []byte{} + if cc.compression { + data = compressed.Bytes() + } else { + data = notCompressed.Bytes() + } + err := cc.writeRaw(data) + if err != nil { + return nil, err + } + log.Debug("segment sent to client", "client", cc.Name, "sentCount", len(bm.Messages)) + + prevSegment = segment + segment = segment.Next() + } + return prevSegment, nil + +} + func (cc *ClientConnection) Start(parentCtx context.Context) { cc.StopWaiter.Start(parentCtx, cc) cc.LaunchThread(func(ctx context.Context) { + // A delay may be configured, ensures the Broadcaster delays before any + // messages are sent to the client. The ClientConnection has not been + // registered so the out channel filling is not a concern. 
if cc.delay != 0 { - var delayQueue [][]byte t := time.NewTimer(cc.delay) - done := false - for !done { - select { - case <-ctx.Done(): - return - case data := <-cc.out: - delayQueue = append(delayQueue, data) - case <-t.C: - for _, data := range delayQueue { - err := cc.writeRaw(data) - if err != nil { - logWarn(err, "error writing data to client") - cc.clientManager.Remove(cc) - return - } - } - done = true - } + select { + case <-ctx.Done(): + return + case <-t.C: } } + // Send the current backlog before registering the ClientConnection in + // case the backlog is very large + head := cc.backlog.Head() + segment, err := cc.writeBacklog(ctx, head) + if errors.Is(err, errContextDone) { + return + } else if err != nil { + logWarn(err, "error writing messages from backlog") + cc.clientManager.Remove(cc) + return + } + + cc.clientManager.Register(cc) + timer := time.NewTimer(5 * time.Second) + select { + case <-ctx.Done(): + return + case <-cc.registered: + log.Debug("ClientConnection registered with ClientManager", "client", cc.Name) + case <-timer.C: + log.Warn("timed out waiting for ClientConnection to register with ClientManager", "client", cc.Name) + } + + // The backlog may have had more messages added to it whilst the + // ClientConnection registers with the ClientManager, therefore the + // last segment must be sent again. This may result in duplicate + // messages being sent to the client but the client should handle any + // duplicate messages. The ClientConnection can not be registered + // before the backlog is sent as the backlog may be very large. This + // could result in the out channel running out of space. + _, err := cc.writeBacklog(ctx, head) + if errors.Is(err, errContextDone) { + return + } else if err != nil { + logWarn(err, "error writing messages from backlog") + cc.clientManager.Remove(cc) + return + } + + // TODO(clamb): does this still need to consider the requested seq number from the client? 
currently it just sends everything in the backlog + + // broadcast any new messages sent to the out channel for { select { case <-ctx.Done(): @@ -119,6 +194,12 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { }) } +// Registered is used by the ClientManager to indicate that ClientConnection +// has been registered with the ClientManager +func (cc *ClientConnection) Registered() { + cc.registered <- true +} + func (cc *ClientConnection) StopOnly() { // Ignore errors from conn.Close since we are just shutting down _ = cc.conn.Close() @@ -161,11 +242,11 @@ func (cc *ClientConnection) readRequest(ctx context.Context, timeout time.Durati return data, opCode, err } -func (cc *ClientConnection) Write(x interface{}) error { +func (cc *ClientConnection) Write(bm *m.BroadcastMessage) error { cc.ioMutex.Lock() defer cc.ioMutex.Unlock() - notCompressed, compressed, err := serializeMessage(cc.clientManager, x, !cc.compression, cc.compression) + notCompressed, compressed, err := serializeMessage(cc.clientManager, bm, !cc.compression, cc.compression) if err != nil { return err } diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index f140e6254f..1bfd720442 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "net" "strings" "sync/atomic" "time" @@ -24,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/broadcaster/backlog" "github.com/offchainlabs/nitro/util/stopwaiter" ) @@ -39,13 +38,6 @@ var ( clientsDurationHistogram = metrics.NewRegisteredHistogram("arb/feed/clients/duration", nil, metrics.NewBoundedHistogramSample()) ) -// CatchupBuffer is a Protocol-specific client catch-up logic can be injected using this interface -type CatchupBuffer interface { - OnRegisterClient(*ClientConnection) (error, int, 
time.Duration) - OnDoBroadcast(interface{}) error - GetMessageCount() int -} - // ClientManager manages client connections type ClientManager struct { stopwaiter.StopWaiter @@ -54,10 +46,10 @@ type ClientManager struct { clientCount int32 pool *gopool.Pool poller netpoll.Poller - broadcastChan chan interface{} + broadcastChan chan *m.BroadcastMessage clientAction chan ClientConnectionAction config BroadcasterConfigFetcher - catchupBuffer CatchupBuffer + backlog backlog.Backlog flateWriter *flate.Writer connectionLimiter *ConnectionLimiter @@ -68,16 +60,16 @@ type ClientConnectionAction struct { create bool } -func NewClientManager(poller netpoll.Poller, configFetcher BroadcasterConfigFetcher, catchupBuffer CatchupBuffer) *ClientManager { +func NewClientManager(poller netpoll.Poller, configFetcher BroadcasterConfigFetcher, bklg backlog.Backlog) *ClientManager { config := configFetcher() return &ClientManager{ poller: poller, pool: gopool.NewPool(config.Workers, config.Queue, 1), clientPtrMap: make(map[*ClientConnection]bool), - broadcastChan: make(chan interface{}, 1), + broadcastChan: make(chan *m.BroadcastMessage, 1), clientAction: make(chan ClientConnectionAction, 128), config: configFetcher, - catchupBuffer: catchupBuffer, + backlog: bklg, connectionLimiter: NewConnectionLimiter(func() *ConnectionLimiterConfig { return &configFetcher().ConnectionLimits }), } } @@ -89,6 +81,8 @@ func (cm *ClientManager) registerClient(ctx context.Context, clientConnection *C } }() + // TODO:(clamb) the clientsTotalFailedRegisterCounter was deleted after backlog logic moved to ClientConnection. 
Should this metric be reintroduced or will it be ok to just delete completely given the behaviour has changed, ask Lee + if cm.config().ConnectionLimits.Enable && !cm.connectionLimiter.Register(clientConnection.clientIp) { return fmt.Errorf("Connection limited %s", clientConnection.clientIp) } @@ -97,40 +91,18 @@ func (cm *ClientManager) registerClient(ctx context.Context, clientConnection *C clientsConnectCount.Inc(1) atomic.AddInt32(&cm.clientCount, 1) - err, sent, elapsed := cm.catchupBuffer.OnRegisterClient(clientConnection) - if err != nil { - clientsTotalFailedRegisterCounter.Inc(1) - if cm.config().ConnectionLimits.Enable { - cm.connectionLimiter.Release(clientConnection.clientIp) - } - return err - } - if cm.config().LogConnect { - log.Info("client registered", "client", clientConnection.Name, "requestedSeqNum", clientConnection.RequestedSeqNum(), "sentCount", sent, "elapsed", elapsed) - } - - clientConnection.Start(ctx) cm.clientPtrMap[clientConnection] = true clientsTotalSuccessCounter.Inc(1) return nil } -// Register registers new connection as a Client. -func (cm *ClientManager) Register( - conn net.Conn, - desc *netpoll.Desc, - requestedSeqNum arbutil.MessageIndex, - connectingIP net.IP, - compression bool, -) *ClientConnection { - createClient := ClientConnectionAction{ - NewClientConnection(conn, desc, cm, requestedSeqNum, connectingIP, compression, cm.config().ClientDelay), +// Register registers given connection as a Client. +func (cm *ClientManager) Register(clientConnection *ClientConnection) { + cm.clientAction <- ClientConnectionAction{ + clientConnection, true, } - cm.clientAction <- createClient - - return createClient.cc } // removeAll removes all clients after main ClientManager thread exits @@ -189,7 +161,7 @@ func (cm *ClientManager) ClientCount() int32 { } // Broadcast sends batch item to all clients. 
-func (cm *ClientManager) Broadcast(bm interface{}) { +func (cm *ClientManager) Broadcast(bm *m.BroadcastMessage) { if cm.Stopped() { // This should only occur if a reorg occurs after the broadcast server is stopped, // with the sequencer enabled but not the sequencer coordinator. @@ -199,8 +171,8 @@ func (cm *ClientManager) Broadcast(bm interface{}) { cm.broadcastChan <- bm } -func (cm *ClientManager) doBroadcast(bm interface{}) ([]*ClientConnection, error) { - if err := cm.catchupBuffer.OnDoBroadcast(bm); err != nil { +func (cm *ClientManager) doBroadcast(bm *m.BroadcastMessage) ([]*ClientConnection, error) { + if err := cm.backlog.Append(bm); err != nil { return nil, err } config := cm.config() @@ -348,6 +320,7 @@ func (cm *ClientManager) Start(parentCtx context.Context) { // Log message already output in registerClient cm.removeClientImpl(clientAction.cc) } + clientAction.cc.Registered() } else { cm.removeClient(clientAction.cc) } diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index d51b368400..02adaa370c 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/broadcaster/backlog" ) var ( @@ -163,18 +164,18 @@ type WSBroadcastServer struct { config BroadcasterConfigFetcher started bool clientManager *ClientManager - catchupBuffer CatchupBuffer + backlog backlog.Backlog chainId uint64 fatalErrChan chan error } -func NewWSBroadcastServer(config BroadcasterConfigFetcher, catchupBuffer CatchupBuffer, chainId uint64, fatalErrChan chan error) *WSBroadcastServer { +func NewWSBroadcastServer(config BroadcasterConfigFetcher, bklg backlog.Backlog, chainId uint64, fatalErrChan chan error) *WSBroadcastServer { return &WSBroadcastServer{ - config: config, - started: false, - catchupBuffer: catchupBuffer, - 
chainId: chainId, - fatalErrChan: fatalErrChan, + config: config, + started: false, + backlog: bklg, + chainId: chainId, + fatalErrChan: fatalErrChan, } } @@ -192,7 +193,7 @@ func (s *WSBroadcastServer) Initialize() error { // Make pool of X size, Y sized work queue and one pre-spawned // goroutine. - s.clientManager = NewClientManager(s.poller, s.config, s.catchupBuffer) + s.clientManager = NewClientManager(s.poller, s.config, s.backlog) return nil } @@ -372,7 +373,8 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands // Register incoming client in clientManager. safeConn := writeDeadliner{conn, config.WriteTimeout} - client := s.clientManager.Register(safeConn, desc, requestedSeqNum, connectingIP, compressionAccepted) + client := NewClientConnection(safeConn, desc, s.clientManager, requestedSeqNum, connectingIP, compressionAccepted, s.config().ClientDelay, s.backlog) + client.Start(ctx) // Subscribe to events about conn. err = s.poller.Start(desc, func(ev netpoll.Event) { @@ -528,7 +530,7 @@ func (s *WSBroadcastServer) Started() bool { } // Broadcast sends batch item to all clients. 
-func (s *WSBroadcastServer) Broadcast(bm interface{}) { +func (s *WSBroadcastServer) Broadcast(bm *m.BroadcastMessage) { s.clientManager.Broadcast(bm) } From 54ea210b0e73c7a724060fa1f55e7598121e0525 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 19 Oct 2023 20:48:46 +1300 Subject: [PATCH 07/71] fix go vet errors --- broadcaster/backlog/backlog_test.go | 20 ++++++++++++----- broadcaster/broadcaster.go | 28 +++++++++++++----------- broadcaster/broadcaster_test.go | 30 +++++++++++++++----------- wsbroadcastserver/clientconnection.go | 6 ++++-- wsbroadcastserver/clientmanager.go | 1 + wsbroadcastserver/wsbroadcastserver.go | 5 +++++ 6 files changed, 58 insertions(+), 32 deletions(-) diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index 65b5d7095b..3683c61086 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -13,8 +13,8 @@ import ( ) func validateBacklog(t *testing.T, b *backlog, count int, start, end uint64, lookupKeys []arbutil.MessageIndex) { - if b.MessageCount() != count { - t.Errorf("backlog message count (%d) does not equal expected message count (%d)", b.MessageCount(), count) + if b.Count() != count { + t.Errorf("backlog message count (%d) does not equal expected message count (%d)", b.Count(), count) } head := b.head.Load() @@ -387,13 +387,15 @@ func TestBacklogRaceCondition(t *testing.T) { // Write to backlog in goroutine wg.Add(1) + errs := make(chan error, 15) go func(t *testing.T, b *backlog) { defer wg.Done() for _, i := range newIndexes { bm := m.CreateDummyBroadcastMessage([]arbutil.MessageIndex{i}) err := b.Append(bm) + errs <- err if err != nil { - t.Fatalf("unexpected error: %s", err) + return } time.Sleep(time.Millisecond) } @@ -405,8 +407,9 @@ func TestBacklogRaceCondition(t *testing.T) { defer wg.Done() for _, i := range []uint64{42, 43, 44, 45, 46, 47} { bm, err := b.Get(i, i+1) + errs <- err if err != nil { 
- t.Fatalf("unexpected error: %s", err) + return } else { validateBroadcastMessage(t, bm, 2, i, i+1) } @@ -425,8 +428,15 @@ func TestBacklogRaceCondition(t *testing.T) { } }(t, b) - // Wait for all goroutines to finish + // Wait for all goroutines to finish or return errors wg.Wait() + close(errs) + for err = range errs { + + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + } // Messages up to 47 were deleted. However the segment that 47 was in is // kept, which is why the backlog starts at 46. validateBacklog(t, b, 10, 46, 55, append(indexes, newIndexes...)) diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index b96ff1606b..270d153afd 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -13,25 +13,26 @@ import ( "github.com/offchainlabs/nitro/arbos/arbostypes" "github.com/offchainlabs/nitro/arbutil" + "github.com/offchainlabs/nitro/broadcaster/backlog" m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/signature" "github.com/offchainlabs/nitro/wsbroadcastserver" ) type Broadcaster struct { - server *wsbroadcastserver.WSBroadcastServer - catchupBuffer *SequenceNumberCatchupBuffer - chainId uint64 - dataSigner signature.DataSignerFunc + server *wsbroadcastserver.WSBroadcastServer + backlog backlog.Backlog + chainId uint64 + dataSigner signature.DataSignerFunc } func NewBroadcaster(config wsbroadcastserver.BroadcasterConfigFetcher, chainId uint64, feedErrChan chan error, dataSigner signature.DataSignerFunc) *Broadcaster { - catchupBuffer := NewSequenceNumberCatchupBuffer(func() bool { return config().LimitCatchup }, func() int { return config().MaxCatchup }) + bklg := backlog.NewBacklog(func() *backlog.Config { return &config().Backlog }) return &Broadcaster{ - server: wsbroadcastserver.NewWSBroadcastServer(config, catchupBuffer, chainId, feedErrChan), - catchupBuffer: catchupBuffer, - chainId: chainId, - dataSigner: dataSigner, + server: 
wsbroadcastserver.NewWSBroadcastServer(config, bklg, chainId, feedErrChan), + backlog: bklg, + chainId: chainId, + dataSigner: dataSigner, } } @@ -91,8 +92,11 @@ func (b *Broadcaster) BroadcastFeedMessages(messages []*m.BroadcastFeedMessage) func (b *Broadcaster) Confirm(seq arbutil.MessageIndex) { log.Debug("confirming sequence number", "sequenceNumber", seq) b.server.Broadcast(&m.BroadcastMessage{ - Version: 1, - ConfirmedSequenceNumberMessage: &m.ConfirmedSequenceNumberMessage{seq}}) + Version: 1, + ConfirmedSequenceNumberMessage: &m.ConfirmedSequenceNumberMessage{ + SequenceNumber: seq, + }, + }) } func (b *Broadcaster) ClientCount() int32 { @@ -104,7 +108,7 @@ func (b *Broadcaster) ListenerAddr() net.Addr { } func (b *Broadcaster) GetCachedMessageCount() int { - return b.catchupBuffer.GetMessageCount() + return b.backlog.Count() } func (b *Broadcaster) Initialize() error { diff --git a/broadcaster/broadcaster_test.go b/broadcaster/broadcaster_test.go index a97facef30..79291e023d 100644 --- a/broadcaster/broadcaster_test.go +++ b/broadcaster/broadcaster_test.go @@ -44,7 +44,7 @@ type messageCountPredicate struct { } func (p *messageCountPredicate) Test() bool { - p.was = p.b.catchupBuffer.GetMessageCount() + p.was = p.b.backlog.Count() return p.was == p.expected } @@ -78,26 +78,30 @@ func TestBroadcasterMessagesRemovedOnConfirmation(t *testing.T) { waitUntilUpdated(t, expectMessageCount(3, "after 3 messages")) Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 4)) waitUntilUpdated(t, expectMessageCount(4, "after 4 messages")) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 5)) + waitUntilUpdated(t, expectMessageCount(5, "after 4 messages")) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 6)) + waitUntilUpdated(t, expectMessageCount(6, "after 4 messages")) - b.Confirm(1) + b.Confirm(4) waitUntilUpdated(t, expectMessageCount(3, - "after 4 messages, 1 cleared by confirm")) + "after 6 messages, 4 
cleared by confirm (first backlog segment)")) - b.Confirm(2) - waitUntilUpdated(t, expectMessageCount(2, - "after 4 messages, 2 cleared by confirm")) + b.Confirm(5) + waitUntilUpdated(t, expectMessageCount(3, + "after 6 messages, 5 cleared by confirm, but segment containing 5th message remains in backlog")) - b.Confirm(1) - waitUntilUpdated(t, expectMessageCount(2, + b.Confirm(4) + waitUntilUpdated(t, expectMessageCount(3, "nothing changed because confirmed sequence number before cache")) - b.Confirm(2) - Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 5)) - waitUntilUpdated(t, expectMessageCount(3, - "after 5 messages, 2 cleared by confirm")) + b.Confirm(5) + Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 7)) + waitUntilUpdated(t, expectMessageCount(4, + "after 7 messages, 4 cleared by confirm, but segment containing 5th message remains in backlog")) // Confirm not-yet-seen or already confirmed/cleared sequence numbers twice to force clearing cache - b.Confirm(6) + b.Confirm(8) waitUntilUpdated(t, expectMessageCount(0, "clear all messages after confirmed 1 beyond latest")) } diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index fff07cf896..82fd3ce6c0 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster/backlog" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/gobwas/ws" "github.com/gobwas/ws/wsflate" @@ -87,6 +88,7 @@ func (cc *ClientConnection) Compression() bool { } func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.BacklogSegment) (backlog.BacklogSegment, error) { + var prevSegment backlog.BacklogSegment for segment != nil { select { case <-ctx.Done(): @@ -108,7 +110,7 @@ func (cc *ClientConnection) writeBacklog(ctx context.Context, segment 
backlog.Ba } else { data = notCompressed.Bytes() } - err := cc.writeRaw(data) + err = cc.writeRaw(data) if err != nil { return nil, err } @@ -166,7 +168,7 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { // duplicate messages. The ClientConnection can not be registered // before the backlog is sent as the backlog may be very large. This // could result in the out channel running out of space. - _, err := cc.writeBacklog(ctx, head) + _, err = cc.writeBacklog(ctx, segment) if errors.Is(err, errContextDone) { return } else if err != nil { diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 1bfd720442..1de63a833e 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/broadcaster/backlog" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/stopwaiter" ) diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 02adaa370c..3a98871fec 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster/backlog" + m "github.com/offchainlabs/nitro/broadcaster/message" ) var ( @@ -67,6 +68,7 @@ type BroadcasterConfig struct { MaxCatchup int `koanf:"max-catchup" reload:"hot"` ConnectionLimits ConnectionLimiterConfig `koanf:"connection-limits" reload:"hot"` ClientDelay time.Duration `koanf:"client-delay" reload:"hot"` + Backlog backlog.Config `koanf:"backlog" reload:"hot"` } func (bc *BroadcasterConfig) Validate() error { @@ -101,6 +103,7 @@ func BroadcasterConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".max-catchup", DefaultBroadcasterConfig.MaxCatchup, "the maximum size of the catchup buffer (-1 means unlimited)") 
ConnectionLimiterConfigAddOptions(prefix+".connection-limits", f) f.Duration(prefix+".client-delay", DefaultBroadcasterConfig.ClientDelay, "delay the first messages sent to each client by this amount") + backlog.AddOptions(prefix+".backlog", f) } var DefaultBroadcasterConfig = BroadcasterConfig{ @@ -126,6 +129,7 @@ var DefaultBroadcasterConfig = BroadcasterConfig{ MaxCatchup: -1, ConnectionLimits: DefaultConnectionLimiterConfig, ClientDelay: 0, + Backlog: backlog.DefaultConfig, } var DefaultTestBroadcasterConfig = BroadcasterConfig{ @@ -151,6 +155,7 @@ var DefaultTestBroadcasterConfig = BroadcasterConfig{ MaxCatchup: -1, ConnectionLimits: DefaultConnectionLimiterConfig, ClientDelay: 0, + Backlog: backlog.DefaultTestConfig, } type WSBroadcastServer struct { From 946046acd17d60ea5499501759c4a6e34f742bd0 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 1 Nov 2023 14:35:47 +1300 Subject: [PATCH 08/71] use Lookup() and requestedSeqNum instead of Head() when sending messages from the backlog --- broadcaster/backlog/backlog.go | 133 ++++++++++++++++---------- broadcaster/backlog/backlog_test.go | 15 ++- wsbroadcastserver/clientconnection.go | 13 ++- wsbroadcastserver/clientmanager.go | 2 +- 4 files changed, 97 insertions(+), 66 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 1ea46ee5a9..32cf7bfd56 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -18,10 +18,9 @@ var ( ) type Backlog interface { - Head() BacklogSegment - Append(bm *m.BroadcastMessage) error - Get(start, end uint64) (*m.BroadcastMessage, error) - Count() int + Append(*m.BroadcastMessage) error + Count() uint64 + Lookup(uint64) (BacklogSegment, error) } type backlog struct { @@ -41,10 +40,6 @@ func NewBacklog(c ConfigFetcher) Backlog { } } -func (b *backlog) Head() BacklogSegment { - return b.head.Load() -} - // Append will add the given messages to the backlogSegment at 
head until // that segment reaches its limit. If messages remain to be added a new segment // will be created. @@ -52,35 +47,35 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { if bm.ConfirmedSequenceNumberMessage != nil { b.delete(uint64(bm.ConfirmedSequenceNumberMessage.SequenceNumber)) - // add to metric? + // TODO(clamb): add to metric? } // TODO(clamb): Do I need a max catchup config for the backlog? Similar to catchup buffer for _, msg := range bm.Messages { - s := b.tail.Load() - if s == nil { - s = &backlogSegment{} - b.head.Store(s) - b.tail.Store(s) + segment := b.tail.Load() + if segment == nil { + segment = newBacklogSegment() + b.head.Store(segment) + b.tail.Store(segment) } - prevMsgIdx := s.end.Load() - if s.count() >= b.config().SegmentLimit { - nextS := &backlogSegment{} - s.nextSegment.Store(nextS) - prevMsgIdx = s.end.Load() - nextS.previousSegment.Store(s) - s = nextS - b.tail.Store(s) + prevMsgIdx := segment.End() + if segment.count() >= b.config().SegmentLimit { + nextSegment := newBacklogSegment() + segment.nextSegment.Store(nextSegment) + prevMsgIdx = segment.End() + nextSegment.previousSegment.Store(segment) + segment = nextSegment + b.tail.Store(segment) } - err := s.append(prevMsgIdx, msg) + err := segment.append(prevMsgIdx, msg) if errors.Is(err, errDropSegments) { head := b.head.Load() - b.removeFromLookup(head.start.Load(), uint64(msg.SequenceNumber)) - b.head.Store(s) - b.tail.Store(s) + b.removeFromLookup(head.Start(), uint64(msg.SequenceNumber)) + b.head.Store(segment) + b.tail.Store(segment) b.messageCount.Store(0) log.Warn(err.Error()) } else if errors.Is(err, errSequenceNumberSeen) { @@ -90,7 +85,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { return err } p := &atomic.Pointer[backlogSegment]{} - p.Store(s) + p.Store(segment) b.lookupLock.Lock() b.lookupByIndex[uint64(msg.SequenceNumber)] = p b.lookupLock.Unlock() @@ -100,23 +95,24 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { return nil } 
-// Get reads messages from the given start to end MessageIndex -func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { +// get reads messages from the given start to end MessageIndex. It was created +// for the original implementation of the backlog but currently is not used. +func (b *backlog) get(start, end uint64) (*m.BroadcastMessage, error) { head := b.head.Load() tail := b.tail.Load() if head == nil && tail == nil { return nil, errOutOfBounds } - if start < head.start.Load() { - start = head.start.Load() + if start < head.Start() { + start = head.Start() } - if end > tail.end.Load() { + if end > tail.End() { return nil, errOutOfBounds } - s, err := b.lookup(start) + segment, err := b.Lookup(start) if err != nil { return nil, err } @@ -124,16 +120,16 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { bm := &m.BroadcastMessage{Version: 1} required := int(end-start) + 1 for { - segMsgs, err := s.get(arbmath.MaxInt(start, s.start.Load()), arbmath.MinInt(end, s.end.Load())) + segMsgs, err := segment.Get(arbmath.MaxInt(start, segment.Start()), arbmath.MinInt(end, segment.End())) if err != nil { return nil, err } bm.Messages = append(bm.Messages, segMsgs...) - s = s.nextSegment.Load() + segment = segment.Next() if len(bm.Messages) == required { break - } else if s == nil { + } else if segment == nil { return nil, errOutOfBounds } } @@ -149,11 +145,11 @@ func (b *backlog) delete(confirmed uint64) { return } - if confirmed < head.start.Load() { + if confirmed < head.Start() { return } - if confirmed > tail.end.Load() { + if confirmed > tail.End() { log.Error("confirmed sequence number is past the end of stored messages", "confirmed sequence number", confirmed, "last stored sequence number", tail.end.Load()) b.reset() // should this be returning an error? 
The other buffer does not and just continues @@ -161,7 +157,7 @@ func (b *backlog) delete(confirmed uint64) { } // find the segment containing the confirmed message - s, err := b.lookup(confirmed) + segment, err := b.Lookup(confirmed) if err != nil { log.Error(fmt.Sprintf("%s: clearing backlog", err.Error())) b.reset() @@ -170,7 +166,7 @@ func (b *backlog) delete(confirmed uint64) { } // check the segment actually contains that message - if found := s.contains(confirmed); !found { + if found := segment.Contains(confirmed); !found { log.Error("error message not found in backlog segment, clearing backlog", "confirmed sequence number", confirmed) b.reset() // should this be returning an error? The other buffer does not and just continues @@ -178,13 +174,13 @@ func (b *backlog) delete(confirmed uint64) { } // remove all previous segments - previous := s.previousSegment.Load() - if previous == nil { + previous := segment.Previous() + if IsBacklogSegmentNil(previous) { return } - b.removeFromLookup(head.start.Load(), previous.end.Load()) - b.head.Store(s) - count := b.messageCount.Load() + head.start.Load() - previous.end.Load() - uint64(1) + b.removeFromLookup(head.Start(), previous.End()) + b.head.Store(segment.(*backlogSegment)) + count := b.Count() + head.Start() - previous.End() - uint64(1) b.messageCount.Store(count) } @@ -198,7 +194,7 @@ func (b *backlog) removeFromLookup(start, end uint64) { } } -func (b *backlog) lookup(i uint64) (*backlogSegment, error) { +func (b *backlog) Lookup(i uint64) (BacklogSegment, error) { b.lookupLock.RLock() pointer, ok := b.lookupByIndex[i] b.lookupLock.RUnlock() @@ -214,8 +210,8 @@ func (b *backlog) lookup(i uint64) (*backlogSegment, error) { return s, nil } -func (s *backlog) Count() int { - return int(s.messageCount.Load()) +func (s *backlog) Count() uint64 { + return s.messageCount.Load() } // reset removes all segments from the backlog @@ -229,7 +225,12 @@ func (b *backlog) reset() { } type BacklogSegment interface { + Start() 
uint64 + End() uint64 + Contains(uint64) bool Next() BacklogSegment + Previous() BacklogSegment + Get(uint64, uint64) ([]*m.BroadcastFeedMessage, error) Messages() []*m.BroadcastFeedMessage } @@ -242,16 +243,44 @@ type backlogSegment struct { previousSegment atomic.Pointer[backlogSegment] } +// newBacklogSegment creates a backlogSegment object with an empty slice of +// messages. It does not return an interface as it is only used inside the +// backlog library. +func newBacklogSegment() *backlogSegment { + return &backlogSegment{ + messages: []*m.BroadcastFeedMessage{}, + } +} + +// IsBacklogSegmentNil uses the internal backlogSegment type to check if a +// variable of type BacklogSegment is nil or not. Comparing whether an +// interface is nil directly will not work. +func IsBacklogSegmentNil(segment BacklogSegment) bool { + return segment.(*backlogSegment) == nil +} + +func (s *backlogSegment) Start() uint64 { + return uint64(s.start.Load()) +} + +func (s *backlogSegment) End() uint64 { + return uint64(s.end.Load()) +} + func (s *backlogSegment) Next() BacklogSegment { return s.nextSegment.Load() } +func (s *backlogSegment) Previous() BacklogSegment { + return s.previousSegment.Load() +} + func (s *backlogSegment) Messages() []*m.BroadcastFeedMessage { return s.messages } -// get reads messages from the given start to end MessageIndex -func (s *backlogSegment) get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { +// Get reads messages from the given start to end MessageIndex +func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { noMsgs := []*m.BroadcastFeedMessage{} if start < s.start.Load() { return noMsgs, errOutOfBounds @@ -288,8 +317,8 @@ func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) return nil } -// contains confirms whether the segment contains a message with the given sequence number -func (s *backlogSegment) contains(i uint64) bool { +// Contains confirms whether the segment contains 
a message with the given sequence number +func (s *backlogSegment) Contains(i uint64) bool { if i < s.start.Load() || i > s.end.Load() { return false } diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index 3683c61086..559480bfef 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -12,7 +12,7 @@ import ( "github.com/offchainlabs/nitro/util/arbmath" ) -func validateBacklog(t *testing.T, b *backlog, count int, start, end uint64, lookupKeys []arbutil.MessageIndex) { +func validateBacklog(t *testing.T, b *backlog, count, start, end uint64, lookupKeys []arbutil.MessageIndex) { if b.Count() != count { t.Errorf("backlog message count (%d) does not equal expected message count (%d)", b.Count(), count) } @@ -28,7 +28,7 @@ func validateBacklog(t *testing.T, b *backlog, count int, start, end uint64, loo } for _, k := range lookupKeys { - if _, err := b.lookup(uint64(k)); err != nil { + if _, err := b.Lookup(uint64(k)); err != nil { t.Errorf("failed to find message (%d) in lookup", k) } } @@ -64,7 +64,7 @@ func TestAppend(t *testing.T) { name string backlogIndexes []arbutil.MessageIndex newIndexes []arbutil.MessageIndex - expectedCount int + expectedCount uint64 expectedStart uint64 expectedEnd uint64 expectedLookupKeys []arbutil.MessageIndex @@ -187,7 +187,7 @@ func TestDelete(t *testing.T) { name string backlogIndexes []arbutil.MessageIndex confirmed arbutil.MessageIndex - expectedCount int + expectedCount uint64 expectedStart uint64 expectedEnd uint64 expectedLookupKeys []arbutil.MessageIndex @@ -252,7 +252,6 @@ func TestDelete(t *testing.T) { SequenceNumber: tc.confirmed, }, } - err = b.Append(bm) if err != nil { t.Fatalf("error appending BroadcastMessage: %s", err) @@ -271,7 +270,7 @@ func TestGetEmptyBacklog(t *testing.T) { t.Fatalf("error creating dummy backlog: %s", err) } - _, err = b.Get(1, 2) + _, err = b.get(1, 2) if !errors.Is(err, errOutOfBounds) { t.Fatalf("unexpected error: %s", 
err) } @@ -358,7 +357,7 @@ func TestGet(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - bm, err := b.Get(tc.start, tc.end) + bm, err := b.get(tc.start, tc.end) if !errors.Is(err, tc.expectedErr) { t.Fatalf("unexpected error: %s", err) } @@ -406,7 +405,7 @@ func TestBacklogRaceCondition(t *testing.T) { go func(t *testing.T, b *backlog) { defer wg.Done() for _, i := range []uint64{42, 43, 44, 45, 46, 47} { - bm, err := b.Get(i, i+1) + bm, err := b.get(i, i+1) errs <- err if err != nil { return diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 82fd3ce6c0..c7ecfbda9f 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -89,7 +89,7 @@ func (cc *ClientConnection) Compression() bool { func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.BacklogSegment) (backlog.BacklogSegment, error) { var prevSegment backlog.BacklogSegment - for segment != nil { + for !backlog.IsBacklogSegmentNil(segment) { select { case <-ctx.Done(): return nil, errContextDone @@ -140,8 +140,13 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { // Send the current backlog before registering the ClientConnection in // case the backlog is very large - head := cc.backlog.Head() - segment, err := cc.writeBacklog(ctx, head) + segment, err := cc.backlog.Lookup(cc.requestedSeqNum) + if err != nil { + logWarn(err, "error finding requested sequence number in backlog") + cc.clientManager.Remove(cc) + return + } + segment, err = cc.writeBacklog(ctx, segment) if errors.Is(err, errContextDone) { return } else if err != nil { @@ -177,8 +182,6 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { return } - // TODO(clamb): does this still need to consider the requested seq number from the client? 
currently it just sends everything in the backlog - // broadcast any new messages sent to the out channel for { select { diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 1de63a833e..b2790f850f 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -227,7 +227,7 @@ func (cm *ClientManager) doBroadcast(bm *m.BroadcastMessage) ([]*ClientConnectio return clientDeleteList, nil } -func serializeMessage(cm *ClientManager, bm interface{}, enableNonCompressedOutput, enableCompressedOutput bool) (bytes.Buffer, bytes.Buffer, error) { +func serializeMessage(cm *ClientManager, bm *m.BroadcastMessage, enableNonCompressedOutput, enableCompressedOutput bool) (bytes.Buffer, bytes.Buffer, error) { var notCompressed bytes.Buffer var compressed bytes.Buffer writers := []io.Writer{} From ff440a4ca00d043efabedc9ff1733fee85022a39 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 1 Nov 2023 14:37:32 +1300 Subject: [PATCH 09/71] change warn to error --- wsbroadcastserver/clientconnection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index c7ecfbda9f..f97516f695 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -163,7 +163,7 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { case <-cc.registered: log.Debug("ClientConnection registered with ClientManager", "client", cc.Name) case <-timer.C: - log.Warn("timed out waiting for ClientConnection to register with ClientManager", "client", cc.Name) + log.Error("timed out waiting for ClientConnection to register with ClientManager", "client", cc.Name) } // The backlog may have had more messages added to it whilst the From 5d36e6b4033b249fc71432fa3e2976ef0f9f76db Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: 
Wed, 1 Nov 2023 17:23:47 +1300 Subject: [PATCH 10/71] change lookupByIndex map values from atomic pointer to backlogSegment to normal pointer to backlogSegment --- broadcaster/backlog/backlog.go | 19 ++++++------------- broadcaster/backlog/backlog_test.go | 9 +++------ 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 32cf7bfd56..850aff3c71 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -27,13 +27,13 @@ type backlog struct { head atomic.Pointer[backlogSegment] tail atomic.Pointer[backlogSegment] lookupLock sync.RWMutex - lookupByIndex map[uint64]*atomic.Pointer[backlogSegment] + lookupByIndex map[uint64]*backlogSegment config ConfigFetcher messageCount atomic.Uint64 } func NewBacklog(c ConfigFetcher) Backlog { - lookup := make(map[uint64]*atomic.Pointer[backlogSegment]) + lookup := make(map[uint64]*backlogSegment) return &backlog{ lookupByIndex: lookup, config: c, @@ -84,10 +84,8 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { } else if err != nil { return err } - p := &atomic.Pointer[backlogSegment]{} - p.Store(segment) b.lookupLock.Lock() - b.lookupByIndex[uint64(msg.SequenceNumber)] = p + b.lookupByIndex[uint64(msg.SequenceNumber)] = segment b.lookupLock.Unlock() b.messageCount.Add(1) } @@ -196,18 +194,13 @@ func (b *backlog) removeFromLookup(start, end uint64) { func (b *backlog) Lookup(i uint64) (BacklogSegment, error) { b.lookupLock.RLock() - pointer, ok := b.lookupByIndex[i] + segment, ok := b.lookupByIndex[i] b.lookupLock.RUnlock() if !ok { return nil, fmt.Errorf("error finding backlog segment containing message with SequenceNumber %d", i) } - s := pointer.Load() - if s == nil { - return nil, fmt.Errorf("error loading backlog segment containing message with SequenceNumber %d", i) - } - - return s, nil + return segment, nil } func (s *backlog) Count() uint64 { @@ -220,7 +213,7 @@ func (b *backlog) reset() { defer 
b.lookupLock.Unlock() b.head = atomic.Pointer[backlogSegment]{} b.tail = atomic.Pointer[backlogSegment]{} - b.lookupByIndex = map[uint64]*atomic.Pointer[backlogSegment]{} + b.lookupByIndex = map[uint64]*backlogSegment{} b.messageCount.Store(0) } diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index 559480bfef..8872be841d 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -3,7 +3,6 @@ package backlog import ( "errors" "sync" - "sync/atomic" "testing" "time" @@ -51,7 +50,7 @@ func validateBroadcastMessage(t *testing.T, bm *m.BroadcastMessage, expectedCoun func createDummyBacklog(indexes []arbutil.MessageIndex) (*backlog, error) { b := &backlog{ - lookupByIndex: map[uint64]*atomic.Pointer[backlogSegment]{}, + lookupByIndex: map[uint64]*backlogSegment{}, config: func() *Config { return &DefaultTestConfig }, } bm := &m.BroadcastMessage{Messages: m.CreateDummyBroadcastMessages(indexes)} @@ -155,10 +154,8 @@ func TestDeleteInvalidBacklog(t *testing.T) { s.end.Store(42) s.messageCount.Store(2) - p := &atomic.Pointer[backlogSegment]{} - p.Store(s) - lookup := make(map[uint64]*atomic.Pointer[backlogSegment]) - lookup[40] = p + lookup := make(map[uint64]*backlogSegment) + lookup[40] = s b := &backlog{ lookupByIndex: lookup, config: func() *Config { return &DefaultTestConfig }, From bff53fca1da330118b15e2205eca9a3aec8575ef Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 1 Nov 2023 17:37:14 +1300 Subject: [PATCH 11/71] remove max catchup TODO - not necessary --- broadcaster/backlog/backlog.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 850aff3c71..7cff4028c9 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -50,8 +50,6 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { // TODO(clamb): add to metric? 
} - // TODO(clamb): Do I need a max catchup config for the backlog? Similar to catchup buffer - for _, msg := range bm.Messages { segment := b.tail.Load() if segment == nil { From 1faa39ecef1af208530abfcab666fa74b11f1a52 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 1 Nov 2023 17:44:34 +1300 Subject: [PATCH 12/71] fix broadcaster tests after Head -> Lookup change in clientconnection --- broadcaster/broadcaster.go | 2 +- broadcaster/broadcaster_test.go | 2 +- wsbroadcastserver/clientconnection.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/broadcaster/broadcaster.go b/broadcaster/broadcaster.go index 270d153afd..8a70e39810 100644 --- a/broadcaster/broadcaster.go +++ b/broadcaster/broadcaster.go @@ -108,7 +108,7 @@ func (b *Broadcaster) ListenerAddr() net.Addr { } func (b *Broadcaster) GetCachedMessageCount() int { - return b.backlog.Count() + return int(b.backlog.Count()) } func (b *Broadcaster) Initialize() error { diff --git a/broadcaster/broadcaster_test.go b/broadcaster/broadcaster_test.go index 79291e023d..884f1de1d6 100644 --- a/broadcaster/broadcaster_test.go +++ b/broadcaster/broadcaster_test.go @@ -44,7 +44,7 @@ type messageCountPredicate struct { } func (p *messageCountPredicate) Test() bool { - p.was = p.b.backlog.Count() + p.was = p.b.GetCachedMessageCount() return p.was == p.expected } diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index f97516f695..c457d9a22d 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -140,7 +140,7 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { // Send the current backlog before registering the ClientConnection in // case the backlog is very large - segment, err := cc.backlog.Lookup(cc.requestedSeqNum) + segment, err := cc.backlog.Lookup(uint64(cc.requestedSeqNum)) if err != nil { logWarn(err, "error finding requested sequence number in 
backlog") cc.clientManager.Remove(cc) From 06d72a8411337ea483f71505ec5aeb8fb2a5c32b Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 2 Nov 2023 15:20:14 +1300 Subject: [PATCH 13/71] fix broadcast client invalid signature test --- wsbroadcastserver/clientconnection.go | 1 + 1 file changed, 1 insertion(+) diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index c457d9a22d..907108b6bd 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -97,6 +97,7 @@ func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.Ba } bm := &m.BroadcastMessage{ + Version: 1, // TODO(clamb): I am unsure if it is correct to hard code the version here like this? It seems to be done in other places though Messages: segment.Messages(), } notCompressed, compressed, err := serializeMessage(cc.clientManager, bm, !cc.compression, cc.compression) From a0bf64b7bd46860664ae46db91fa6c7288d157e8 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Tue, 7 Nov 2023 16:07:06 +1300 Subject: [PATCH 14/71] fix backlog sending duplicate messages to clients --- broadcaster/backlog/backlog.go | 6 +- broadcaster/backlog/backlog_test.go | 6 +- broadcaster/sequencenumbercatchupbuffer.go | 194 ------------- .../sequencenumbercatchupbuffer_test.go | 255 ------------------ wsbroadcastserver/clientconnection.go | 124 +++++---- wsbroadcastserver/clientmanager.go | 24 +- 6 files changed, 93 insertions(+), 516 deletions(-) delete mode 100644 broadcaster/sequencenumbercatchupbuffer.go delete mode 100644 broadcaster/sequencenumbercatchupbuffer_test.go diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 7cff4028c9..57b4810078 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -19,6 +19,7 @@ var ( type Backlog interface { Append(*m.BroadcastMessage) error + 
Get(uint64, uint64) (*m.BroadcastMessage, error) Count() uint64 Lookup(uint64) (BacklogSegment, error) } @@ -91,9 +92,8 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { return nil } -// get reads messages from the given start to end MessageIndex. It was created -// for the original implementation of the backlog but currently is not used. -func (b *backlog) get(start, end uint64) (*m.BroadcastMessage, error) { +// Get reads messages from the given start to end MessageIndex. +func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { head := b.head.Load() tail := b.tail.Load() if head == nil && tail == nil { diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index 8872be841d..4a31e6c579 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -267,7 +267,7 @@ func TestGetEmptyBacklog(t *testing.T) { t.Fatalf("error creating dummy backlog: %s", err) } - _, err = b.get(1, 2) + _, err = b.Get(1, 2) if !errors.Is(err, errOutOfBounds) { t.Fatalf("unexpected error: %s", err) } @@ -354,7 +354,7 @@ func TestGet(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - bm, err := b.get(tc.start, tc.end) + bm, err := b.Get(tc.start, tc.end) if !errors.Is(err, tc.expectedErr) { t.Fatalf("unexpected error: %s", err) } @@ -402,7 +402,7 @@ func TestBacklogRaceCondition(t *testing.T) { go func(t *testing.T, b *backlog) { defer wg.Done() for _, i := range []uint64{42, 43, 44, 45, 46, 47} { - bm, err := b.get(i, i+1) + bm, err := b.Get(i, i+1) errs <- err if err != nil { return diff --git a/broadcaster/sequencenumbercatchupbuffer.go b/broadcaster/sequencenumbercatchupbuffer.go deleted file mode 100644 index fb51a3ceaa..0000000000 --- a/broadcaster/sequencenumbercatchupbuffer.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -package broadcaster - -import ( - "errors" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" - - "github.com/offchainlabs/nitro/arbutil" - m "github.com/offchainlabs/nitro/broadcaster/message" - "github.com/offchainlabs/nitro/wsbroadcastserver" -) - -const ( - // Do not send cache if requested seqnum is older than last cached minus maxRequestedSeqNumOffset - maxRequestedSeqNumOffset = arbutil.MessageIndex(10_000) -) - -var ( - confirmedSequenceNumberGauge = metrics.NewRegisteredGauge("arb/sequencenumber/confirmed", nil) - cachedMessagesSentHistogram = metrics.NewRegisteredHistogram("arb/feed/clients/cache/sent", nil, metrics.NewBoundedHistogramSample()) -) - -type SequenceNumberCatchupBuffer struct { - messages []*m.BroadcastFeedMessage - messageCount int32 - limitCatchup func() bool - maxCatchup func() int -} - -func NewSequenceNumberCatchupBuffer(limitCatchup func() bool, maxCatchup func() int) *SequenceNumberCatchupBuffer { - return &SequenceNumberCatchupBuffer{ - limitCatchup: limitCatchup, - maxCatchup: maxCatchup, - } -} - -func (b *SequenceNumberCatchupBuffer) getCacheMessages(requestedSeqNum arbutil.MessageIndex) *m.BroadcastMessage { - if len(b.messages) == 0 { - return nil - } - var startingIndex int32 - // Ignore messages older than requested sequence number - firstCachedSeqNum := b.messages[0].SequenceNumber - if firstCachedSeqNum < requestedSeqNum { - lastCachedSeqNum := firstCachedSeqNum + arbutil.MessageIndex(len(b.messages)-1) - if lastCachedSeqNum < requestedSeqNum { - // Past end, nothing to return - return nil - } - startingIndex = int32(requestedSeqNum - firstCachedSeqNum) - if startingIndex >= int32(len(b.messages)) { - log.Error("unexpected startingIndex", "requestedSeqNum", requestedSeqNum, "firstCachedSeqNum", firstCachedSeqNum, "startingIndex", startingIndex, "lastCachedSeqNum", lastCachedSeqNum, 
"cacheLength", len(b.messages)) - return nil - } - if b.messages[startingIndex].SequenceNumber != requestedSeqNum { - log.Error("requestedSeqNum not found where expected", "requestedSeqNum", requestedSeqNum, "firstCachedSeqNum", firstCachedSeqNum, "startingIndex", startingIndex, "foundSeqNum", b.messages[startingIndex].SequenceNumber) - return nil - } - } else if b.limitCatchup() && firstCachedSeqNum > maxRequestedSeqNumOffset && requestedSeqNum < (firstCachedSeqNum-maxRequestedSeqNumOffset) { - // Requested seqnum is too old, don't send any cache - return nil - } - - messagesToSend := b.messages[startingIndex:] - if len(messagesToSend) > 0 { - bm := m.BroadcastMessage{ - Version: 1, - Messages: messagesToSend, - } - - return &bm - } - - return nil -} - -func (b *SequenceNumberCatchupBuffer) OnRegisterClient(clientConnection *wsbroadcastserver.ClientConnection) (error, int, time.Duration) { - start := time.Now() - bm := b.getCacheMessages(clientConnection.RequestedSeqNum()) - var bmCount int - if bm != nil { - bmCount = len(bm.Messages) - } - if bm != nil { - // send the newly connected client the requested messages - err := clientConnection.Write(bm) - if err != nil { - log.Error("error sending client cached messages", "error", err, "client", clientConnection.Name, "elapsed", time.Since(start)) - return err, 0, 0 - } - } - - cachedMessagesSentHistogram.Update(int64(bmCount)) - - return nil, bmCount, time.Since(start) -} - -// Takes as input an index into the messages array, not a message index -func (b *SequenceNumberCatchupBuffer) pruneBufferToIndex(idx int) { - b.messages = b.messages[idx:] - if len(b.messages) > 10 && cap(b.messages) > len(b.messages)*10 { - // Too much spare capacity, copy to fresh slice to reset memory usage - b.messages = append([]*m.BroadcastFeedMessage(nil), b.messages[:len(b.messages)]...) 
- } -} - -func (b *SequenceNumberCatchupBuffer) deleteConfirmed(confirmedSequenceNumber arbutil.MessageIndex) { - if len(b.messages) == 0 { - return - } - - firstSequenceNumber := b.messages[0].SequenceNumber - - if confirmedSequenceNumber < firstSequenceNumber { - // Confirmed sequence number is older than cache, so nothing to do - return - } - - confirmedIndex := uint64(confirmedSequenceNumber - firstSequenceNumber) - - if confirmedIndex >= uint64(len(b.messages)) { - log.Error("ConfirmedSequenceNumber is past the end of stored messages", "confirmedSequenceNumber", confirmedSequenceNumber, "firstSequenceNumber", firstSequenceNumber, "cacheLength", len(b.messages)) - b.messages = nil - return - } - - if b.messages[confirmedIndex].SequenceNumber != confirmedSequenceNumber { - // Log instead of returning error here so that the message will be sent to downstream - // relays to also cause them to be cleared. - log.Error("Invariant violation: confirmedSequenceNumber is not where expected, clearing buffer", "confirmedSequenceNumber", confirmedSequenceNumber, "firstSequenceNumber", firstSequenceNumber, "cacheLength", len(b.messages), "foundSequenceNumber", b.messages[confirmedIndex].SequenceNumber) - b.messages = nil - return - } - - b.pruneBufferToIndex(int(confirmedIndex) + 1) -} - -func (b *SequenceNumberCatchupBuffer) OnDoBroadcast(bmi interface{}) error { - broadcastMessage, ok := bmi.(m.BroadcastMessage) - if !ok { - msg := "requested to broadcast message of unknown type" - log.Error(msg) - return errors.New(msg) - } - defer func() { atomic.StoreInt32(&b.messageCount, int32(len(b.messages))) }() - - if confirmMsg := broadcastMessage.ConfirmedSequenceNumberMessage; confirmMsg != nil { - b.deleteConfirmed(confirmMsg.SequenceNumber) - confirmedSequenceNumberGauge.Update(int64(confirmMsg.SequenceNumber)) - } - - maxCatchup := b.maxCatchup() - if maxCatchup == 0 { - b.messages = nil - return nil - } - - for _, newMsg := range broadcastMessage.Messages { - if 
len(b.messages) == 0 { - // Add to empty list - b.messages = append(b.messages, newMsg) - } else if expectedSequenceNumber := b.messages[len(b.messages)-1].SequenceNumber + 1; newMsg.SequenceNumber == expectedSequenceNumber { - // Next sequence number to add to end of list - b.messages = append(b.messages, newMsg) - } else if newMsg.SequenceNumber > expectedSequenceNumber { - log.Warn( - "Message requested to be broadcast has unexpected sequence number; discarding to seqNum from catchup buffer", - "seqNum", newMsg.SequenceNumber, - "expectedSeqNum", expectedSequenceNumber, - ) - b.messages = nil - b.messages = append(b.messages, newMsg) - } else { - log.Info("Skipping already seen message", "seqNum", newMsg.SequenceNumber) - } - } - - if maxCatchup >= 0 && len(b.messages) > maxCatchup { - b.pruneBufferToIndex(len(b.messages) - maxCatchup) - } - - return nil - -} - -func (b *SequenceNumberCatchupBuffer) GetMessageCount() int { - return int(atomic.LoadInt32(&b.messageCount)) -} diff --git a/broadcaster/sequencenumbercatchupbuffer_test.go b/broadcaster/sequencenumbercatchupbuffer_test.go deleted file mode 100644 index 80e1efe5c1..0000000000 --- a/broadcaster/sequencenumbercatchupbuffer_test.go +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright 2020-2021, Offchain Labs, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package broadcaster - -import ( - "strings" - "testing" - - "github.com/offchainlabs/nitro/arbutil" - m "github.com/offchainlabs/nitro/broadcaster/message" - "github.com/offchainlabs/nitro/util/arbmath" -) - -func TestGetEmptyCacheMessages(t *testing.T) { - buffer := SequenceNumberCatchupBuffer{ - messages: nil, - messageCount: 0, - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return -1 }, - } - - // Get everything - bm := buffer.getCacheMessages(0) - if bm != nil { - t.Error("shouldn't have returned anything") - } -} - -func TestGetCacheMessages(t *testing.T) { - indexes := []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46} - buffer := SequenceNumberCatchupBuffer{ - messages: m.CreateDummyBroadcastMessages(indexes), - messageCount: int32(len(indexes)), - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return -1 }, - } - - // Get everything - bm := buffer.getCacheMessages(0) - if len(bm.Messages) != 7 { - t.Error("didn't return all messages") - } - - // Get everything - bm = buffer.getCacheMessages(1) - if len(bm.Messages) != 7 { - t.Error("didn't return all messages") - } - - // Get everything - bm = buffer.getCacheMessages(40) - if len(bm.Messages) != 7 { - t.Error("didn't return all messages") - } - - // Get nothing - bm = buffer.getCacheMessages(100) - if bm != nil { - t.Error("should not have returned anything") - } - - // Test single - bm = buffer.getCacheMessages(46) - if bm == nil { - t.Fatal("nothing returned") - } - if len(bm.Messages) != 1 { - t.Errorf("expected 1 message, got %d messages", len(bm.Messages)) - } - if bm.Messages[0].SequenceNumber != 46 { - t.Errorf("expected sequence number 46, got %d", bm.Messages[0].SequenceNumber) - } - - // Test extremes - bm = buffer.getCacheMessages(arbutil.MessageIndex(^uint64(0))) - if bm != nil { - t.Fatal("should not have returned anything") - } -} - -func TestDeleteConfirmedNil(t *testing.T) { - buffer := SequenceNumberCatchupBuffer{ - messages: nil, - 
messageCount: 0, - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return -1 }, - } - - buffer.deleteConfirmed(0) - if len(buffer.messages) != 0 { - t.Error("nothing should be present") - } -} - -func TestDeleteConfirmInvalidOrder(t *testing.T) { - indexes := []arbutil.MessageIndex{40, 42} - buffer := SequenceNumberCatchupBuffer{ - messages: m.CreateDummyBroadcastMessages(indexes), - messageCount: int32(len(indexes)), - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return -1 }, - } - - // Confirm before cache - buffer.deleteConfirmed(41) - if len(buffer.messages) != 0 { - t.Error("cache not in contiguous order should have caused everything to be deleted") - } -} - -func TestDeleteConfirmed(t *testing.T) { - indexes := []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46} - buffer := SequenceNumberCatchupBuffer{ - messages: m.CreateDummyBroadcastMessages(indexes), - messageCount: int32(len(indexes)), - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return -1 }, - } - - // Confirm older than cache - buffer.deleteConfirmed(39) - if len(buffer.messages) != 7 { - t.Error("nothing should have been deleted") - } - -} -func TestDeleteFreeMem(t *testing.T) { - indexes := []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51} - buffer := SequenceNumberCatchupBuffer{ - messages: m.CreateDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), - messageCount: int32(len(indexes)), - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return -1 }, - } - - // Confirm older than cache - buffer.deleteConfirmed(40) - if cap(buffer.messages) > 20 { - t.Error("extra memory was not freed, cap: ", cap(buffer.messages)) - } - -} - -func TestBroadcastBadMessage(t *testing.T) { - buffer := SequenceNumberCatchupBuffer{ - messages: nil, - messageCount: 0, - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return -1 }, - } - - var foo int - err := 
buffer.OnDoBroadcast(foo) - if err == nil { - t.Error("expected error") - } - if !strings.Contains(err.Error(), "unknown type") { - t.Error("unexpected type") - } -} - -func TestBroadcastPastSeqNum(t *testing.T) { - indexes := []arbutil.MessageIndex{40} - buffer := SequenceNumberCatchupBuffer{ - messages: m.CreateDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), - messageCount: int32(len(indexes)), - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return -1 }, - } - - bm := m.BroadcastMessage{ - Messages: []*m.BroadcastFeedMessage{ - { - SequenceNumber: 39, - }, - }, - } - err := buffer.OnDoBroadcast(bm) - if err != nil { - t.Error("expected error") - } - -} - -func TestBroadcastFutureSeqNum(t *testing.T) { - indexes := []arbutil.MessageIndex{40} - buffer := SequenceNumberCatchupBuffer{ - messages: m.CreateDummyBroadcastMessagesImpl(indexes, len(indexes)*10+1), - messageCount: int32(len(indexes)), - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return -1 }, - } - - bm := m.BroadcastMessage{ - Messages: []*m.BroadcastFeedMessage{ - { - SequenceNumber: 42, - }, - }, - } - err := buffer.OnDoBroadcast(bm) - if err != nil { - t.Error("expected error") - } - -} - -func TestMaxCatchupBufferSize(t *testing.T) { - limit := 5 - buffer := SequenceNumberCatchupBuffer{ - messages: nil, - messageCount: 0, - limitCatchup: func() bool { return false }, - maxCatchup: func() int { return limit }, - } - - firstMessage := 10 - for i := firstMessage; i <= 20; i += 2 { - bm := m.BroadcastMessage{ - Messages: []*m.BroadcastFeedMessage{ - { - SequenceNumber: arbutil.MessageIndex(i), - }, - { - SequenceNumber: arbutil.MessageIndex(i + 1), - }, - }, - } - err := buffer.OnDoBroadcast(bm) - Require(t, err) - haveMessages := buffer.getCacheMessages(0) - expectedCount := arbmath.MinInt(i+len(bm.Messages)-firstMessage, limit) - if len(haveMessages.Messages) != expectedCount { - t.Errorf("after broadcasting messages %v and %v, expected to have 
%v messages but got %v", i, i+1, expectedCount, len(haveMessages.Messages)) - } - expectedFirstMessage := arbutil.MessageIndex(arbmath.MaxInt(firstMessage, i+len(bm.Messages)-limit)) - if haveMessages.Messages[0].SequenceNumber != expectedFirstMessage { - t.Errorf("after broadcasting messages %v and %v, expected the first message to be %v but got %v", i, i+1, expectedFirstMessage, haveMessages.Messages[0].SequenceNumber) - } - } -} diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 907108b6bd..e4452f8f11 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -26,6 +26,11 @@ import ( var errContextDone = errors.New("context done") +type message struct { + data []byte + sequenceNumber arbutil.MessageIndex +} + // ClientConnection represents client connection. type ClientConnection struct { stopwaiter.StopWaiter @@ -39,11 +44,13 @@ type ClientConnection struct { Name string clientManager *ClientManager requestedSeqNum arbutil.MessageIndex + LastSentSeqNum atomic.Uint64 lastHeardUnix int64 - out chan []byte + out chan message backlog backlog.Backlog registered chan bool + backlogSent bool compression bool flateReader *wsflate.Reader @@ -70,12 +77,13 @@ func NewClientConnection( clientManager: clientManager, requestedSeqNum: requestedSeqNum, lastHeardUnix: time.Now().Unix(), - out: make(chan []byte, clientManager.config().MaxSendQueue), + out: make(chan message, clientManager.config().MaxSendQueue), compression: compression, flateReader: NewFlateReader(), delay: delay, backlog: bklg, registered: make(chan bool, 1), + backlogSent: false, } } @@ -87,41 +95,57 @@ func (cc *ClientConnection) Compression() bool { return cc.compression } -func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.BacklogSegment) (backlog.BacklogSegment, error) { +func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.BacklogSegment) error { var prevSegment 
backlog.BacklogSegment for !backlog.IsBacklogSegmentNil(segment) { + // must get the next segment before the messages to be sent are + // retrieved ensures another segment is not added in between calls. + prevSegment = segment + segment = segment.Next() + select { case <-ctx.Done(): - return nil, errContextDone + return errContextDone default: } + msgs := prevSegment.Messages() bm := &m.BroadcastMessage{ Version: 1, // TODO(clamb): I am unsure if it is correct to hard code the version here like this? It seems to be done in other places though - Messages: segment.Messages(), + Messages: msgs, } - notCompressed, compressed, err := serializeMessage(cc.clientManager, bm, !cc.compression, cc.compression) + err := cc.writeBroadcastMessage(bm) if err != nil { - return nil, err + return err } - data := []byte{} - if cc.compression { - data = compressed.Bytes() - } else { - data = notCompressed.Bytes() - } - err = cc.writeRaw(data) - if err != nil { - return nil, err - } - log.Debug("segment sent to client", "client", cc.Name, "sentCount", len(bm.Messages)) + // do not use prevSegment.End() method, must figure out the last + // sequence number from the messages that were actually sent in case + // more messages are added. 
+ end := uint64(msgs[len(msgs)-1].SequenceNumber) + cc.LastSentSeqNum.Store(end) + log.Debug("segment sent to client", "client", cc.Name, "sentCount", len(bm.Messages), "lastSentSeqNum", end) + } + return nil +} - prevSegment = segment - segment = segment.Next() +func (cc *ClientConnection) writeBroadcastMessage(bm *m.BroadcastMessage) error { + notCompressed, compressed, err := serializeMessage(cc.clientManager, bm, !cc.compression, cc.compression) + if err != nil { + return err } - return prevSegment, nil + data := []byte{} + if cc.compression { + data = compressed.Bytes() + } else { + data = notCompressed.Bytes() + } + err = cc.writeRaw(data) + if err != nil { + return err + } + return nil } func (cc *ClientConnection) Start(parentCtx context.Context) { @@ -147,7 +171,7 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { cc.clientManager.Remove(cc) return } - segment, err = cc.writeBacklog(ctx, segment) + err = cc.writeBacklog(ctx, segment) if errors.Is(err, errContextDone) { return } else if err != nil { @@ -167,29 +191,32 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { log.Error("timed out waiting for ClientConnection to register with ClientManager", "client", cc.Name) } - // The backlog may have had more messages added to it whilst the - // ClientConnection registers with the ClientManager, therefore the - // last segment must be sent again. This may result in duplicate - // messages being sent to the client but the client should handle any - // duplicate messages. The ClientConnection can not be registered - // before the backlog is sent as the backlog may be very large. This - // could result in the out channel running out of space. 
- _, err = cc.writeBacklog(ctx, segment) - if errors.Is(err, errContextDone) { - return - } else if err != nil { - logWarn(err, "error writing messages from backlog") - cc.clientManager.Remove(cc) - return - } - // broadcast any new messages sent to the out channel for { select { case <-ctx.Done(): return - case data := <-cc.out: - err := cc.writeRaw(data) + case msg := <-cc.out: + expSeqNum := cc.LastSentSeqNum.Load() + 1 + if !cc.backlogSent && uint64(msg.sequenceNumber) > expSeqNum { + catchupSeqNum := uint64(msg.sequenceNumber) - 1 + bm, err := cc.backlog.Get(expSeqNum, catchupSeqNum) + if err != nil { + logWarn(err, fmt.Sprintf("error reading messages %d to %d from backlog", expSeqNum, catchupSeqNum)) + cc.clientManager.Remove(cc) + return + } + + err = cc.writeBroadcastMessage(bm) + if err != nil { + logWarn(err, fmt.Sprintf("error writing messages %d to %d from backlog", expSeqNum, catchupSeqNum)) + cc.clientManager.Remove(cc) + return + } + } + cc.backlogSent = true + + err := cc.writeRaw(msg.data) if err != nil { logWarn(err, "error writing data to client") cc.clientManager.Remove(cc) @@ -248,23 +275,6 @@ func (cc *ClientConnection) readRequest(ctx context.Context, timeout time.Durati return data, opCode, err } -func (cc *ClientConnection) Write(bm *m.BroadcastMessage) error { - cc.ioMutex.Lock() - defer cc.ioMutex.Unlock() - - notCompressed, compressed, err := serializeMessage(cc.clientManager, bm, !cc.compression, cc.compression) - if err != nil { - return err - } - - if cc.compression { - cc.out <- compressed.Bytes() - } else { - cc.out <- notCompressed.Bytes() - } - return nil -} - func (cc *ClientConnection) writeRaw(p []byte) error { cc.ioMutex.Lock() defer cc.ioMutex.Unlock() diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index b2790f850f..589f9ca67e 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -172,7 +172,11 @@ func (cm *ClientManager) Broadcast(bm 
*m.BroadcastMessage) { cm.broadcastChan <- bm } -func (cm *ClientManager) doBroadcast(bm *m.BroadcastMessage) ([]*ClientConnection, error) { +func (cm *ClientManager) doBroadcast(bfm *m.BroadcastFeedMessage) ([]*ClientConnection, error) { + bm := &m.BroadcastMessage{ + Version: 1, + Messages: []*m.BroadcastFeedMessage{bfm}, + } if err := cm.backlog.Append(bm); err != nil { return nil, err } @@ -207,8 +211,18 @@ func (cm *ClientManager) doBroadcast(bm *m.BroadcastMessage) ([]*ClientConnectio continue } } + + if uint64(bfm.SequenceNumber) <= client.LastSentSeqNum.Load() { + log.Warn("client has already sent message with this sequence number, skipping the message", "client", client.Name, "sequence number", bfm.SequenceNumber) + continue + } + + m := message{ + sequenceNumber: bfm.SequenceNumber, + data: data, + } select { - case client.out <- data: + case client.out <- m: default: // Queue for client too backed up, disconnect instead of blocking on channel send sendQueueTooLargeCount++ @@ -327,8 +341,10 @@ func (cm *ClientManager) Start(parentCtx context.Context) { } case bm := <-cm.broadcastChan: var err error - clientDeleteList, err = cm.doBroadcast(bm) - logError(err, "failed to do broadcast") + for _, msg := range bm.Messages { + clientDeleteList, err = cm.doBroadcast(msg) + logError(err, "failed to do broadcast") + } case <-pingTimer.C: clientDeleteList = cm.verifyClients() pingTimer.Reset(cm.config().Ping) From 1858770edb48210c729af1866e88b4c2225cb15a Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Fri, 10 Nov 2023 12:37:25 +1300 Subject: [PATCH 15/71] fix BroadcastClient test TestBroadcastClientConfirmedMessage --- wsbroadcastserver/clientconnection.go | 6 ++--- wsbroadcastserver/clientmanager.go | 35 ++++++++++++++++++++------- 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index e4452f8f11..94845bde94 100644 --- 
a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -28,7 +28,7 @@ var errContextDone = errors.New("context done") type message struct { data []byte - sequenceNumber arbutil.MessageIndex + sequenceNumber *arbutil.MessageIndex } // ClientConnection represents client connection. @@ -198,8 +198,8 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { return case msg := <-cc.out: expSeqNum := cc.LastSentSeqNum.Load() + 1 - if !cc.backlogSent && uint64(msg.sequenceNumber) > expSeqNum { - catchupSeqNum := uint64(msg.sequenceNumber) - 1 + if !cc.backlogSent && msg.sequenceNumber != nil && uint64(*msg.sequenceNumber) > expSeqNum { + catchupSeqNum := uint64(*msg.sequenceNumber) - 1 bm, err := cc.backlog.Get(expSeqNum, catchupSeqNum) if err != nil { logWarn(err, fmt.Sprintf("error reading messages %d to %d from backlog", expSeqNum, catchupSeqNum)) diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 589f9ca67e..614e1ba143 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcaster/backlog" m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/stopwaiter" @@ -172,11 +173,7 @@ func (cm *ClientManager) Broadcast(bm *m.BroadcastMessage) { cm.broadcastChan <- bm } -func (cm *ClientManager) doBroadcast(bfm *m.BroadcastFeedMessage) ([]*ClientConnection, error) { - bm := &m.BroadcastMessage{ - Version: 1, - Messages: []*m.BroadcastFeedMessage{bfm}, - } +func (cm *ClientManager) doBroadcast(bm *m.BroadcastMessage) ([]*ClientConnection, error) { if err := cm.backlog.Append(bm); err != nil { return nil, err } @@ -212,13 +209,23 @@ func (cm *ClientManager) doBroadcast(bfm *m.BroadcastFeedMessage) ([]*ClientConn } } - if uint64(bfm.SequenceNumber) <= 
client.LastSentSeqNum.Load() { - log.Warn("client has already sent message with this sequence number, skipping the message", "client", client.Name, "sequence number", bfm.SequenceNumber) + var seqNum *arbutil.MessageIndex + n := len(bm.Messages) + if n == 0 { + seqNum = nil + } else if n == 1 { + seqNum = &bm.Messages[0].SequenceNumber + } else { + return nil, fmt.Errorf("doBroadcast was sent %d BroadcastFeedMessages, it can only parse 1 BroadcastFeedMessage at a time", n) + } + + if seqNum != nil && uint64(*seqNum) <= client.LastSentSeqNum.Load() { + log.Warn("client has already sent message with this sequence number, skipping the message", "client", client.Name, "sequence number", *seqNum) continue } m := message{ - sequenceNumber: bfm.SequenceNumber, + sequenceNumber: seqNum, data: data, } select { @@ -342,7 +349,17 @@ func (cm *ClientManager) Start(parentCtx context.Context) { case bm := <-cm.broadcastChan: var err error for _, msg := range bm.Messages { - clientDeleteList, err = cm.doBroadcast(msg) + m := &m.BroadcastMessage{ + Version: bm.Version, + Messages: []*m.BroadcastFeedMessage{msg}, + ConfirmedSequenceNumberMessage: bm.ConfirmedSequenceNumberMessage, + } + clientDeleteList, err = cm.doBroadcast(m) + logError(err, "failed to do broadcast") + } + + if len(bm.Messages) == 0 { + clientDeleteList, err = cm.doBroadcast(bm) logError(err, "failed to do broadcast") } case <-pingTimer.C: From 66357cf75c976f397955f0737b002c11664f0ae7 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Fri, 10 Nov 2023 17:26:47 +1300 Subject: [PATCH 16/71] move flateWriter to serializeMessage to avoid race condition --- wsbroadcastserver/clientconnection.go | 2 +- wsbroadcastserver/clientmanager.go | 25 +++++++++++-------------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 94845bde94..b29e653d8d 100644 --- 
a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -130,7 +130,7 @@ func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.Ba } func (cc *ClientConnection) writeBroadcastMessage(bm *m.BroadcastMessage) error { - notCompressed, compressed, err := serializeMessage(cc.clientManager, bm, !cc.compression, cc.compression) + notCompressed, compressed, err := serializeMessage(bm, !cc.compression, cc.compression) if err != nil { return err } diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 614e1ba143..21c4f131fd 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -52,7 +52,6 @@ type ClientManager struct { clientAction chan ClientConnectionAction config BroadcasterConfigFetcher backlog backlog.Backlog - flateWriter *flate.Writer connectionLimiter *ConnectionLimiter } @@ -180,9 +179,9 @@ func (cm *ClientManager) doBroadcast(bm *m.BroadcastMessage) ([]*ClientConnectio config := cm.config() // /-> wsutil.Writer -> not compressed msg buffer // bm -> json.Encoder -> io.MultiWriter -| - // \-> cm.flateWriter -> wsutil.Writer -> compressed msg buffer + // \-> flateWriter -> wsutil.Writer -> compressed msg buffer - notCompressed, compressed, err := serializeMessage(cm, bm, !config.RequireCompression, config.EnableCompression) + notCompressed, compressed, err := serializeMessage(bm, !config.RequireCompression, config.EnableCompression) if err != nil { return nil, err } @@ -248,7 +247,12 @@ func (cm *ClientManager) doBroadcast(bm *m.BroadcastMessage) ([]*ClientConnectio return clientDeleteList, nil } -func serializeMessage(cm *ClientManager, bm *m.BroadcastMessage, enableNonCompressedOutput, enableCompressedOutput bool) (bytes.Buffer, bytes.Buffer, error) { +func serializeMessage(bm *m.BroadcastMessage, enableNonCompressedOutput, enableCompressedOutput bool) (bytes.Buffer, bytes.Buffer, error) { + flateWriter, err := flate.NewWriterDict(nil, 
DeflateCompressionLevel, GetStaticCompressorDictionary()) + if err != nil { + return bytes.Buffer{}, bytes.Buffer{}, fmt.Errorf("unable to create flate writer: %w", err) + } + var notCompressed bytes.Buffer var compressed bytes.Buffer writers := []io.Writer{} @@ -259,19 +263,12 @@ func serializeMessage(cm *ClientManager, bm *m.BroadcastMessage, enableNonCompre writers = append(writers, notCompressedWriter) } if enableCompressedOutput { - if cm.flateWriter == nil { - var err error - cm.flateWriter, err = flate.NewWriterDict(nil, DeflateCompressionLevel, GetStaticCompressorDictionary()) - if err != nil { - return bytes.Buffer{}, bytes.Buffer{}, fmt.Errorf("unable to create flate writer: %w", err) - } - } compressedWriter = wsutil.NewWriter(&compressed, ws.StateServerSide|ws.StateExtended, ws.OpText) var msg wsflate.MessageState msg.SetCompressed(true) compressedWriter.SetExtensions(&msg) - cm.flateWriter.Reset(compressedWriter) - writers = append(writers, cm.flateWriter) + flateWriter.Reset(compressedWriter) + writers = append(writers, flateWriter) } multiWriter := io.MultiWriter(writers...) 
@@ -285,7 +282,7 @@ func serializeMessage(cm *ClientManager, bm *m.BroadcastMessage, enableNonCompre } } if compressedWriter != nil { - if err := cm.flateWriter.Close(); err != nil { + if err := flateWriter.Close(); err != nil { return bytes.Buffer{}, bytes.Buffer{}, fmt.Errorf("unable to close flate writer: %w", err) } if err := compressedWriter.Flush(); err != nil { From b046b8fced847fa7490dacc82ba8e0b5eca5065b Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Tue, 14 Nov 2023 14:49:42 +1300 Subject: [PATCH 17/71] refactor ClientConnection to stop relying on ClientManager - CM will pass down the required channel to communicate with each CC --- wsbroadcastserver/clientconnection.go | 42 ++++++++++++++++++++------ wsbroadcastserver/clientmanager.go | 20 ------------ wsbroadcastserver/wsbroadcastserver.go | 6 ++-- 3 files changed, 35 insertions(+), 33 deletions(-) diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index b29e653d8d..6b4c33b8f5 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -31,6 +31,11 @@ type message struct { sequenceNumber *arbutil.MessageIndex } +type ClientConnectionAction struct { + cc *ClientConnection + create bool +} + // ClientConnection represents client connection. 
type ClientConnection struct { stopwaiter.StopWaiter @@ -42,7 +47,7 @@ type ClientConnection struct { desc *netpoll.Desc Name string - clientManager *ClientManager + clientAction chan ClientConnectionAction requestedSeqNum arbutil.MessageIndex LastSentSeqNum atomic.Uint64 @@ -61,10 +66,11 @@ type ClientConnection struct { func NewClientConnection( conn net.Conn, desc *netpoll.Desc, - clientManager *ClientManager, + clientAction chan ClientConnectionAction, requestedSeqNum arbutil.MessageIndex, connectingIP net.IP, compression bool, + maxSendQueue int, delay time.Duration, bklg backlog.Backlog, ) *ClientConnection { @@ -74,10 +80,10 @@ func NewClientConnection( desc: desc, creation: time.Now(), Name: fmt.Sprintf("%s@%s-%d", connectingIP, conn.RemoteAddr(), rand.Intn(10)), - clientManager: clientManager, + clientAction: clientAction, requestedSeqNum: requestedSeqNum, lastHeardUnix: time.Now().Unix(), - out: make(chan message, clientManager.config().MaxSendQueue), + out: make(chan message, maxSendQueue), compression: compression, flateReader: NewFlateReader(), delay: delay, @@ -95,6 +101,22 @@ func (cc *ClientConnection) Compression() bool { return cc.compression } +// Register sends the ClientConnection to be registered with the ClientManager. +func (cc *ClientConnection) Register() { + cc.clientAction <- ClientConnectionAction{ + cc: cc, + create: true, + } +} + +// Remove sends the ClientConnection to be removed from the ClientManager. 
+func (cc *ClientConnection) Remove() { + cc.clientAction <- ClientConnectionAction{ + cc: cc, + create: false, + } +} + func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.BacklogSegment) error { var prevSegment backlog.BacklogSegment for !backlog.IsBacklogSegmentNil(segment) { @@ -168,7 +190,7 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { segment, err := cc.backlog.Lookup(uint64(cc.requestedSeqNum)) if err != nil { logWarn(err, "error finding requested sequence number in backlog") - cc.clientManager.Remove(cc) + cc.Remove() return } err = cc.writeBacklog(ctx, segment) @@ -176,11 +198,11 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { return } else if err != nil { logWarn(err, "error writing messages from backlog") - cc.clientManager.Remove(cc) + cc.Remove() return } - cc.clientManager.Register(cc) + cc.Register() timer := time.NewTimer(5 * time.Second) select { case <-ctx.Done(): @@ -203,14 +225,14 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { bm, err := cc.backlog.Get(expSeqNum, catchupSeqNum) if err != nil { logWarn(err, fmt.Sprintf("error reading messages %d to %d from backlog", expSeqNum, catchupSeqNum)) - cc.clientManager.Remove(cc) + cc.Remove() return } err = cc.writeBroadcastMessage(bm) if err != nil { logWarn(err, fmt.Sprintf("error writing messages %d to %d from backlog", expSeqNum, catchupSeqNum)) - cc.clientManager.Remove(cc) + cc.Remove() return } } @@ -219,7 +241,7 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { err := cc.writeRaw(msg.data) if err != nil { logWarn(err, "error writing data to client") - cc.clientManager.Remove(cc) + cc.Remove() return } } diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 21c4f131fd..683d652c2e 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -56,11 +56,6 @@ type ClientManager struct { connectionLimiter *ConnectionLimiter } -type 
ClientConnectionAction struct { - cc *ClientConnection - create bool -} - func NewClientManager(poller netpoll.Poller, configFetcher BroadcasterConfigFetcher, bklg backlog.Backlog) *ClientManager { config := configFetcher() return &ClientManager{ @@ -98,14 +93,6 @@ func (cm *ClientManager) registerClient(ctx context.Context, clientConnection *C return nil } -// Register registers given connection as a Client. -func (cm *ClientManager) Register(clientConnection *ClientConnection) { - cm.clientAction <- ClientConnectionAction{ - clientConnection, - true, - } -} - // removeAll removes all clients after main ClientManager thread exits func (cm *ClientManager) removeAll() { // Only called after main ClientManager thread exits, so remove client directly @@ -150,13 +137,6 @@ func (cm *ClientManager) removeClient(clientConnection *ClientConnection) { delete(cm.clientPtrMap, clientConnection) } -func (cm *ClientManager) Remove(clientConnection *ClientConnection) { - cm.clientAction <- ClientConnectionAction{ - clientConnection, - false, - } -} - func (cm *ClientManager) ClientCount() int32 { return atomic.LoadInt32(&cm.clientCount) } diff --git a/wsbroadcastserver/wsbroadcastserver.go b/wsbroadcastserver/wsbroadcastserver.go index 3a98871fec..eb47f8a635 100644 --- a/wsbroadcastserver/wsbroadcastserver.go +++ b/wsbroadcastserver/wsbroadcastserver.go @@ -378,7 +378,7 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands // Register incoming client in clientManager. safeConn := writeDeadliner{conn, config.WriteTimeout} - client := NewClientConnection(safeConn, desc, s.clientManager, requestedSeqNum, connectingIP, compressionAccepted, s.config().ClientDelay, s.backlog) + client := NewClientConnection(safeConn, desc, s.clientManager.clientAction, requestedSeqNum, connectingIP, compressionAccepted, s.config().MaxSendQueue, s.config().ClientDelay, s.backlog) client.Start(ctx) // Subscribe to events about conn. 
@@ -387,7 +387,7 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands // ReadHup or Hup received, means the client has close the connection // remove it from the clientManager registry. log.Debug("Hup received", "age", client.Age(), "client", client.Name) - s.clientManager.Remove(client) + client.Remove() return } @@ -399,7 +399,7 @@ func (s *WSBroadcastServer) StartWithHeader(ctx context.Context, header ws.Hands s.clientManager.pool.Schedule(func() { // Ignore any messages sent from client, close on any error if _, _, err := client.Receive(ctx, s.config().ReadTimeout); err != nil { - s.clientManager.Remove(client) + client.Remove() return } }) From 7cf1b2b4d49929d284dac975441f61f1522cbc04 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Tue, 14 Nov 2023 14:50:26 +1300 Subject: [PATCH 18/71] refactor backlog to allow deletion of all confirmed messages from segments --- broadcaster/backlog/backlog.go | 83 ++++++++++++++++++----------- broadcaster/backlog/backlog_test.go | 48 ++++++++++++----- broadcaster/broadcaster_test.go | 14 ++--- 3 files changed, 94 insertions(+), 51 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 57b4810078..de786bd32f 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -132,8 +132,10 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { return bm, nil } -// delete removes segments before the confirmed sequence number given. It will -// not remove the segment containing the confirmed sequence number. +// delete removes segments before the confirmed sequence number given. The +// segment containing the confirmed sequence number will continue to store +// previous messages but will register that messages up to the given number +// have been deleted. 
func (b *backlog) delete(confirmed uint64) { head := b.head.Load() tail := b.tail.Load() @@ -148,36 +150,38 @@ func (b *backlog) delete(confirmed uint64) { if confirmed > tail.End() { log.Error("confirmed sequence number is past the end of stored messages", "confirmed sequence number", confirmed, "last stored sequence number", tail.end.Load()) b.reset() - // should this be returning an error? The other buffer does not and just continues return } // find the segment containing the confirmed message - segment, err := b.Lookup(confirmed) + found, err := b.Lookup(confirmed) if err != nil { log.Error(fmt.Sprintf("%s: clearing backlog", err.Error())) b.reset() - // should this be returning an error? The other buffer does not and just continues return } - - // check the segment actually contains that message - if found := segment.Contains(confirmed); !found { - log.Error("error message not found in backlog segment, clearing backlog", "confirmed sequence number", confirmed) - b.reset() - // should this be returning an error? 
The other buffer does not and just continues - return + segment := found.(*backlogSegment) + + // delete messages from the segment with the confirmed message + newHead := segment + start := head.Start() + if segment.End() == confirmed { + found = segment.Next() + newHead = found.(*backlogSegment) + } else { + err = segment.Delete(confirmed) + if err != nil { + log.Error(fmt.Sprintf("%s: clearing backlog", err.Error())) + b.reset() + return + } } - // remove all previous segments - previous := segment.Previous() - if IsBacklogSegmentNil(previous) { - return - } - b.removeFromLookup(head.Start(), previous.End()) - b.head.Store(segment.(*backlogSegment)) - count := b.Count() + head.Start() - previous.End() - uint64(1) + // tidy up lookup, count and head + b.removeFromLookup(start, confirmed) + count := b.Count() + start - confirmed - uint64(1) b.messageCount.Store(count) + b.head.Store(newHead) } // removeFromLookup removes all entries from the head segment's start index to @@ -185,7 +189,7 @@ func (b *backlog) delete(confirmed uint64) { func (b *backlog) removeFromLookup(start, end uint64) { b.lookupLock.Lock() defer b.lookupLock.Unlock() - for i := start; i == end; i++ { + for i := start; i <= end; i++ { delete(b.lookupByIndex, i) } } @@ -218,16 +222,16 @@ func (b *backlog) reset() { type BacklogSegment interface { Start() uint64 End() uint64 - Contains(uint64) bool Next() BacklogSegment - Previous() BacklogSegment Get(uint64, uint64) ([]*m.BroadcastFeedMessage, error) Messages() []*m.BroadcastFeedMessage + Delete(uint64) error } type backlogSegment struct { start atomic.Uint64 end atomic.Uint64 + messagesLock sync.RWMutex messages []*m.BroadcastFeedMessage messageCount atomic.Uint64 nextSegment atomic.Pointer[backlogSegment] @@ -262,11 +266,9 @@ func (s *backlogSegment) Next() BacklogSegment { return s.nextSegment.Load() } -func (s *backlogSegment) Previous() BacklogSegment { - return s.previousSegment.Load() -} - func (s *backlogSegment) Messages() 
[]*m.BroadcastFeedMessage { + s.messagesLock.RLock() + defer s.messagesLock.RUnlock() return s.messages } @@ -283,6 +285,9 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro startIndex := start - s.start.Load() endIndex := end - s.start.Load() + 1 + + s.messagesLock.RLock() + defer s.messagesLock.RUnlock() return s.messages[startIndex:endIndex], nil } @@ -292,6 +297,8 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro // message is ahead of the segment's end message append will return // errDropSegments to ensure any messages before the given message are dropped. func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) error { + s.messagesLock.Lock() + defer s.messagesLock.Unlock() seen := false defer s.updateSegment(&seen) @@ -309,14 +316,26 @@ func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) } // Contains confirms whether the segment contains a message with the given sequence number -func (s *backlogSegment) Contains(i uint64) bool { - if i < s.start.Load() || i > s.end.Load() { - return false +func (s *backlogSegment) Delete(confirmed uint64) error { + seen := false + defer s.updateSegment(&seen) + + start := s.start.Load() + end := s.end.Load() + if confirmed < start || confirmed > end { + return fmt.Errorf("confirmed message (%d) is not in current backlog (%d-%d)", confirmed, start, end) } - msgIndex := i - s.start.Load() + msgIndex := confirmed - start + s.messagesLock.Lock() msg := s.messages[msgIndex] - return uint64(msg.SequenceNumber) == i + if uint64(msg.SequenceNumber) != confirmed { + return fmt.Errorf("confirmed message (%d) is not in expected index (%d) in current backlog (%d-%d)", confirmed, msgIndex, start, end) + } + + s.messages = s.messages[msgIndex+1:] + s.messagesLock.Unlock() + return nil } // updateSegment updates the messageCount, start and end indices of the segment diff --git a/broadcaster/backlog/backlog_test.go 
b/broadcaster/backlog/backlog_test.go index 4a31e6c579..e7b4f5f631 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -31,6 +31,12 @@ func validateBacklog(t *testing.T, b *backlog, count, start, end uint64, lookupK t.Errorf("failed to find message (%d) in lookup", k) } } + + expLen := len(lookupKeys) + actualLen := len(b.lookupByIndex) + if expLen != actualLen { + t.Errorf("expected length of lookupByIndex map (%d) does not equal actual length (%d)", expLen, actualLen) + } } func validateBroadcastMessage(t *testing.T, bm *m.BroadcastMessage, expectedCount int, start, end uint64) { @@ -111,7 +117,7 @@ func TestAppend(t *testing.T) { 3, // Message 44 is non sequential and the first message in a new segment, the previous messages will be dropped from the backlog 44, 46, - []arbutil.MessageIndex{45, 46}, + []arbutil.MessageIndex{44, 45, 46}, }, { "MessageSeenFirstSegmentMessage", @@ -208,22 +214,40 @@ func TestDelete(t *testing.T) { []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, }, { - "MsgInBacklog", + "FirstMsgInBacklog", []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, - 43, // only the first segment will be deleted - 4, - 43, + 40, // this is the first message in the backlog + 6, + 41, 46, - []arbutil.MessageIndex{43, 44, 45, 46}, + []arbutil.MessageIndex{41, 42, 43, 44, 45, 46}, }, { - "MsgInFirstSegmentInBacklog", + "FirstMsgInSegment", []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, - 42, // first segment will not be deleted as confirmed message is there - 7, - 40, + 43, // this is the first message in a middle segment of the backlog + 3, + 44, + 46, + []arbutil.MessageIndex{44, 45, 46}, + }, + { + "MiddleMsgInSegment", + []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + 44, // this is a message in the middle of a middle segment of the backlog + 2, + 45, 46, + []arbutil.MessageIndex{45, 46}, + }, + { + "LastMsgInSegment", []arbutil.MessageIndex{40, 41, 42, 43, 44, 45, 46}, + 45, // this is the last 
message in a middle segment of the backlog, the whole segment should be deleted along with any segments before it + 1, + 46, + 46, + []arbutil.MessageIndex{46}, }, { "MsgAfterBacklog", @@ -420,7 +444,7 @@ func TestBacklogRaceCondition(t *testing.T) { defer wg.Done() for _, i := range []uint64{40, 43, 47} { b.delete(i) - time.Sleep(5 * time.Millisecond) + time.Sleep(10 * time.Millisecond) } }(t, b) @@ -435,5 +459,5 @@ func TestBacklogRaceCondition(t *testing.T) { } // Messages up to 47 were deleted. However the segment that 47 was in is // kept, which is why the backlog starts at 46. - validateBacklog(t, b, 10, 46, 55, append(indexes, newIndexes...)) + validateBacklog(t, b, 8, 48, 55, newIndexes[1:]) } diff --git a/broadcaster/broadcaster_test.go b/broadcaster/broadcaster_test.go index 884f1de1d6..8ac06e9705 100644 --- a/broadcaster/broadcaster_test.go +++ b/broadcaster/broadcaster_test.go @@ -84,21 +84,21 @@ func TestBroadcasterMessagesRemovedOnConfirmation(t *testing.T) { waitUntilUpdated(t, expectMessageCount(6, "after 4 messages")) b.Confirm(4) - waitUntilUpdated(t, expectMessageCount(3, - "after 6 messages, 4 cleared by confirm (first backlog segment)")) + waitUntilUpdated(t, expectMessageCount(2, + "after 6 messages, 4 cleared by confirm")) b.Confirm(5) - waitUntilUpdated(t, expectMessageCount(3, - "after 6 messages, 5 cleared by confirm, but segment containing 5th message remains in backlog")) + waitUntilUpdated(t, expectMessageCount(1, + "after 6 messages, 5 cleared by confirm")) b.Confirm(4) - waitUntilUpdated(t, expectMessageCount(3, + waitUntilUpdated(t, expectMessageCount(1, "nothing changed because confirmed sequence number before cache")) b.Confirm(5) Require(t, b.BroadcastSingle(arbostypes.EmptyTestMessageWithMetadata, 7)) - waitUntilUpdated(t, expectMessageCount(4, - "after 7 messages, 4 cleared by confirm, but segment containing 5th message remains in backlog")) + waitUntilUpdated(t, expectMessageCount(2, + "after 7 messages, 5 cleared by confirm")) 
// Confirm not-yet-seen or already confirmed/cleared sequence numbers twice to force clearing cache b.Confirm(8) From a265e27ac4f7483ef539233b92fb67a23f43f5eb Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Tue, 14 Nov 2023 17:15:53 +1300 Subject: [PATCH 19/71] fix messages in broadcastclients --- broadcastclients/broadcastclients.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index 551dcdb462..5422e38a9a 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -12,7 +12,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/broadcastclient" - "github.com/offchainlabs/nitro/broadcaster" + m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/contracts" "github.com/offchainlabs/nitro/util/stopwaiter" ) @@ -25,14 +25,14 @@ const PRIMARY_FEED_UPTIME = time.Minute * 10 type Router struct { stopwaiter.StopWaiter - messageChan chan broadcaster.BroadcastFeedMessage + messageChan chan m.BroadcastFeedMessage confirmedSequenceNumberChan chan arbutil.MessageIndex forwardTxStreamer broadcastclient.TransactionStreamerInterface forwardConfirmationChan chan arbutil.MessageIndex } -func (r *Router) AddBroadcastMessages(feedMessages []*broadcaster.BroadcastFeedMessage) error { +func (r *Router) AddBroadcastMessages(feedMessages []*m.BroadcastFeedMessage) error { for _, feedMessage := range feedMessages { r.messageChan <- *feedMessage } @@ -68,7 +68,7 @@ func NewBroadcastClients( } newStandardRouter := func() *Router { return &Router{ - messageChan: make(chan broadcaster.BroadcastFeedMessage, ROUTER_QUEUE_SIZE), + messageChan: make(chan m.BroadcastFeedMessage, ROUTER_QUEUE_SIZE), confirmedSequenceNumberChan: make(chan arbutil.MessageIndex, ROUTER_QUEUE_SIZE), forwardTxStreamer: txStreamer, forwardConfirmationChan: 
confirmedSequenceNumberListener, @@ -156,7 +156,7 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { continue } recentFeedItemsNew[msg.SequenceNumber] = time.Now() - if err := bcs.primaryRouter.forwardTxStreamer.AddBroadcastMessages([]*broadcaster.BroadcastFeedMessage{&msg}); err != nil { + if err := bcs.primaryRouter.forwardTxStreamer.AddBroadcastMessages([]*m.BroadcastFeedMessage{&msg}); err != nil { log.Error("Error routing message from Primary Sequencer Feeds", "err", err) } case cs := <-bcs.primaryRouter.confirmedSequenceNumberChan: @@ -178,7 +178,7 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { continue } recentFeedItemsNew[msg.SequenceNumber] = time.Now() - if err := bcs.secondaryRouter.forwardTxStreamer.AddBroadcastMessages([]*broadcaster.BroadcastFeedMessage{&msg}); err != nil { + if err := bcs.secondaryRouter.forwardTxStreamer.AddBroadcastMessages([]*m.BroadcastFeedMessage{&msg}); err != nil { log.Error("Error routing message from Secondary Sequencer Feeds", "err", err) } case cs := <-bcs.secondaryRouter.confirmedSequenceNumberChan: From 617c0d6e0c00fc0fbc38728072ba6fc660ecc578 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Tue, 14 Nov 2023 19:51:36 +1300 Subject: [PATCH 20/71] fix lint errors --- broadcaster/backlog/backlog.go | 14 ++++++++++++-- wsbroadcastserver/clientconnection.go | 2 +- wsbroadcastserver/clientmanager.go | 15 +++++++-------- 3 files changed, 20 insertions(+), 11 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index de786bd32f..3ef731386c 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -160,14 +160,24 @@ func (b *backlog) delete(confirmed uint64) { b.reset() return } - segment := found.(*backlogSegment) + segment, ok := found.(*backlogSegment) + if !ok { + log.Error("error in backlogSegment type assertion: clearing backlog") + b.reset() + return + } // delete messages from the segment 
with the confirmed message newHead := segment start := head.Start() if segment.End() == confirmed { found = segment.Next() - newHead = found.(*backlogSegment) + newHead, ok = found.(*backlogSegment) + if !ok { + log.Error("error in backlogSegment type assertion: clearing backlog") + b.reset() + return + } } else { err = segment.Delete(confirmed) if err != nil { diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 6b4c33b8f5..02dd300b35 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -157,7 +157,7 @@ func (cc *ClientConnection) writeBroadcastMessage(bm *m.BroadcastMessage) error return err } - data := []byte{} + var data []byte if cc.compression { data = compressed.Bytes() } else { diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 683d652c2e..7dc0cdcb52 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -30,14 +30,13 @@ import ( ) var ( - clientsCurrentGauge = metrics.NewRegisteredGauge("arb/feed/clients/current", nil) - clientsConnectCount = metrics.NewRegisteredCounter("arb/feed/clients/connect", nil) - clientsDisconnectCount = metrics.NewRegisteredCounter("arb/feed/clients/disconnect", nil) - clientsTotalSuccessCounter = metrics.NewRegisteredCounter("arb/feed/clients/success", nil) - clientsTotalFailedRegisterCounter = metrics.NewRegisteredCounter("arb/feed/clients/failed/register", nil) - clientsTotalFailedUpgradeCounter = metrics.NewRegisteredCounter("arb/feed/clients/failed/upgrade", nil) - clientsTotalFailedWorkerCounter = metrics.NewRegisteredCounter("arb/feed/clients/failed/worker", nil) - clientsDurationHistogram = metrics.NewRegisteredHistogram("arb/feed/clients/duration", nil, metrics.NewBoundedHistogramSample()) + clientsCurrentGauge = metrics.NewRegisteredGauge("arb/feed/clients/current", nil) + clientsConnectCount = metrics.NewRegisteredCounter("arb/feed/clients/connect", nil) + 
clientsDisconnectCount = metrics.NewRegisteredCounter("arb/feed/clients/disconnect", nil) + clientsTotalSuccessCounter = metrics.NewRegisteredCounter("arb/feed/clients/success", nil) + clientsTotalFailedUpgradeCounter = metrics.NewRegisteredCounter("arb/feed/clients/failed/upgrade", nil) + clientsTotalFailedWorkerCounter = metrics.NewRegisteredCounter("arb/feed/clients/failed/worker", nil) + clientsDurationHistogram = metrics.NewRegisteredHistogram("arb/feed/clients/duration", nil, metrics.NewBoundedHistogramSample()) ) // ClientManager manages client connections From 8e72df616333f39df56bf0d4f4eeb56a2829de29 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 15 Nov 2023 16:25:09 +1300 Subject: [PATCH 21/71] remove unnecessary exported functions from BacklogSegment interface and recreate the Contains method --- broadcaster/backlog/backlog.go | 46 ++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 16 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 3ef731386c..3f046516f8 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -108,21 +108,25 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { return nil, errOutOfBounds } - segment, err := b.Lookup(start) + found, err := b.Lookup(start) if err != nil { return nil, err } + segment, ok := found.(*backlogSegment) + if !ok { + return nil, fmt.Errorf("error in backlogSegment type assertion") + } bm := &m.BroadcastMessage{Version: 1} required := int(end-start) + 1 for { - segMsgs, err := segment.Get(arbmath.MaxInt(start, segment.Start()), arbmath.MinInt(end, segment.End())) + segMsgs, err := segment.get(arbmath.MaxInt(start, segment.Start()), arbmath.MinInt(end, segment.End())) if err != nil { return nil, err } bm.Messages = append(bm.Messages, segMsgs...) 
- segment = segment.Next() + segment = segment.nextSegment.Load() if len(bm.Messages) == required { break } else if segment == nil { @@ -179,7 +183,7 @@ func (b *backlog) delete(confirmed uint64) { return } } else { - err = segment.Delete(confirmed) + err = segment.delete(confirmed) if err != nil { log.Error(fmt.Sprintf("%s: clearing backlog", err.Error())) b.reset() @@ -233,9 +237,8 @@ type BacklogSegment interface { Start() uint64 End() uint64 Next() BacklogSegment - Get(uint64, uint64) ([]*m.BroadcastFeedMessage, error) + Contains(uint64) bool Messages() []*m.BroadcastFeedMessage - Delete(uint64) error } type backlogSegment struct { @@ -282,8 +285,8 @@ func (s *backlogSegment) Messages() []*m.BroadcastFeedMessage { return s.messages } -// Get reads messages from the given start to end MessageIndex -func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { +// get reads messages from the given start to end MessageIndex +func (s *backlogSegment) get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { noMsgs := []*m.BroadcastFeedMessage{} if start < s.start.Load() { return noMsgs, errOutOfBounds @@ -326,23 +329,34 @@ func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) } // Contains confirms whether the segment contains a message with the given sequence number -func (s *backlogSegment) Delete(confirmed uint64) error { +func (s *backlogSegment) Contains(i uint64) bool { + start := s.start.Load() + if i < start || i > s.end.Load() { + return false + } + + msgIndex := i - start + s.messagesLock.RLock() + msg := s.messages[msgIndex] + s.messagesLock.RUnlock() + if uint64(msg.SequenceNumber) != i { + return false + } + return true +} + +func (s *backlogSegment) delete(confirmed uint64) error { seen := false defer s.updateSegment(&seen) start := s.start.Load() end := s.end.Load() - if confirmed < start || confirmed > end { - return fmt.Errorf("confirmed message (%d) is not in current backlog (%d-%d)", confirmed, 
start, end) - } - msgIndex := confirmed - start - s.messagesLock.Lock() - msg := s.messages[msgIndex] - if uint64(msg.SequenceNumber) != confirmed { + if !s.Contains(confirmed) { return fmt.Errorf("confirmed message (%d) is not in expected index (%d) in current backlog (%d-%d)", confirmed, msgIndex, start, end) } + s.messagesLock.Lock() s.messages = s.messages[msgIndex+1:] s.messagesLock.Unlock() return nil From ea46fd9032fb17628ce632909a68cf7e6aad2e14 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 15 Nov 2023 16:26:17 +1300 Subject: [PATCH 22/71] ensure the ClientConnection only sends the requested messages to the client rather than the whole segment --- wsbroadcastserver/clientconnection.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 02dd300b35..1b570a36d2 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -132,6 +132,10 @@ func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.Ba } msgs := prevSegment.Messages() + if prevSegment.Contains(uint64(cc.requestedSeqNum)) { + requestedIdx := int(cc.requestedSeqNum) - int(prevSegment.Start()) + msgs = msgs[requestedIdx:] + } bm := &m.BroadcastMessage{ Version: 1, // TODO(clamb): I am unsure if it is correct to hard code the version here like this? 
It seems to be done in other places though Messages: msgs, From 60ce3825a481bcd79690db075175b6b8a9ef7113 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 15 Nov 2023 16:42:59 +1300 Subject: [PATCH 23/71] fix backlog lint error --- broadcaster/backlog/backlog.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 3f046516f8..8fe721dc38 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -339,10 +339,7 @@ func (s *backlogSegment) Contains(i uint64) bool { s.messagesLock.RLock() msg := s.messages[msgIndex] s.messagesLock.RUnlock() - if uint64(msg.SequenceNumber) != i { - return false - } - return true + return uint64(msg.SequenceNumber) == i } func (s *backlogSegment) delete(confirmed uint64) error { From 5e2f1aa1f6ac6daefa8c2bfd91e05811c71b97ef Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 16 Nov 2023 15:33:50 +1300 Subject: [PATCH 24/71] ensure WSBroadcast server sends the whole backlog when the requested seq num is lower than what is in the backlog --- broadcaster/backlog/backlog.go | 5 +++++ wsbroadcastserver/clientconnection.go | 15 +++++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 8fe721dc38..1618fc5ee7 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -18,6 +18,7 @@ var ( ) type Backlog interface { + Head() BacklogSegment Append(*m.BroadcastMessage) error Get(uint64, uint64) (*m.BroadcastMessage, error) Count() uint64 @@ -41,6 +42,10 @@ func NewBacklog(c ConfigFetcher) Backlog { } } +func (b *backlog) Head() BacklogSegment { + return b.head.Load() +} + // Append will add the given messages to the backlogSegment at head until // that segment reaches its limit. 
If messages remain to be added a new segment // will be created. diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 1b570a36d2..9e82b09a6a 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -191,13 +191,16 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { // Send the current backlog before registering the ClientConnection in // case the backlog is very large - segment, err := cc.backlog.Lookup(uint64(cc.requestedSeqNum)) - if err != nil { - logWarn(err, "error finding requested sequence number in backlog") - cc.Remove() - return + segment := cc.backlog.Head() + if !backlog.IsBacklogSegmentNil(segment) && segment.Start() < uint64(cc.requestedSeqNum) { + s, err := cc.backlog.Lookup(uint64(cc.requestedSeqNum)) + if err != nil { + logWarn(err, "error finding requested sequence number in backlog: sending the entire backlog instead") + } else { + segment = s + } } - err = cc.writeBacklog(ctx, segment) + err := cc.writeBacklog(ctx, segment) if errors.Is(err, errContextDone) { return } else if err != nil { From 812e7a6399e13e40330108fac65d9449819e3712 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:55:31 +1300 Subject: [PATCH 25/71] add comments to backlog functions --- broadcaster/backlog/backlog.go | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 1618fc5ee7..999dd3b6aa 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -17,6 +17,7 @@ var ( errOutOfBounds = errors.New("message not found in backlog") ) +// Backlog defines the interface for backlog. 
type Backlog interface { Head() BacklogSegment Append(*m.BroadcastMessage) error @@ -25,6 +26,8 @@ type Backlog interface { Lookup(uint64) (BacklogSegment, error) } +// backlog stores backlogSegments and provides the ability to read/write +// messages. type backlog struct { head atomic.Pointer[backlogSegment] tail atomic.Pointer[backlogSegment] @@ -34,6 +37,7 @@ type backlog struct { messageCount atomic.Uint64 } +// NewBacklog creates a backlog. func NewBacklog(c ConfigFetcher) Backlog { lookup := make(map[uint64]*backlogSegment) return &backlog{ @@ -42,6 +46,7 @@ func NewBacklog(c ConfigFetcher) Backlog { } } +// Head return the head backlogSegment within the backlog. func (b *backlog) Head() BacklogSegment { return b.head.Load() } @@ -204,7 +209,7 @@ func (b *backlog) delete(confirmed uint64) { } // removeFromLookup removes all entries from the head segment's start index to -// the given confirmed index +// the given confirmed index. func (b *backlog) removeFromLookup(start, end uint64) { b.lookupLock.Lock() defer b.lookupLock.Unlock() @@ -213,6 +218,7 @@ func (b *backlog) removeFromLookup(start, end uint64) { } } +// Lookup attempts to find the backlogSegment storing the given message index. func (b *backlog) Lookup(i uint64) (BacklogSegment, error) { b.lookupLock.RLock() segment, ok := b.lookupByIndex[i] @@ -224,11 +230,12 @@ func (b *backlog) Lookup(i uint64) (BacklogSegment, error) { return segment, nil } +// Count returns the number of messages stored within the backlog. func (s *backlog) Count() uint64 { return s.messageCount.Load() } -// reset removes all segments from the backlog +// reset removes all segments from the backlog. func (b *backlog) reset() { b.lookupLock.Lock() defer b.lookupLock.Unlock() @@ -238,6 +245,7 @@ func (b *backlog) reset() { b.messageCount.Store(0) } +// BacklogSegment defines the interface for backlogSegment. 
type BacklogSegment interface { Start() uint64 End() uint64 @@ -246,6 +254,8 @@ type BacklogSegment interface { Messages() []*m.BroadcastFeedMessage } +// backlogSegment stores messages up to a limit defined by the backlog. It also +// points to the next backlogSegment in the list. type backlogSegment struct { start atomic.Uint64 end atomic.Uint64 @@ -272,25 +282,29 @@ func IsBacklogSegmentNil(segment BacklogSegment) bool { return segment.(*backlogSegment) == nil } +// Start returns the first message index within the backlogSegment. func (s *backlogSegment) Start() uint64 { return uint64(s.start.Load()) } +// End returns the last message index within the backlogSegment. func (s *backlogSegment) End() uint64 { return uint64(s.end.Load()) } +// Next returns the next backlogSegment. func (s *backlogSegment) Next() BacklogSegment { return s.nextSegment.Load() } +// Messages returns all of the messages stored in the backlogSegment. func (s *backlogSegment) Messages() []*m.BroadcastFeedMessage { s.messagesLock.RLock() defer s.messagesLock.RUnlock() return s.messages } -// get reads messages from the given start to end MessageIndex +// get reads messages from the given start to end message index. func (s *backlogSegment) get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { noMsgs := []*m.BroadcastFeedMessage{} if start < s.start.Load() { @@ -333,7 +347,8 @@ func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) return nil } -// Contains confirms whether the segment contains a message with the given sequence number +// Contains confirms whether the segment contains a message with the given +// sequence number. func (s *backlogSegment) Contains(i uint64) bool { start := s.start.Load() if i < start || i > s.end.Load() { @@ -347,6 +362,8 @@ func (s *backlogSegment) Contains(i uint64) bool { return uint64(msg.SequenceNumber) == i } +// delete removes messages from the backlogSegment up to and including the +// given confirmed message index. 
func (s *backlogSegment) delete(confirmed uint64) error { seen := false defer s.updateSegment(&seen) @@ -376,7 +393,7 @@ func (s *backlogSegment) updateSegment(seen *bool) { } } -// count returns the number of messages stored in the backlog segment +// count returns the number of messages stored in the backlog segment. func (s *backlogSegment) count() int { return int(s.messageCount.Load()) } From 0be22bc96361aa2a08a22d0a7dc792f5855ce89d Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:07:47 +1300 Subject: [PATCH 26/71] change lookupByKey to containers.SyncMap and remove lookupLock --- broadcaster/backlog/backlog.go | 22 +++++++--------------- broadcaster/backlog/backlog_test.go | 9 +++++---- 2 files changed, 12 insertions(+), 19 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 999dd3b6aa..391d856146 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/log" m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/containers" ) var ( @@ -31,15 +32,14 @@ type Backlog interface { type backlog struct { head atomic.Pointer[backlogSegment] tail atomic.Pointer[backlogSegment] - lookupLock sync.RWMutex - lookupByIndex map[uint64]*backlogSegment + lookupByIndex containers.SyncMap[uint64, *backlogSegment] config ConfigFetcher messageCount atomic.Uint64 } // NewBacklog creates a backlog. 
func NewBacklog(c ConfigFetcher) Backlog { - lookup := make(map[uint64]*backlogSegment) + lookup := containers.SyncMap[uint64, *backlogSegment]{} return &backlog{ lookupByIndex: lookup, config: c, @@ -93,9 +93,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { } else if err != nil { return err } - b.lookupLock.Lock() - b.lookupByIndex[uint64(msg.SequenceNumber)] = segment - b.lookupLock.Unlock() + b.lookupByIndex.Store(uint64(msg.SequenceNumber), segment) b.messageCount.Add(1) } @@ -211,18 +209,14 @@ func (b *backlog) delete(confirmed uint64) { // removeFromLookup removes all entries from the head segment's start index to // the given confirmed index. func (b *backlog) removeFromLookup(start, end uint64) { - b.lookupLock.Lock() - defer b.lookupLock.Unlock() for i := start; i <= end; i++ { - delete(b.lookupByIndex, i) + b.lookupByIndex.Delete(i) } } // Lookup attempts to find the backlogSegment storing the given message index. func (b *backlog) Lookup(i uint64) (BacklogSegment, error) { - b.lookupLock.RLock() - segment, ok := b.lookupByIndex[i] - b.lookupLock.RUnlock() + segment, ok := b.lookupByIndex.Load(i) if !ok { return nil, fmt.Errorf("error finding backlog segment containing message with SequenceNumber %d", i) } @@ -237,11 +231,9 @@ func (s *backlog) Count() uint64 { // reset removes all segments from the backlog. 
func (b *backlog) reset() { - b.lookupLock.Lock() - defer b.lookupLock.Unlock() b.head = atomic.Pointer[backlogSegment]{} b.tail = atomic.Pointer[backlogSegment]{} - b.lookupByIndex = map[uint64]*backlogSegment{} + b.lookupByIndex = containers.SyncMap[uint64, *backlogSegment]{} b.messageCount.Store(0) } diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index e7b4f5f631..251f11e332 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -9,6 +9,7 @@ import ( "github.com/offchainlabs/nitro/arbutil" m "github.com/offchainlabs/nitro/broadcaster/message" "github.com/offchainlabs/nitro/util/arbmath" + "github.com/offchainlabs/nitro/util/containers" ) func validateBacklog(t *testing.T, b *backlog, count, start, end uint64, lookupKeys []arbutil.MessageIndex) { @@ -33,7 +34,7 @@ func validateBacklog(t *testing.T, b *backlog, count, start, end uint64, lookupK } expLen := len(lookupKeys) - actualLen := len(b.lookupByIndex) + actualLen := int(b.Count()) if expLen != actualLen { t.Errorf("expected length of lookupByIndex map (%d) does not equal actual length (%d)", expLen, actualLen) } @@ -56,7 +57,7 @@ func validateBroadcastMessage(t *testing.T, bm *m.BroadcastMessage, expectedCoun func createDummyBacklog(indexes []arbutil.MessageIndex) (*backlog, error) { b := &backlog{ - lookupByIndex: map[uint64]*backlogSegment{}, + lookupByIndex: containers.SyncMap[uint64, *backlogSegment]{}, config: func() *Config { return &DefaultTestConfig }, } bm := &m.BroadcastMessage{Messages: m.CreateDummyBroadcastMessages(indexes)} @@ -160,8 +161,8 @@ func TestDeleteInvalidBacklog(t *testing.T) { s.end.Store(42) s.messageCount.Store(2) - lookup := make(map[uint64]*backlogSegment) - lookup[40] = s + lookup := containers.SyncMap[uint64, *backlogSegment]{} + lookup.Store(40, s) b := &backlog{ lookupByIndex: lookup, config: func() *Config { return &DefaultTestConfig }, From ffa66361a8d59b475ba8e8fdd65c08b666362e4d Mon Sep 17 
00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:10:07 +1300 Subject: [PATCH 27/71] fix log.info line --- broadcaster/backlog/backlog.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 391d856146..b625217eaa 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -88,7 +88,7 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { b.messageCount.Store(0) log.Warn(err.Error()) } else if errors.Is(err, errSequenceNumberSeen) { - log.Info("ignoring message sequence number (%s), already in backlog", msg.SequenceNumber) + log.Info("ignoring message sequence number, already in backlog", "message sequence number", msg.SequenceNumber) continue } else if err != nil { return err From 3403b7bb15da861f77256a1760baf8c85989501a Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:27:39 +1300 Subject: [PATCH 28/71] fix potential delete race condition in backlog.Get --- broadcaster/backlog/backlog.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index b625217eaa..002031c6c9 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -108,16 +108,17 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { return nil, errOutOfBounds } - if start < head.Start() { - start = head.Start() - } - if end > tail.End() { return nil, errOutOfBounds } found, err := b.Lookup(start) - if err != nil { + if start < head.Start() { + // doing this check after the Lookup call ensures there is no race + // condition with a delete call + start = head.Start() + found = head + } else if err != nil { return nil, err } segment, ok := found.(*backlogSegment) From d9d479dd5ba42a193b29e8d8724dba9f97db13de Mon Sep 17 00:00:00 2001 From: 
Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 14:30:39 +1300 Subject: [PATCH 29/71] remove backlogSegment type assertion --- broadcaster/backlog/backlog.go | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 002031c6c9..718468b23d 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -112,30 +112,26 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { return nil, errOutOfBounds } - found, err := b.Lookup(start) + segment, err := b.Lookup(start) if start < head.Start() { // doing this check after the Lookup call ensures there is no race // condition with a delete call start = head.Start() - found = head + segment = head } else if err != nil { return nil, err } - segment, ok := found.(*backlogSegment) - if !ok { - return nil, fmt.Errorf("error in backlogSegment type assertion") - } bm := &m.BroadcastMessage{Version: 1} required := int(end-start) + 1 for { - segMsgs, err := segment.get(arbmath.MaxInt(start, segment.Start()), arbmath.MinInt(end, segment.End())) + segMsgs, err := segment.Get(arbmath.MaxInt(start, segment.Start()), arbmath.MinInt(end, segment.End())) if err != nil { return nil, err } bm.Messages = append(bm.Messages, segMsgs...) - segment = segment.nextSegment.Load() + segment = segment.Next() if len(bm.Messages) == required { break } else if segment == nil { @@ -245,6 +241,7 @@ type BacklogSegment interface { Next() BacklogSegment Contains(uint64) bool Messages() []*m.BroadcastFeedMessage + Get(uint64, uint64) ([]*m.BroadcastFeedMessage, error) } // backlogSegment stores messages up to a limit defined by the backlog. It also @@ -297,8 +294,8 @@ func (s *backlogSegment) Messages() []*m.BroadcastFeedMessage { return s.messages } -// get reads messages from the given start to end message index. 
-func (s *backlogSegment) get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { +// Get reads messages from the given start to end message index. +func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { noMsgs := []*m.BroadcastFeedMessage{} if start < s.start.Load() { return noMsgs, errOutOfBounds From 1fc038c4e5262081878104f461c4de0807133df1 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 15:22:05 +1300 Subject: [PATCH 30/71] remove backlogSegment.start, backlogSegment.end and backlogSegment.messageCount, use the backlogSegment.messagesLock to calculate these when required --- broadcaster/backlog/backlog.go | 58 +++++++++++++---------------- broadcaster/backlog/backlog_test.go | 11 ++---- 2 files changed, 29 insertions(+), 40 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 718468b23d..f469abe563 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -157,7 +157,7 @@ func (b *backlog) delete(confirmed uint64) { } if confirmed > tail.End() { - log.Error("confirmed sequence number is past the end of stored messages", "confirmed sequence number", confirmed, "last stored sequence number", tail.end.Load()) + log.Error("confirmed sequence number is past the end of stored messages", "confirmed sequence number", confirmed, "last stored sequence number", tail.End()) b.reset() return } @@ -247,11 +247,8 @@ type BacklogSegment interface { // backlogSegment stores messages up to a limit defined by the backlog. It also // points to the next backlogSegment in the list. 
type backlogSegment struct { - start atomic.Uint64 - end atomic.Uint64 messagesLock sync.RWMutex messages []*m.BroadcastFeedMessage - messageCount atomic.Uint64 nextSegment atomic.Pointer[backlogSegment] previousSegment atomic.Pointer[backlogSegment] } @@ -274,12 +271,23 @@ func IsBacklogSegmentNil(segment BacklogSegment) bool { // Start returns the first message index within the backlogSegment. func (s *backlogSegment) Start() uint64 { - return uint64(s.start.Load()) + s.messagesLock.RLock() + defer s.messagesLock.RUnlock() + if len(s.messages) > 0 { + return uint64(s.messages[0].SequenceNumber) + } + return uint64(0) } // End returns the last message index within the backlogSegment. func (s *backlogSegment) End() uint64 { - return uint64(s.end.Load()) + s.messagesLock.RLock() + defer s.messagesLock.RUnlock() + c := len(s.messages) + if c == 0 { + return uint64(0) + } + return uint64(s.messages[c-1].SequenceNumber) } // Next returns the next backlogSegment. @@ -297,16 +305,16 @@ func (s *backlogSegment) Messages() []*m.BroadcastFeedMessage { // Get reads messages from the given start to end message index. 
func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { noMsgs := []*m.BroadcastFeedMessage{} - if start < s.start.Load() { + if start < s.Start() { return noMsgs, errOutOfBounds } - if end > s.end.Load() { + if end > s.End() { return noMsgs, errOutOfBounds } - startIndex := start - s.start.Load() - endIndex := end - s.start.Load() + 1 + startIndex := start - s.Start() + endIndex := end - s.Start() + 1 s.messagesLock.RLock() defer s.messagesLock.RUnlock() @@ -321,8 +329,6 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) error { s.messagesLock.Lock() defer s.messagesLock.Unlock() - seen := false - defer s.updateSegment(&seen) if expSeqNum := prevMsgIdx + 1; prevMsgIdx == 0 || uint64(msg.SequenceNumber) == expSeqNum { s.messages = append(s.messages, msg) @@ -331,7 +337,6 @@ func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) s.messages = append(s.messages, msg) return fmt.Errorf("new message sequence number (%d) is greater than the expected sequence number (%d): %w", msg.SequenceNumber, expSeqNum, errDropSegments) } else { - seen = true return errSequenceNumberSeen } return nil @@ -340,8 +345,8 @@ func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) // Contains confirms whether the segment contains a message with the given // sequence number. func (s *backlogSegment) Contains(i uint64) bool { - start := s.start.Load() - if i < start || i > s.end.Load() { + start := s.Start() + if i < start || i > s.End() { return false } @@ -355,11 +360,8 @@ func (s *backlogSegment) Contains(i uint64) bool { // delete removes messages from the backlogSegment up to and including the // given confirmed message index. 
func (s *backlogSegment) delete(confirmed uint64) error { - seen := false - defer s.updateSegment(&seen) - - start := s.start.Load() - end := s.end.Load() + start := s.Start() + end := s.End() msgIndex := confirmed - start if !s.Contains(confirmed) { return fmt.Errorf("confirmed message (%d) is not in expected index (%d) in current backlog (%d-%d)", confirmed, msgIndex, start, end) @@ -371,19 +373,9 @@ func (s *backlogSegment) delete(confirmed uint64) error { return nil } -// updateSegment updates the messageCount, start and end indices of the segment -// this should be called using defer whenever a method updates the messages. It -// will do nothing if the message has already been seen by the backlog. -func (s *backlogSegment) updateSegment(seen *bool) { - if !*seen { - c := len(s.messages) - s.messageCount.Store(uint64(c)) - s.start.Store(uint64(s.messages[0].SequenceNumber)) - s.end.Store(uint64(s.messages[c-1].SequenceNumber)) - } -} - // count returns the number of messages stored in the backlog segment. 
func (s *backlogSegment) count() int { - return int(s.messageCount.Load()) + s.messagesLock.RLock() + defer s.messagesLock.RUnlock() + return len(s.messages) } diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index 251f11e332..4591f268d0 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -18,13 +18,13 @@ func validateBacklog(t *testing.T, b *backlog, count, start, end uint64, lookupK } head := b.head.Load() - if start != 0 && head.start.Load() != start { - t.Errorf("head of backlog (%d) does not equal expected head (%d)", head.start.Load(), start) + if start != 0 && head.Start() != start { + t.Errorf("head of backlog (%d) does not equal expected head (%d)", head.Start(), start) } tail := b.tail.Load() - if end != 0 && tail.end.Load() != end { - t.Errorf("tail of backlog (%d) does not equal expected tail (%d)", tail.end.Load(), end) + if end != 0 && tail.End() != end { + t.Errorf("tail of backlog (%d) does not equal expected tail (%d)", tail.End(), end) } for _, k := range lookupKeys { @@ -157,9 +157,6 @@ func TestDeleteInvalidBacklog(t *testing.T) { s := &backlogSegment{ messages: m.CreateDummyBroadcastMessages([]arbutil.MessageIndex{40, 42}), } - s.start.Store(40) - s.end.Store(42) - s.messageCount.Store(2) lookup := containers.SyncMap[uint64, *backlogSegment]{} lookup.Store(40, s) From 11e26ed0a7a4f530346a30de9d112f14483e94c4 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 15:33:06 +1300 Subject: [PATCH 31/71] fix go vet errors from containers.SyncMap addition, change to a pointer to containers.SyncMap --- broadcaster/backlog/backlog.go | 6 +++--- broadcaster/backlog/backlog_test.go | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index f469abe563..86a13e277d 100644 --- a/broadcaster/backlog/backlog.go +++ 
b/broadcaster/backlog/backlog.go @@ -32,14 +32,14 @@ type Backlog interface { type backlog struct { head atomic.Pointer[backlogSegment] tail atomic.Pointer[backlogSegment] - lookupByIndex containers.SyncMap[uint64, *backlogSegment] + lookupByIndex *containers.SyncMap[uint64, *backlogSegment] config ConfigFetcher messageCount atomic.Uint64 } // NewBacklog creates a backlog. func NewBacklog(c ConfigFetcher) Backlog { - lookup := containers.SyncMap[uint64, *backlogSegment]{} + lookup := &containers.SyncMap[uint64, *backlogSegment]{} return &backlog{ lookupByIndex: lookup, config: c, @@ -230,7 +230,7 @@ func (s *backlog) Count() uint64 { func (b *backlog) reset() { b.head = atomic.Pointer[backlogSegment]{} b.tail = atomic.Pointer[backlogSegment]{} - b.lookupByIndex = containers.SyncMap[uint64, *backlogSegment]{} + b.lookupByIndex = &containers.SyncMap[uint64, *backlogSegment]{} b.messageCount.Store(0) } diff --git a/broadcaster/backlog/backlog_test.go b/broadcaster/backlog/backlog_test.go index 4591f268d0..ab25a523f7 100644 --- a/broadcaster/backlog/backlog_test.go +++ b/broadcaster/backlog/backlog_test.go @@ -57,7 +57,7 @@ func validateBroadcastMessage(t *testing.T, bm *m.BroadcastMessage, expectedCoun func createDummyBacklog(indexes []arbutil.MessageIndex) (*backlog, error) { b := &backlog{ - lookupByIndex: containers.SyncMap[uint64, *backlogSegment]{}, + lookupByIndex: &containers.SyncMap[uint64, *backlogSegment]{}, config: func() *Config { return &DefaultTestConfig }, } bm := &m.BroadcastMessage{Messages: m.CreateDummyBroadcastMessages(indexes)} @@ -158,7 +158,7 @@ func TestDeleteInvalidBacklog(t *testing.T) { messages: m.CreateDummyBroadcastMessages([]arbutil.MessageIndex{40, 42}), } - lookup := containers.SyncMap[uint64, *backlogSegment]{} + lookup := &containers.SyncMap[uint64, *backlogSegment]{} lookup.Store(40, s) b := &backlog{ lookupByIndex: lookup, From c098840875f35395705d27c282b8ffebbb30ea8b Mon Sep 17 00:00:00 2001 From: Christopher Lamb 
<37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 15:58:37 +1300 Subject: [PATCH 32/71] return nil interface from backlogSegment.Next rather than nil *backlogSegment --- broadcaster/backlog/backlog.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 86a13e277d..232a0ab6c8 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -292,7 +292,11 @@ func (s *backlogSegment) End() uint64 { // Next returns the next backlogSegment. func (s *backlogSegment) Next() BacklogSegment { - return s.nextSegment.Load() + next := s.nextSegment.Load() + if next == nil { + return nil // return a nil interface instead of a nil *backlogSegment + } + return next } // Messages returns all of the messages stored in the backlogSegment. From ccb64991ec0cf65bcf1dfa014f7a221b08c8598c Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 16:25:29 +1300 Subject: [PATCH 33/71] return copies of messages slice from backlogSegment.Get and backlogSegment.Messages --- broadcaster/backlog/backlog.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 232a0ab6c8..d694b0287e 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -64,14 +64,14 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { for _, msg := range bm.Messages { segment := b.tail.Load() if segment == nil { - segment = newBacklogSegment() + segment = newBacklogSegment(b.config) b.head.Store(segment) b.tail.Store(segment) } prevMsgIdx := segment.End() if segment.count() >= b.config().SegmentLimit { - nextSegment := newBacklogSegment() + nextSegment := newBacklogSegment(b.config) segment.nextSegment.Store(nextSegment) prevMsgIdx = segment.End() nextSegment.previousSegment.Store(segment) @@ -247,6 +247,7 @@ type 
BacklogSegment interface { // backlogSegment stores messages up to a limit defined by the backlog. It also // points to the next backlogSegment in the list. type backlogSegment struct { + config ConfigFetcher messagesLock sync.RWMutex messages []*m.BroadcastFeedMessage nextSegment atomic.Pointer[backlogSegment] @@ -256,8 +257,9 @@ type backlogSegment struct { // newBacklogSegment creates a backlogSegment object with an empty slice of // messages. It does not return an interface as it is only used inside the // backlog library. -func newBacklogSegment() *backlogSegment { +func newBacklogSegment(c ConfigFetcher) *backlogSegment { return &backlogSegment{ + config: c, messages: []*m.BroadcastFeedMessage{}, } } @@ -303,7 +305,9 @@ func (s *backlogSegment) Next() BacklogSegment { func (s *backlogSegment) Messages() []*m.BroadcastFeedMessage { s.messagesLock.RLock() defer s.messagesLock.RUnlock() - return s.messages + tmp := make([]*m.BroadcastFeedMessage, s.config().SegmentLimit) + copy(tmp, s.messages) + return tmp } // Get reads messages from the given start to end message index. 
@@ -322,7 +326,9 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro s.messagesLock.RLock() defer s.messagesLock.RUnlock() - return s.messages[startIndex:endIndex], nil + tmp := make([]*m.BroadcastFeedMessage, s.config().SegmentLimit) + copy(tmp, s.messages) + return tmp[startIndex:endIndex], nil } // append appends the given BroadcastFeedMessage to messages if it is the first From 25372473fc2ce3f0f5491df45ea53346bfe4f848 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 16:27:06 +1300 Subject: [PATCH 34/71] get messagesLock for entire backlogSegment.Get and backlogSegment.Contains functions --- broadcaster/backlog/backlog.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index d694b0287e..9b5c7dd108 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -312,6 +312,8 @@ func (s *backlogSegment) Messages() []*m.BroadcastFeedMessage { // Get reads messages from the given start to end message index. func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, error) { + s.messagesLock.RLock() + defer s.messagesLock.RUnlock() noMsgs := []*m.BroadcastFeedMessage{} if start < s.Start() { return noMsgs, errOutOfBounds @@ -324,8 +326,6 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro startIndex := start - s.Start() endIndex := end - s.Start() + 1 - s.messagesLock.RLock() - defer s.messagesLock.RUnlock() tmp := make([]*m.BroadcastFeedMessage, s.config().SegmentLimit) copy(tmp, s.messages) return tmp[startIndex:endIndex], nil @@ -355,15 +355,15 @@ func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) // Contains confirms whether the segment contains a message with the given // sequence number. 
func (s *backlogSegment) Contains(i uint64) bool { + s.messagesLock.RLock() + defer s.messagesLock.RUnlock() start := s.Start() if i < start || i > s.End() { return false } msgIndex := i - start - s.messagesLock.RLock() msg := s.messages[msgIndex] - s.messagesLock.RUnlock() return uint64(msg.SequenceNumber) == i } From 369b5ea649bb063c68e315a98809e90ee2433935 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Thu, 30 Nov 2023 16:31:11 +1300 Subject: [PATCH 35/71] remove metric TODO --- broadcaster/backlog/backlog.go | 1 - 1 file changed, 1 deletion(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 9b5c7dd108..89ee371f1b 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -58,7 +58,6 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { if bm.ConfirmedSequenceNumberMessage != nil { b.delete(uint64(bm.ConfirmedSequenceNumberMessage.SequenceNumber)) - // TODO(clamb): add to metric? 
} for _, msg := range bm.Messages { From c86fcfdbf5b730cba510cda25ac93a87cdec1868 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Fri, 1 Dec 2023 15:12:29 +1300 Subject: [PATCH 36/71] use length of messages slice in copy rather than segment limit to avoid slice being populated with nils --- broadcaster/backlog/backlog.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 89ee371f1b..6e051713b5 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -63,14 +63,14 @@ func (b *backlog) Append(bm *m.BroadcastMessage) error { for _, msg := range bm.Messages { segment := b.tail.Load() if segment == nil { - segment = newBacklogSegment(b.config) + segment = newBacklogSegment() b.head.Store(segment) b.tail.Store(segment) } prevMsgIdx := segment.End() if segment.count() >= b.config().SegmentLimit { - nextSegment := newBacklogSegment(b.config) + nextSegment := newBacklogSegment() segment.nextSegment.Store(nextSegment) prevMsgIdx = segment.End() nextSegment.previousSegment.Store(segment) @@ -246,7 +246,6 @@ type BacklogSegment interface { // backlogSegment stores messages up to a limit defined by the backlog. It also // points to the next backlogSegment in the list. type backlogSegment struct { - config ConfigFetcher messagesLock sync.RWMutex messages []*m.BroadcastFeedMessage nextSegment atomic.Pointer[backlogSegment] @@ -256,9 +255,8 @@ type backlogSegment struct { // newBacklogSegment creates a backlogSegment object with an empty slice of // messages. It does not return an interface as it is only used inside the // backlog library. 
-func newBacklogSegment(c ConfigFetcher) *backlogSegment { +func newBacklogSegment() *backlogSegment { return &backlogSegment{ - config: c, messages: []*m.BroadcastFeedMessage{}, } } @@ -304,7 +302,7 @@ func (s *backlogSegment) Next() BacklogSegment { func (s *backlogSegment) Messages() []*m.BroadcastFeedMessage { s.messagesLock.RLock() defer s.messagesLock.RUnlock() - tmp := make([]*m.BroadcastFeedMessage, s.config().SegmentLimit) + tmp := make([]*m.BroadcastFeedMessage, len(s.messages)) copy(tmp, s.messages) return tmp } @@ -325,7 +323,7 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro startIndex := start - s.Start() endIndex := end - s.Start() + 1 - tmp := make([]*m.BroadcastFeedMessage, s.config().SegmentLimit) + tmp := make([]*m.BroadcastFeedMessage, len(s.messages)) copy(tmp, s.messages) return tmp[startIndex:endIndex], nil } From b70648145cee50871a2d1fec24af4cfa6579b01f Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Fri, 1 Dec 2023 15:15:08 +1300 Subject: [PATCH 37/71] change IsBacklogSegmentNil to check for nil interface as backlogSegment.Next now returns a nil interface --- broadcaster/backlog/backlog.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 6e051713b5..d39cde29a6 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -265,7 +265,12 @@ func newBacklogSegment() *backlogSegment { // variable of type BacklogSegment is nil or not. Comparing whether an // interface is nil directly will not work. func IsBacklogSegmentNil(segment BacklogSegment) bool { - return segment.(*backlogSegment) == nil + if segment == nil { + return true + } else if segment.(*backlogSegment) == nil { + return true + } + return false } // Start returns the first message index within the backlogSegment. 
From 7334858d0f311a5bdbddec9749e7c4f782fb8d67 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Fri, 1 Dec 2023 15:18:55 +1300 Subject: [PATCH 38/71] add version as a constant to messages lib --- broadcaster/message/message.go | 4 ++++ wsbroadcastserver/clientconnection.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/broadcaster/message/message.go b/broadcaster/message/message.go index d42e0c8b60..f436e765cb 100644 --- a/broadcaster/message/message.go +++ b/broadcaster/message/message.go @@ -6,6 +6,10 @@ import ( "github.com/offchainlabs/nitro/arbutil" ) +const ( + V1 = 1 +) + // BroadcastMessage is the base message type for messages to send over the network. // // Acts as a variant holding the message types. The type of the message is diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 9e82b09a6a..9036fc8f2c 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -137,7 +137,7 @@ func (cc *ClientConnection) writeBacklog(ctx context.Context, segment backlog.Ba msgs = msgs[requestedIdx:] } bm := &m.BroadcastMessage{ - Version: 1, // TODO(clamb): I am unsure if it is correct to hard code the version here like this? 
It seems to be done in other places though + Version: m.V1, Messages: msgs, } err := cc.writeBroadcastMessage(bm) From 39514fc316cacb0a184d397d8006616624c381e5 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 4 Dec 2023 15:09:06 +1300 Subject: [PATCH 39/71] correct broadcaster lib --- broadcastclients/broadcastclients.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index bdf5f16d24..d85ecb2c24 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -141,7 +141,7 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { defer stopSecondaryFeedTimer.Stop() defer primaryFeedIsDownTimer.Stop() - msgHandler := func(msg broadcaster.BroadcastFeedMessage, router *Router) error { + msgHandler := func(msg m.BroadcastFeedMessage, router *Router) error { if _, ok := recentFeedItemsNew[msg.SequenceNumber]; ok { return nil } @@ -149,7 +149,7 @@ func (bcs *BroadcastClients) Start(ctx context.Context) { return nil } recentFeedItemsNew[msg.SequenceNumber] = time.Now() - if err := router.forwardTxStreamer.AddBroadcastMessages([]*broadcaster.BroadcastFeedMessage{&msg}); err != nil { + if err := router.forwardTxStreamer.AddBroadcastMessages([]*m.BroadcastFeedMessage{&msg}); err != nil { return err } return nil From 6d1eca1d246413c0708b0f5095f5bb338abdd24d Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 4 Dec 2023 15:51:19 +1300 Subject: [PATCH 40/71] only send one message with the confirmed sequence number --- wsbroadcastserver/clientmanager.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 7dc0cdcb52..1777da642c 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -326,9 +326,12 @@ func (cm 
*ClientManager) Start(parentCtx context.Context) { var err error for _, msg := range bm.Messages { m := &m.BroadcastMessage{ - Version: bm.Version, - Messages: []*m.BroadcastFeedMessage{msg}, - ConfirmedSequenceNumberMessage: bm.ConfirmedSequenceNumberMessage, + Version: bm.Version, + Messages: []*m.BroadcastFeedMessage{msg}, + } + // This ensures that only one message is sent with the confirmed sequence number + if i == 0 { + m.ConfirmedSequenceNumberMessage = bm.ConfirmedSequenceNumberMessage } clientDeleteList, err = cm.doBroadcast(m) logError(err, "failed to do broadcast") From ebed210b79746213ef53e89a8a2cb185061c78c5 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 4 Dec 2023 16:53:17 +1300 Subject: [PATCH 41/71] add comment explaining additional doBroadcast call for confirmed messages --- wsbroadcastserver/clientmanager.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 1777da642c..432864dd60 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -337,6 +337,8 @@ func (cm *ClientManager) Start(parentCtx context.Context) { logError(err, "failed to do broadcast") } + // A message with ConfirmedSequenceNumberMessage could be sent without any messages + // this section ensures that message is still sent. 
if len(bm.Messages) == 0 { clientDeleteList, err = cm.doBroadcast(bm) logError(err, "failed to do broadcast") From fb177175a36afa5c1d8d703a7feec1aa8a319e34 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 4 Dec 2023 17:02:50 +1300 Subject: [PATCH 42/71] move last sent sequence number check from client manager to client connection --- wsbroadcastserver/clientconnection.go | 5 +++++ wsbroadcastserver/clientmanager.go | 5 ----- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 9036fc8f2c..8069c847b1 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -226,6 +226,11 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { case <-ctx.Done(): return case msg := <-cc.out: + if msg.sequenceNumber != nil && uint64(*msg.sequenceNumber) <= cc.LastSentSeqNum.Load() { + log.Debug("client has already sent message with this sequence number, skipping the message", "client", client.Name, "sequence number", *seqNum) + continue + } + expSeqNum := cc.LastSentSeqNum.Load() + 1 if !cc.backlogSent && msg.sequenceNumber != nil && uint64(*msg.sequenceNumber) > expSeqNum { catchupSeqNum := uint64(*msg.sequenceNumber) - 1 diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 432864dd60..82113874c4 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -197,11 +197,6 @@ func (cm *ClientManager) doBroadcast(bm *m.BroadcastMessage) ([]*ClientConnectio return nil, fmt.Errorf("doBroadcast was sent %d BroadcastFeedMessages, it can only parse 1 BroadcastFeedMessage at a time", n) } - if seqNum != nil && uint64(*seqNum) <= client.LastSentSeqNum.Load() { - log.Warn("client has already sent message with this sequence number, skipping the message", "client", client.Name, "sequence number", *seqNum) - continue - } - m := message{ 
sequenceNumber: seqNum, data: data, From 93144bf2fa572b4965548caf8bb61507ac5842b4 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 4 Dec 2023 17:31:51 +1300 Subject: [PATCH 43/71] fix sending only 1 message on confirm --- wsbroadcastserver/clientmanager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wsbroadcastserver/clientmanager.go b/wsbroadcastserver/clientmanager.go index 82113874c4..a88716756a 100644 --- a/wsbroadcastserver/clientmanager.go +++ b/wsbroadcastserver/clientmanager.go @@ -319,7 +319,7 @@ func (cm *ClientManager) Start(parentCtx context.Context) { } case bm := <-cm.broadcastChan: var err error - for _, msg := range bm.Messages { + for i, msg := range bm.Messages { m := &m.BroadcastMessage{ Version: bm.Version, Messages: []*m.BroadcastFeedMessage{msg}, From 42388916747ced7c77c92d5bfb4e681e98a1d31f Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 4 Dec 2023 17:32:05 +1300 Subject: [PATCH 44/71] change warn to debug --- wsbroadcastserver/clientconnection.go | 1 - 1 file changed, 1 deletion(-) diff --git a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 8069c847b1..0c875f7fcf 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -237,7 +237,6 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { bm, err := cc.backlog.Get(expSeqNum, catchupSeqNum) if err != nil { logWarn(err, fmt.Sprintf("error reading messages %d to %d from backlog", expSeqNum, catchupSeqNum)) - cc.Remove() return } From 0bd866ada50dd3df78aa75b0d380d80acc790920 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 4 Dec 2023 17:55:46 +1300 Subject: [PATCH 45/71] fix moving last sent check --- wsbroadcastserver/clientconnection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/wsbroadcastserver/clientconnection.go b/wsbroadcastserver/clientconnection.go index 0c875f7fcf..49cd2af7e6 100644 --- a/wsbroadcastserver/clientconnection.go +++ b/wsbroadcastserver/clientconnection.go @@ -227,7 +227,7 @@ func (cc *ClientConnection) Start(parentCtx context.Context) { return case msg := <-cc.out: if msg.sequenceNumber != nil && uint64(*msg.sequenceNumber) <= cc.LastSentSeqNum.Load() { - log.Debug("client has already sent message with this sequence number, skipping the message", "client", client.Name, "sequence number", *seqNum) + log.Debug("client has already sent message with this sequence number, skipping the message", "client", cc.Name, "sequence number", *msg.sequenceNumber) continue } From 9522e3e47b1719efdb82d28069c9e9742fd43943 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 10 Dec 2023 14:39:48 -0700 Subject: [PATCH 46/71] Finalize ArbOS 11 in the node software --- arbos/arbosState/arbosstate.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 8702c62d16..46d2c6f3fe 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -300,14 +300,6 @@ func (state *ArbosState) UpgradeArbosVersion( l1pricing.L1PricerFundsPoolAddress, ))) case 10: - if !chainConfig.DebugMode() { - // This upgrade isn't finalized so we only want to support it for testing - return fmt.Errorf( - "the chain is upgrading to unsupported ArbOS version %v, %w", - state.arbosVersion+1, - ErrFatalNodeOutOfDate, - ) - } // Update the PerBatchGasCost to a more accurate value compared to the old v6 default. 
ensure(state.l1PricingState.SetPerBatchGasCost(l1pricing.InitialPerBatchGasCostV12)) From 4f377a90a62e723711da50e57b276e4d0364bf78 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 10 Dec 2023 14:54:50 -0700 Subject: [PATCH 47/71] Add support for consensus v11 to Dockerfile --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index b966125b66..b62e569259 100644 --- a/Dockerfile +++ b/Dockerfile @@ -164,6 +164,7 @@ RUN ./download-machine.sh consensus-v10 0x6b94a7fc388fd8ef3def759297828dc311761e RUN ./download-machine.sh consensus-v10.1 0xda4e3ad5e7feacb817c21c8d0220da7650fe9051ece68a3f0b1c5d38bbb27b21 RUN ./download-machine.sh consensus-v10.2 0x0754e09320c381566cc0449904c377a52bd34a6b9404432e80afd573b67f7b17 RUN ./download-machine.sh consensus-v10.3 0xf559b6d4fa869472dabce70fe1c15221bdda837533dfd891916836975b434dec +RUN ./download-machine.sh consensus-v11 0xf4389b835497a910d7ba3ebfb77aa93da985634f3c052de1290360635be40c4a FROM golang:1.20-bullseye as node-builder WORKDIR /workspace From e7cbd35f6d8fcc80f356629e5190301fc8dd3443 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Sun, 10 Dec 2023 15:00:11 -0700 Subject: [PATCH 48/71] Push back ArbOS 12 to ArbOS 20 to give room for orbit --- arbos/arbosState/arbosstate.go | 4 +++- precompiles/precompile.go | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/arbos/arbosState/arbosstate.go b/arbos/arbosState/arbosstate.go index 46d2c6f3fe..caf44e2a99 100644 --- a/arbos/arbosState/arbosstate.go +++ b/arbos/arbosState/arbosstate.go @@ -315,7 +315,9 @@ func (state *ArbosState) UpgradeArbosVersion( if !firstTime { ensure(state.chainOwners.ClearList()) } - case 11: + // ArbOS versions 12 through 19 are left to Orbit chains for custom upgrades. + // TODO: currently you can't get to ArbOS 20 without hitting the default case. 
+ case 19: if !chainConfig.DebugMode() { // This upgrade isn't finalized so we only want to support it for testing return fmt.Errorf( diff --git a/precompiles/precompile.go b/precompiles/precompile.go index ded90ebdf7..330eb9a2e4 100644 --- a/precompiles/precompile.go +++ b/precompiles/precompile.go @@ -554,7 +554,7 @@ func Precompiles() map[addr]ArbosPrecompile { ArbOwnerPublic := insert(MakePrecompile(templates.ArbOwnerPublicMetaData, &ArbOwnerPublic{Address: hex("6b")})) ArbOwnerPublic.methodsByName["GetInfraFeeAccount"].arbosVersion = 5 ArbOwnerPublic.methodsByName["RectifyChainOwner"].arbosVersion = 11 - ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = 12 + ArbOwnerPublic.methodsByName["GetBrotliCompressionLevel"].arbosVersion = 20 ArbRetryableImpl := &ArbRetryableTx{Address: types.ArbRetryableTxAddress} ArbRetryable := insert(MakePrecompile(templates.ArbRetryableTxMetaData, ArbRetryableImpl)) @@ -590,7 +590,7 @@ func Precompiles() map[addr]ArbosPrecompile { ArbOwner.methodsByName["SetInfraFeeAccount"].arbosVersion = 5 ArbOwner.methodsByName["ReleaseL1PricerSurplusFunds"].arbosVersion = 10 ArbOwner.methodsByName["SetChainConfig"].arbosVersion = 11 - ArbOwner.methodsByName["SetBrotliCompressionLevel"].arbosVersion = 12 + ArbOwner.methodsByName["SetBrotliCompressionLevel"].arbosVersion = 20 insert(ownerOnly(ArbOwnerImpl.Address, ArbOwner, emitOwnerActs)) insert(debugOnly(MakePrecompile(templates.ArbDebugMetaData, &ArbDebug{Address: hex("ff")}))) From fed7de239389a8b35e5e644d70aac6a5ebde1afc Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 11 Dec 2023 08:34:49 +0000 Subject: [PATCH 49/71] remove recursive lock calls from backlogSegment object --- broadcaster/backlog/backlog.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index d39cde29a6..18d3af2e38 100644 --- 
a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -277,6 +277,12 @@ func IsBacklogSegmentNil(segment BacklogSegment) bool { func (s *backlogSegment) Start() uint64 { s.messagesLock.RLock() defer s.messagesLock.RUnlock() + return s.start() +} + +// start allows the first message to be retrieved from functions that already +// have the messagesLock. +func (s *backlogSegment) start() uint64 { if len(s.messages) > 0 { return uint64(s.messages[0].SequenceNumber) } @@ -317,7 +323,7 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro s.messagesLock.RLock() defer s.messagesLock.RUnlock() noMsgs := []*m.BroadcastFeedMessage{} - if start < s.Start() { + if start < s.start() { return noMsgs, errOutOfBounds } @@ -325,8 +331,8 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro return noMsgs, errOutOfBounds } - startIndex := start - s.Start() - endIndex := end - s.Start() + 1 + startIndex := start - s.start() + endIndex := end - s.start() + 1 tmp := make([]*m.BroadcastFeedMessage, len(s.messages)) copy(tmp, s.messages) @@ -359,7 +365,7 @@ func (s *backlogSegment) append(prevMsgIdx uint64, msg *m.BroadcastFeedMessage) func (s *backlogSegment) Contains(i uint64) bool { s.messagesLock.RLock() defer s.messagesLock.RUnlock() - start := s.Start() + start := s.start() if i < start || i > s.End() { return false } From 5a7ac9eabb2a0ec8b06faba991f67393054562db Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 11 Dec 2023 08:36:18 +0000 Subject: [PATCH 50/71] reload head after lookup call --- broadcaster/backlog/backlog.go | 1 + 1 file changed, 1 insertion(+) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 18d3af2e38..e25d32136d 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -112,6 +112,7 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { } segment, err := 
b.Lookup(start) + head := b.head.Load() if start < head.Start() { // doing this check after the Lookup call ensures there is no race // condition with a delete call From 3b4c9b6d5137580e497c5dfb911c236b729bed22 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 11 Dec 2023 09:03:51 +0000 Subject: [PATCH 51/71] fix no new var error --- broadcaster/backlog/backlog.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index e25d32136d..1b9f6e8ddf 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -112,7 +112,7 @@ func (b *backlog) Get(start, end uint64) (*m.BroadcastMessage, error) { } segment, err := b.Lookup(start) - head := b.head.Load() + head = b.head.Load() if start < head.Start() { // doing this check after the Lookup call ensures there is no race // condition with a delete call From da08db5a02fc1e99ff47af628ac54f7b232b3622 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Mon, 11 Dec 2023 11:14:30 +0000 Subject: [PATCH 52/71] change copy slice to only copy the selected elements --- broadcaster/backlog/backlog.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index 1b9f6e8ddf..dee3aa92e0 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -335,9 +335,9 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro startIndex := start - s.start() endIndex := end - s.start() + 1 - tmp := make([]*m.BroadcastFeedMessage, len(s.messages)) - copy(tmp, s.messages) - return tmp[startIndex:endIndex], nil + tmp := make([]*m.BroadcastFeedMessage, endIndex-startIndex) + copy(tmp, s.messages[startIndex:endIndex]) + return tmp, nil } // append appends the given BroadcastFeedMessage to messages if it is the first From 
13aecae7820c1832a390909e6d5d646c7238a157 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 11 Dec 2023 10:49:57 -0700 Subject: [PATCH 53/71] Fix FloatToBig overflowing on values greater than an int64 --- arbnode/dataposter/data_poster.go | 26 ++++++++++++++++++++------ util/arbmath/math.go | 3 ++- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 4138aa8400..bf12d109ec 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -10,6 +10,7 @@ import ( "crypto/x509" "errors" "fmt" + "math" "math/big" "net/http" "os" @@ -378,13 +379,26 @@ func (p *DataPoster) evalMaxFeeCapExpr(backlogOfBatches uint64, elapsed time.Dur "ElapsedTimeBase": float64(config.ElapsedTimeBase), "ElapsedTimeImportance": config.ElapsedTimeImportance, "TargetPriceGWei": config.TargetPriceGwei, - "GWei": params.GWei, } result, err := p.maxFeeCapExpression.Evaluate(parameters) if err != nil { return nil, fmt.Errorf("error evaluating maxFeeCapExpression: %w", err) } - return arbmath.FloatToBig(result.(float64)), nil + resultFloat, ok := result.(float64) + if !ok { + return nil, fmt.Errorf("maxFeeCapExpression evaluated to non-float64: %v", result) + } + // 1e9 gwei gas price is practically speaking an infinite gas price, so we cap it there. + // This also allows the formula to return positive infinity safely. 
+ resultFloat = math.Min(resultFloat, 1e9) + resultBig := arbmath.FloatToBig(resultFloat * params.GWei) + if resultBig == nil { + return nil, fmt.Errorf("maxFeeCapExpression evaluated to float64 not convertible to integer: %v", resultFloat) + } + if resultBig.Sign() <= 0 { + return nil, fmt.Errorf("maxFeeCapExpression evaluated <= 0: %v", resultFloat) + } + return resultBig, nil } func (p *DataPoster) feeAndTipCaps(ctx context.Context, nonce uint64, gasLimit uint64, lastFeeCap *big.Int, lastTipCap *big.Int, dataCreatedAt time.Time, backlogOfBatches uint64) (*big.Int, *big.Int, error) { @@ -839,9 +853,9 @@ func DataPosterConfigAddOptions(prefix string, f *pflag.FlagSet, defaultDataPost f.Bool(prefix+".use-db-storage", defaultDataPosterConfig.UseDBStorage, "uses database storage when enabled") f.Bool(prefix+".use-noop-storage", defaultDataPosterConfig.UseNoOpStorage, "uses noop storage, it doesn't store anything") f.Bool(prefix+".legacy-storage-encoding", defaultDataPosterConfig.LegacyStorageEncoding, "encodes items in a legacy way (as it was before dropping generics)") - f.String(prefix+".max-fee-cap-formula", defaultDataPosterConfig.MaxFeeCapFormula, "mathematical formula to calculate maximum fee cap the result of which would be float64.\n"+ + f.String(prefix+".max-fee-cap-formula", defaultDataPosterConfig.MaxFeeCapFormula, "mathematical formula to calculate maximum fee cap gwei the result of which would be float64.\n"+ "This expression is expected to be evaluated please refer https://github.com/Knetic/govaluate/blob/master/MANUAL.md to find all available mathematical operators.\n"+ - "Currently available variables to construct the formula are BacklogOfBatches, UrgencyGWei, ElapsedTime, ElapsedTimeBase, ElapsedTimeImportance, TargetPriceGWei and GWei") + "Currently available variables to construct the formula are BacklogOfBatches, UrgencyGWei, ElapsedTime, ElapsedTimeBase, ElapsedTimeImportance, and TargetPriceGWei") f.Duration(prefix+".elapsed-time-base", 
defaultDataPosterConfig.ElapsedTimeBase, "unit to measure the time elapsed since creation of transaction used for maximum fee cap calculation") f.Float64(prefix+".elapsed-time-importance", defaultDataPosterConfig.ElapsedTimeImportance, "weight given to the units of time elapsed used for maximum fee cap calculation") @@ -878,7 +892,7 @@ var DefaultDataPosterConfig = DataPosterConfig{ LegacyStorageEncoding: false, Dangerous: DangerousConfig{ClearDBStorage: false}, ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, - MaxFeeCapFormula: "(((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei) * GWei", + MaxFeeCapFormula: "((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei", ElapsedTimeBase: 10 * time.Minute, ElapsedTimeImportance: 10, } @@ -904,7 +918,7 @@ var TestDataPosterConfig = DataPosterConfig{ UseNoOpStorage: false, LegacyStorageEncoding: false, ExternalSigner: ExternalSignerCfg{Method: "eth_signTransaction"}, - MaxFeeCapFormula: "(((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei) * GWei", + MaxFeeCapFormula: "((BacklogOfBatches * UrgencyGWei) ** 2) + ((ElapsedTime/ElapsedTimeBase) ** 2) * ElapsedTimeImportance + TargetPriceGWei", ElapsedTimeBase: 10 * time.Minute, ElapsedTimeImportance: 10, } diff --git a/util/arbmath/math.go b/util/arbmath/math.go index 2945dd5a29..a5618438cc 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -76,7 +76,8 @@ func UintToBig(value uint64) *big.Int { // FloatToBig casts a float to a huge func FloatToBig(value float64) *big.Int { - return new(big.Int).SetInt64(int64(value)) + result, _ := new(big.Float).SetFloat64(value).Int(nil) + return result } // UintToBigFloat casts a uint to a big float From 2ac0bfdee2f2afe28991fcedfb09e23f7fe19210 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 11 Dec 
2023 11:09:55 -0700 Subject: [PATCH 54/71] Add support for nethermind's version of "execution reverted" --- cmd/nitro/nitro.go | 4 +--- staker/rollup_watcher.go | 7 ++----- .../headerreader}/execution_reverted_test.go | 11 ++++++++--- util/headerreader/header_reader.go | 4 ++++ 4 files changed, 15 insertions(+), 11 deletions(-) rename {staker => util/headerreader}/execution_reverted_test.go (66%) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index c7f9d57b08..86093a85d9 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -14,7 +14,6 @@ import ( "os/signal" "path/filepath" "reflect" - "regexp" "strings" "syscall" "time" @@ -546,7 +545,6 @@ func mainImpl() int { // Validate sequencer's MaxTxDataSize and batchPoster's MaxSize params. // SequencerInbox's maxDataSize is defaulted to 117964 which is 90% of Geth's 128KB tx size limit, leaving ~13KB for proving. seqInboxMaxDataSize := 117964 - executionRevertedRegexp := regexp.MustCompile("(?i)execution reverted") if nodeConfig.Node.ParentChainReader.Enable { seqInbox, err := bridgegen.NewSequencerInbox(rollupAddrs.SequencerInbox, l1Client) if err != nil { @@ -556,7 +554,7 @@ func mainImpl() int { res, err := seqInbox.MaxDataSize(&bind.CallOpts{Context: ctx}) if err == nil { seqInboxMaxDataSize = int(res.Int64()) - } else if !executionRevertedRegexp.MatchString(err.Error()) { + } else if !headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) { log.Error("error fetching MaxDataSize from sequencer inbox", "err", err) return 1 } diff --git a/staker/rollup_watcher.go b/staker/rollup_watcher.go index 59a23d891d..c9fa3b132d 100644 --- a/staker/rollup_watcher.go +++ b/staker/rollup_watcher.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" "math/big" - "regexp" "sync/atomic" "github.com/ethereum/go-ethereum/common" @@ -17,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/solgen/go/rollupgen" + 
"github.com/offchainlabs/nitro/util/headerreader" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -72,9 +72,6 @@ func (r *RollupWatcher) getCallOpts(ctx context.Context) *bind.CallOpts { return &opts } -// A regexp matching "execution reverted" errors returned from the parent chain RPC. -var executionRevertedRegexp = regexp.MustCompile("(?i)execution reverted") - func (r *RollupWatcher) getNodeCreationBlock(ctx context.Context, nodeNum uint64) (*big.Int, error) { callOpts := r.getCallOpts(ctx) if !r.unSupportedL3Method.Load() { @@ -83,7 +80,7 @@ func (r *RollupWatcher) getNodeCreationBlock(ctx context.Context, nodeNum uint64 return createdAtBlock, nil } log.Trace("failed to call getNodeCreationBlockForLogLookup, falling back on node CreatedAtBlock field", "err", err) - if executionRevertedRegexp.MatchString(err.Error()) { + if headerreader.ExecutionRevertedRegexp.MatchString(err.Error()) { r.unSupportedL3Method.Store(true) } else { return nil, err diff --git a/staker/execution_reverted_test.go b/util/headerreader/execution_reverted_test.go similarity index 66% rename from staker/execution_reverted_test.go rename to util/headerreader/execution_reverted_test.go index 98a3bdfd61..d9b73b844e 100644 --- a/staker/execution_reverted_test.go +++ b/util/headerreader/execution_reverted_test.go @@ -1,4 +1,7 @@ -package staker +// Copyright 2021-2022, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package headerreader import ( "io" @@ -13,14 +16,16 @@ func TestExecutionRevertedRegexp(t *testing.T) { "execution reverted: FOO", // besu returns "Execution reverted" "Execution reverted", + // nethermind returns "VM execution error." 
+ "VM execution error.", } for _, errString := range executionRevertedErrors { - if !executionRevertedRegexp.MatchString(errString) { + if !ExecutionRevertedRegexp.MatchString(errString) { t.Fatalf("execution reverted regexp didn't match %q", errString) } } // This regexp should not match random IO errors - if executionRevertedRegexp.MatchString(io.ErrUnexpectedEOF.Error()) { + if ExecutionRevertedRegexp.MatchString(io.ErrUnexpectedEOF.Error()) { t.Fatal("execution reverted regexp matched unexpected EOF") } } diff --git a/util/headerreader/header_reader.go b/util/headerreader/header_reader.go index 678c3099fa..a5100f74fa 100644 --- a/util/headerreader/header_reader.go +++ b/util/headerreader/header_reader.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "math/big" + "regexp" "sync" "time" @@ -22,6 +23,9 @@ import ( flag "github.com/spf13/pflag" ) +// A regexp matching "execution reverted" errors returned from the parent chain RPC. +var ExecutionRevertedRegexp = regexp.MustCompile(`(?i)execution reverted|VM execution error\.?`) + type ArbSysInterface interface { ArbBlockNumber(*bind.CallOpts) (*big.Int, error) } From a1898787cf31accf8328a898afc4ac20961b9a45 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Mon, 11 Dec 2023 11:54:41 -0700 Subject: [PATCH 55/71] rpcclient: enable retries by default --- util/rpcclient/rpcclient.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/util/rpcclient/rpcclient.go b/util/rpcclient/rpcclient.go index 61c125eaed..dee6e9252a 100644 --- a/util/rpcclient/rpcclient.go +++ b/util/rpcclient/rpcclient.go @@ -53,6 +53,8 @@ var TestClientConfig = ClientConfig{ var DefaultClientConfig = ClientConfig{ URL: "self-auth", JWTSecret: "", + Retries: 3, + RetryErrors: "websocket: close.*|dial tcp .*|.*i/o timeout|.*connection reset by peer|.*connection refused", ArgLogLimit: 2048, } From 67d99d315cdcd3769598bc0e84e8a45a5b7d5478 Mon Sep 17 00:00:00 2001 From: Tsahi Zidenberg Date: Mon, 11 Dec 2023 11:54:59 -0700 Subject: [PATCH 56/71] rpcclient: 
use default config in toxiproxy test --- util/rpcclient/rpcclient_toxiproxy_test.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/util/rpcclient/rpcclient_toxiproxy_test.go b/util/rpcclient/rpcclient_toxiproxy_test.go index 31385d50d8..647a37d691 100644 --- a/util/rpcclient/rpcclient_toxiproxy_test.go +++ b/util/rpcclient/rpcclient_toxiproxy_test.go @@ -22,15 +22,12 @@ func TestToxiRpcClient(t *testing.T) { Require(t, err) defer proxy.Delete() - config := &ClientConfig{ - URL: "ws://" + proxy.Listen, - Timeout: time.Second * 5, - Retries: 3, - RetryErrors: "websocket: close.*|.* i/o timeout|.*connection reset by peer|dial tcp .*", - RetryDelay: time.Millisecond * 500, - } + config := DefaultClientConfig + config.URL = "ws://" + proxy.Listen + config.Timeout = time.Second * 5 + config.RetryDelay = time.Millisecond * 500 Require(t, config.Validate()) - configFetcher := func() *ClientConfig { return config } + configFetcher := func() *ClientConfig { return &config } client := NewRpcClient(configFetcher, server1) From fa0fd2800fd03c1f797c996504593870c432ec20 Mon Sep 17 00:00:00 2001 From: Joshua Colvin Date: Mon, 11 Dec 2023 12:16:12 -0700 Subject: [PATCH 57/71] Update util/headerreader/execution_reverted_test.go --- util/headerreader/execution_reverted_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/headerreader/execution_reverted_test.go b/util/headerreader/execution_reverted_test.go index d9b73b844e..d7bebd38b8 100644 --- a/util/headerreader/execution_reverted_test.go +++ b/util/headerreader/execution_reverted_test.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022, Offchain Labs, Inc. +// Copyright 2021-2023, Offchain Labs, Inc. 
// For license information, see https://github.com/nitro/blob/master/LICENSE package headerreader From a59891ddb810422dcc4eef23e2538b9b556c7d2b Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 11 Dec 2023 13:29:33 -0700 Subject: [PATCH 58/71] Allow for zero max fee cap from expression --- arbnode/dataposter/data_poster.go | 4 ++-- arbnode/dataposter/dataposter_test.go | 11 ++++++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index bf12d109ec..f1ba185e32 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -395,8 +395,8 @@ func (p *DataPoster) evalMaxFeeCapExpr(backlogOfBatches uint64, elapsed time.Dur if resultBig == nil { return nil, fmt.Errorf("maxFeeCapExpression evaluated to float64 not convertible to integer: %v", resultFloat) } - if resultBig.Sign() <= 0 { - return nil, fmt.Errorf("maxFeeCapExpression evaluated <= 0: %v", resultFloat) + if resultBig.Sign() < 0 { + return nil, fmt.Errorf("maxFeeCapExpression evaluated < 0: %v", resultFloat) } return resultBig, nil } diff --git a/arbnode/dataposter/dataposter_test.go b/arbnode/dataposter/dataposter_test.go index 8c18288e5f..74b4aff18e 100644 --- a/arbnode/dataposter/dataposter_test.go +++ b/arbnode/dataposter/dataposter_test.go @@ -19,6 +19,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/signer/core/apitypes" "github.com/google/go-cmp/cmp" @@ -262,6 +263,14 @@ func TestMaxFeeCapFormulaCalculation(t *testing.T) { t.Fatalf("Error evaluating MaxFeeCap expression: %v", err) } if result.Cmp(common.Big0) != 0 { - t.Fatalf("Unexpected result. Got: %d, want: 0", result.Uint64()) + t.Fatalf("Unexpected result. 
Got: %d, want: 0", result) + } + + result, err = p.evalMaxFeeCapExpr(0, time.Since(time.Time{})) + if err != nil { + t.Fatalf("Error evaluating MaxFeeCap expression: %v", err) + } + if result.Cmp(big.NewInt(params.GWei)) <= 0 { + t.Fatalf("Unexpected result. Got: %d, want: >0", result) } } From 650bed6de8c98d3020b71a21c7687fc8d5e2142f Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 11 Dec 2023 14:35:33 -0700 Subject: [PATCH 59/71] Fix data poster time RLP encoding --- arbnode/dataposter/dbstorage/storage.go | 3 +- arbnode/dataposter/slice/slicestorage.go | 3 +- arbnode/dataposter/storage/storage.go | 42 ++++++++++++++++++++++++ arbnode/dataposter/storage/time.go | 34 +++++++++++++++++++ arbnode/dataposter/storage_test.go | 19 +++++++++++ 5 files changed, 99 insertions(+), 2 deletions(-) create mode 100644 arbnode/dataposter/storage/time.go diff --git a/arbnode/dataposter/dbstorage/storage.go b/arbnode/dataposter/dbstorage/storage.go index f2b1854492..473bfa2c3b 100644 --- a/arbnode/dataposter/dbstorage/storage.go +++ b/arbnode/dataposter/dbstorage/storage.go @@ -6,6 +6,7 @@ package dbstorage import ( "bytes" "context" + "encoding/hex" "errors" "fmt" "strconv" @@ -140,7 +141,7 @@ func (s *Storage) Put(ctx context.Context, index uint64, prev, new *storage.Queu return fmt.Errorf("encoding previous item: %w", err) } if !bytes.Equal(stored, prevEnc) { - return fmt.Errorf("replacing different item than expected at index: %v, stored: %v, prevEnc: %v", index, stored, prevEnc) + return fmt.Errorf("replacing different item than expected at index: %v, stored: %v, prevEnc: %v", index, hex.EncodeToString(stored), hex.EncodeToString(prevEnc)) } newEnc, err := s.encDec().Encode(new) if err != nil { diff --git a/arbnode/dataposter/slice/slicestorage.go b/arbnode/dataposter/slice/slicestorage.go index 04286df411..dbd7a3ea5e 100644 --- a/arbnode/dataposter/slice/slicestorage.go +++ b/arbnode/dataposter/slice/slicestorage.go @@ -6,6 +6,7 @@ package slice import ( "bytes" 
"context" + "encoding/hex" "errors" "fmt" @@ -90,7 +91,7 @@ func (s *Storage) Put(_ context.Context, index uint64, prev, new *storage.Queued return fmt.Errorf("encoding previous item: %w", err) } if !bytes.Equal(prevEnc, s.queue[queueIdx]) { - return fmt.Errorf("replacing different item than expected at index: %v, stored: %v, prevEnc: %v", index, s.queue[queueIdx], prevEnc) + return fmt.Errorf("replacing different item than expected at index: %v, stored: %v, prevEnc: %v", index, hex.EncodeToString(s.queue[queueIdx]), hex.EncodeToString(prevEnc)) } s.queue[queueIdx] = newEnc } else { diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index 70637c48e0..b55f6363fa 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -1,8 +1,12 @@ +// Copyright 2021-2023, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + package storage import ( "errors" "fmt" + "io" "time" "github.com/ethereum/go-ethereum/core/types" @@ -30,6 +34,44 @@ type QueuedTransaction struct { NextReplacement time.Time } +type QueuedTransactionForEncoding struct { + FullTx *types.Transaction + Data types.DynamicFeeTx + Meta []byte + Sent bool + Created *RlpTime `rlp:"optional"` // may be earlier than the tx was given to the tx poster + NextReplacement *RlpTime `rlp:"optional"` +} + +func (qt *QueuedTransaction) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, QueuedTransactionForEncoding{ + FullTx: qt.FullTx, + Data: qt.Data, + Meta: qt.Meta, + Sent: qt.Sent, + Created: (*RlpTime)(&qt.Created), + NextReplacement: (*RlpTime)(&qt.NextReplacement), + }) +} + +func (qt *QueuedTransaction) DecodeRLP(s *rlp.Stream) error { + var qtEnc QueuedTransactionForEncoding + if err := s.Decode(&qtEnc); err != nil { + return err + } + qt.FullTx = qtEnc.FullTx + qt.Data = qtEnc.Data + qt.Meta = qtEnc.Meta + qt.Sent = qtEnc.Sent + if qtEnc.Created != nil { + qt.Created = 
time.Time(*qtEnc.Created) + } + if qtEnc.NextReplacement != nil { + qt.NextReplacement = time.Time(*qtEnc.NextReplacement) + } + return nil +} + // LegacyQueuedTransaction is used for backwards compatibility. // Before https://github.com/OffchainLabs/nitro/pull/1773: the queuedTransaction // looked like this and was rlp encoded directly. After the pr, we are store diff --git a/arbnode/dataposter/storage/time.go b/arbnode/dataposter/storage/time.go new file mode 100644 index 0000000000..d9911de997 --- /dev/null +++ b/arbnode/dataposter/storage/time.go @@ -0,0 +1,34 @@ +// Copyright 2021-2023, Offchain Labs, Inc. +// For license information, see https://github.com/nitro/blob/master/LICENSE + +package storage + +import ( + "io" + "time" + + "github.com/ethereum/go-ethereum/rlp" +) + +// time.Time doesn't encode as anything in RLP. This fixes that. +// It encodes a timestamp as a uint64 unix timestamp in seconds, +// so any subsecond precision is lost. +type RlpTime time.Time + +func (b *RlpTime) DecodeRLP(s *rlp.Stream) error { + var nanos uint64 + err := s.Decode(&nanos) + if err != nil { + return err + } + *b = RlpTime(time.Unix(int64(nanos), 0)) + return nil +} + +func (b RlpTime) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, uint64(time.Time(b).Unix())) +} + +func (b RlpTime) String() string { + return time.Time(b).String() +} diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index adea2073e2..ff5623f778 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -5,6 +5,7 @@ import ( "math/big" "path" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" @@ -366,3 +367,21 @@ func TestLength(t *testing.T) { } } + +func TestTimeEncoding(t *testing.T) { + // RlpTime cuts off subsecond precision, so for this test, + // we'll use a time that doesn't have any subsecond precision. 
+ now := storage.RlpTime(time.Unix(time.Now().Unix(), 0)) + enc, err := rlp.EncodeToBytes(now) + if err != nil { + t.Fatal("failed to encode time", err) + } + var dec storage.RlpTime + err = rlp.DecodeBytes(enc, &dec) + if err != nil { + t.Fatal("failed to decode time", err) + } + if !time.Time(dec).Equal(time.Time(now)) { + t.Fatalf("time %v encoded then decoded to %v", now, dec) + } +} From 0bc7f5035d16de248f4d8316cc1185916c398445 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 11 Dec 2023 14:43:33 -0700 Subject: [PATCH 60/71] Always saveTx in sendTx --- arbnode/dataposter/data_poster.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index 4138aa8400..f535279118 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -537,10 +537,8 @@ func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTr } func (p *DataPoster) sendTx(ctx context.Context, prevTx *storage.QueuedTransaction, newTx *storage.QueuedTransaction) error { - if prevTx == nil || (newTx.FullTx.Hash() != prevTx.FullTx.Hash()) { - if err := p.saveTx(ctx, prevTx, newTx); err != nil { - return err - } + if err := p.saveTx(ctx, prevTx, newTx); err != nil { + return err } if err := p.client.SendTransaction(ctx, newTx.FullTx); err != nil { if !strings.Contains(err.Error(), "already known") && !strings.Contains(err.Error(), "nonce too low") { From f7275cfb109041e102858a54f2778c94a0b794b8 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Mon, 11 Dec 2023 17:05:29 -0700 Subject: [PATCH 61/71] Make saveTx skipping check comprehensive --- arbnode/dataposter/data_poster.go | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index f535279118..23b63cad46 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -5,6 +5,7 @@ 
package dataposter import ( + "bytes" "context" "crypto/tls" "crypto/x509" @@ -527,8 +528,24 @@ func (p *DataPoster) PostTransaction(ctx context.Context, dataCreatedAt time.Tim // the mutex must be held by the caller func (p *DataPoster) saveTx(ctx context.Context, prevTx, newTx *storage.QueuedTransaction) error { - if prevTx != nil && prevTx.Data.Nonce != newTx.Data.Nonce { - return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.Data.Nonce, newTx.Data.Nonce) + if prevTx != nil { + if prevTx.Data.Nonce != newTx.Data.Nonce { + return fmt.Errorf("prevTx nonce %v doesn't match newTx nonce %v", prevTx.Data.Nonce, newTx.Data.Nonce) + } + + // Check if prevTx is the same as newTx and we don't need to do anything + oldEnc, err := rlp.EncodeToBytes(prevTx) + if err != nil { + return fmt.Errorf("failed to encode prevTx: %w", err) + } + newEnc, err := rlp.EncodeToBytes(newTx) + if err != nil { + return fmt.Errorf("failed to encode newTx: %w", err) + } + if bytes.Equal(oldEnc, newEnc) { + // No need to save newTx as it's the same as prevTx + return nil + } } if err := p.queue.Put(ctx, newTx.Data.Nonce, prevTx, newTx); err != nil { return fmt.Errorf("putting new tx in the queue: %w", err) From c3c08335b73f5180ab9536642ae7c4e9ccc022fc Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 12 Dec 2023 14:14:42 +0530 Subject: [PATCH 62/71] Validate execution node config at startup and update --dev flag parameter --- cmd/nitro/nitro.go | 3 +++ cmd/util/confighelpers/configuration.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 86093a85d9..08d80cff15 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -786,6 +786,9 @@ func (c *NodeConfig) Validate() error { if err := c.Node.Validate(); err != nil { return err } + if err := c.Execution.Validate(); err != nil { + return err + } return c.Persistent.Validate() } diff --git a/cmd/util/confighelpers/configuration.go 
b/cmd/util/confighelpers/configuration.go index 85a8f4adef..81e124d9b9 100644 --- a/cmd/util/confighelpers/configuration.go +++ b/cmd/util/confighelpers/configuration.go @@ -147,7 +147,7 @@ func devFlagArgs() []string { "--parent-chain.id=1337", "--chain.id=412346", "--persistent.chain", "/tmp/dev-test", - "--node.sequencer", + "--execution.sequencer.enable=true", "--node.dangerous.no-sequencer-coordinator", "--node.staker.enable=false", "--init.empty=false", From 698b448fd667f3fd3c674246513a7940a20d7dfb Mon Sep 17 00:00:00 2001 From: ganeshvanahalli Date: Tue, 12 Dec 2023 15:16:23 +0530 Subject: [PATCH 63/71] fix using of sequencer-url as default forwarding-target --- cmd/nitro/nitro.go | 3 ++- cmd/util/confighelpers/configuration.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 08d80cff15..3efba5df28 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -893,7 +893,8 @@ func applyChainParameters(ctx context.Context, k *koanf.Koanf, chainId uint64, c "chain.id": chainInfo.ChainConfig.ChainID.Uint64(), "parent-chain.id": chainInfo.ParentChainId, } - if chainInfo.SequencerUrl != "" { + // Only use chainInfo.SequencerUrl as default forwarding-target if sequencer is not enabled + if !k.Bool("execution.sequencer.enable") && chainInfo.SequencerUrl != "" { chainDefaults["execution.forwarding-target"] = chainInfo.SequencerUrl } if chainInfo.SecondaryForwardingTarget != "" { diff --git a/cmd/util/confighelpers/configuration.go b/cmd/util/confighelpers/configuration.go index 81e124d9b9..3ff27d65ce 100644 --- a/cmd/util/confighelpers/configuration.go +++ b/cmd/util/confighelpers/configuration.go @@ -147,7 +147,8 @@ func devFlagArgs() []string { "--parent-chain.id=1337", "--chain.id=412346", "--persistent.chain", "/tmp/dev-test", - "--execution.sequencer.enable=true", + "--node.sequencer", + "--execution.sequencer.enable", "--node.dangerous.no-sequencer-coordinator", "--node.staker.enable=false", 
"--init.empty=false", From 74d889ed531f1aee9fcda3792308ad671dfa5a52 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Tue, 12 Dec 2023 17:53:37 +0100 Subject: [PATCH 64/71] Add prysms recursivelock lint to nitro --- Makefile | 1 + go.mod | 32 +++++++++--------- go.sum | 47 +++++++++++++++++++++++++++ linter/recursivelock/recursivelock.go | 11 +++++++ 4 files changed, 76 insertions(+), 15 deletions(-) create mode 100644 linter/recursivelock/recursivelock.go diff --git a/Makefile b/Makefile index 4221100961..42fd1e3595 100644 --- a/Makefile +++ b/Makefile @@ -307,6 +307,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro # strategic rules to minimize dependency building .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make + go run ./linter/recursivelock ./... go run ./linter/koanf ./... go run ./linter/pointercheck ./... golangci-lint run --fix diff --git a/go.mod b/go.mod index bdda6a61a1..e504542a2f 100644 --- a/go.mod +++ b/go.mod @@ -18,11 +18,11 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 - github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 + github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v3 v3.2103.2 github.com/enescakir/emoji v1.0.0 - github.com/ethereum/go-ethereum v1.10.26 + github.com/ethereum/go-ethereum v1.13.1 github.com/fatih/structtag v1.2.0 github.com/gdamore/tcell/v2 v2.6.0 github.com/google/go-cmp v0.5.9 @@ -73,7 +73,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 // indirect github.com/cenkalti/backoff 
v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect @@ -97,7 +97,7 @@ require ( github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 // indirect + github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/emirpasic/gods v1.18.1 // indirect @@ -131,7 +131,7 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect - github.com/huin/goupnp v1.1.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-block-format v0.1.1 // indirect @@ -195,7 +195,7 @@ require ( github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect github.com/libp2p/go-libp2p-kad-dht v0.21.1 // indirect github.com/libp2p/go-libp2p-kbucket v0.5.0 // indirect - github.com/libp2p/go-libp2p-pubsub v0.9.0 // indirect + github.com/libp2p/go-libp2p-pubsub v0.9.3 // indirect github.com/libp2p/go-libp2p-pubsub-router v0.6.0 // indirect github.com/libp2p/go-libp2p-record v0.2.0 // indirect github.com/libp2p/go-libp2p-routing-helpers v0.6.2 // indirect @@ -238,6 +238,7 @@ require ( github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/procfs v0.9.0 // indirect + github.com/prysmaticlabs/prysm/v4 v4.1.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect github.com/quic-go/qtls-go1-19 v0.3.3 // indirect github.com/quic-go/qtls-go1-20 v0.2.3 // indirect @@ -245,13 +246,13 @@ require ( github.com/quic-go/webtransport-go v0.5.2 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/rhnvrm/simples3 
v0.6.1 // indirect - github.com/rivo/uniseg v0.4.3 // indirect + github.com/rivo/uniseg v0.4.4 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/samber/lo v1.36.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/supranational/blst v0.3.11 // indirect - github.com/urfave/cli/v2 v2.24.1 // indirect + github.com/urfave/cli/v2 v2.25.7 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect @@ -261,6 +262,7 @@ require ( github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/otel v1.7.0 // indirect go.opentelemetry.io/otel/exporters/jaeger v1.7.0 // indirect @@ -308,12 +310,12 @@ require ( require ( github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect - github.com/VictoriaMetrics/fastcache v1.6.0 // indirect + github.com/VictoriaMetrics/fastcache v1.12.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect - github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect - github.com/go-ole/go-ole v1.2.1 // indirect + github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-redis/redis/v8 v8.11.4 github.com/go-stack/stack v1.8.1 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect @@ -330,11 +332,11 @@ require ( github.com/mitchellh/pointerstructure v1.2.0 // indirect 
github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/rs/cors v1.7.0 // indirect - github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/status-im/keycard-go v0.2.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 - github.com/tklauser/go-sysconf v0.3.5 // indirect - github.com/tklauser/numcpus v0.2.2 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/tyler-smith/go-bip39 v1.1.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect ) diff --git a/go.sum b/go.sum index bf8b4b826d..0a9649c434 100644 --- a/go.sum +++ b/go.sum @@ -173,6 +173,8 @@ github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13P github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 h1:KdUfX2zKommPRa+PD0sWZUyXe9w277ABlgELO7H04IM= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= @@ -198,6 +200,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/ceramicnetwork/go-dag-jose v0.1.0 h1:yJ/HVlfKpnD3LdYP03AHyTvbm3BpPiz2oZiOeReJRdU= github.com/ceramicnetwork/go-dag-jose v0.1.0/go.mod 
h1:qYA1nYt0X8u4XoMAVoOV3upUVKtrxy/I670Dg5F0wjI= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= +github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -225,6 +228,7 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= +github.com/cockroachdb/datadriven v1.0.3-0.20230801171734-e384cf455877 h1:1MLK4YpFtIEo3ZtMA5C795Wtv5VuUnrXX7mQG+aHg6o= github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= @@ -232,6 +236,8 @@ github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZe github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoGMWEhDvS3zToKcDpRsLuRolQJBVGdozk= github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06 h1:T+Np/xtzIjYM/P5NAw0e2Rf1FGvzDau1h54MKvx8G7w= +github.com/cockroachdb/pebble v0.0.0-20230906160148-46873a6a7a06/go.mod h1:bynZ3gvVyhlvjLI7PT6dmZ7g76xzJ7HpxfjgkzCGz6s= 
github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= @@ -307,6 +313,8 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE= github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo= +github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4= github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -360,6 +368,7 @@ github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09 github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gammazero/deque v0.2.1 
h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= @@ -367,6 +376,8 @@ github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZ github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= +github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.6.0 h1:OKbluoP9VYmJwZwq/iLb4BxwKcwGthaa1YNBJIyCySg= @@ -403,6 +414,8 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= @@ -536,6 +549,7 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg= github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b h1:Qcx5LM0fSiks9uCyFZwDBUasd3lxd1RM0GYpL+Li5o4= github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= @@ -640,9 +654,12 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU= github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -1058,6 +1075,8 @@ github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuD github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-pubsub v0.9.0 h1:mcLb4WzwhUG4OKb0rp1/bYMd/DYhvMyzJheQH3LMd1s= github.com/libp2p/go-libp2p-pubsub v0.9.0/go.mod h1:OEsj0Cc/BpkqikXRTrVspWU/Hx7bMZwHP+6vNMd+c7I= +github.com/libp2p/go-libp2p-pubsub v0.9.3 h1:ihcz9oIBMaCK9kcx+yHWm3mLAFBMAUsM4ux42aikDxo= +github.com/libp2p/go-libp2p-pubsub v0.9.3/go.mod h1:RYA7aM9jIic5VV47WXu4GkcRxRhrdElWf8xtyli+Dzc= github.com/libp2p/go-libp2p-pubsub-router v0.6.0 h1:D30iKdlqDt5ZmLEYhHELCMRj8b4sFAqrUcshIUvVP/s= github.com/libp2p/go-libp2p-pubsub-router v0.6.0/go.mod h1:FY/q0/RBTKsLA7l4vqC2cbRbOvyDotg8PJQ7j8FDudE= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= @@ -1372,6 +1391,7 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -1381,6 +1401,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= 
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= @@ -1464,6 +1486,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prysmaticlabs/prysm/v4 v4.1.1 h1:sbBkgfPzo/SGTJ5IimtsZSGECoRlhbowR1rEhTOdvHo= +github.com/prysmaticlabs/prysm/v4 v4.1.1/go.mod h1:+o907dc4mwEE0wJkQ8RrzCroC+q2WCzdCLtikwonw8c= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= @@ -1488,6 +1512,8 @@ github.com/rivo/tview v0.0.0-20230814110005-ccc2c8119703/go.mod h1:nVwGv4MP47T0j github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1513,6 +1539,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -1588,6 +1616,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= @@ -1596,12 +1625,18 @@ github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3 github.com/syndtr/goleveldb v1.0.0/go.mod 
h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.2.2 h1:oyhllyrScuYI6g+h/zUvNXNp1wy7x8qQy3t/piefldA= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= @@ -1620,6 +1655,8 @@ github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60Nt github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/cli/v2 v2.24.1 
h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU= github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= +github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= @@ -1684,6 +1721,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= @@ -1886,6 +1925,8 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net 
v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= @@ -1948,6 +1989,7 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2000,8 +2042,10 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2009,6 +2053,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -2103,6 +2149,7 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api 
v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= diff --git a/linter/recursivelock/recursivelock.go b/linter/recursivelock/recursivelock.go new file mode 100644 index 0000000000..9349759cf3 --- /dev/null +++ b/linter/recursivelock/recursivelock.go @@ -0,0 +1,11 @@ +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/recursivelock" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(recursivelock.Analyzer) +} From 8b64fa27317f46785cdae557ea2fb0f68f3ef6dd Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 12 Dec 2023 10:21:58 -0700 Subject: [PATCH 65/71] Add nanoseconds to time storage --- arbnode/dataposter/storage/storage.go | 6 +++--- arbnode/dataposter/storage/time.go | 16 ++++++++++++---- arbnode/dataposter/storage_test.go | 4 +--- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/arbnode/dataposter/storage/storage.go b/arbnode/dataposter/storage/storage.go index b55f6363fa..87bfa8d83d 100644 --- a/arbnode/dataposter/storage/storage.go +++ b/arbnode/dataposter/storage/storage.go @@ -34,7 +34,7 @@ type QueuedTransaction struct { NextReplacement time.Time } -type QueuedTransactionForEncoding struct { +type queuedTransactionForEncoding struct { FullTx *types.Transaction Data types.DynamicFeeTx Meta []byte @@ -44,7 +44,7 @@ type QueuedTransactionForEncoding struct { } func (qt *QueuedTransaction) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, QueuedTransactionForEncoding{ + return rlp.Encode(w, queuedTransactionForEncoding{ FullTx: qt.FullTx, Data: qt.Data, Meta: qt.Meta, @@ -55,7 +55,7 @@ func (qt *QueuedTransaction) EncodeRLP(w io.Writer) error { } func (qt *QueuedTransaction) DecodeRLP(s *rlp.Stream) error { - var qtEnc QueuedTransactionForEncoding + var qtEnc queuedTransactionForEncoding if err := s.Decode(&qtEnc); err != nil { return err } diff --git a/arbnode/dataposter/storage/time.go b/arbnode/dataposter/storage/time.go index 
d9911de997..df07c5cff9 100644 --- a/arbnode/dataposter/storage/time.go +++ b/arbnode/dataposter/storage/time.go @@ -15,18 +15,26 @@ import ( // so any subsecond precision is lost. type RlpTime time.Time +type rlpTimeEncoding struct { + Seconds uint64 + Nanos uint64 +} + func (b *RlpTime) DecodeRLP(s *rlp.Stream) error { - var nanos uint64 - err := s.Decode(&nanos) + var enc rlpTimeEncoding + err := s.Decode(&enc) if err != nil { return err } - *b = RlpTime(time.Unix(int64(nanos), 0)) + *b = RlpTime(time.Unix(int64(enc.Seconds), int64(enc.Nanos))) return nil } func (b RlpTime) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, uint64(time.Time(b).Unix())) + return rlp.Encode(w, rlpTimeEncoding{ + Seconds: uint64(time.Time(b).Unix()), + Nanos: uint64(time.Time(b).Nanosecond()), + }) } func (b RlpTime) String() string { diff --git a/arbnode/dataposter/storage_test.go b/arbnode/dataposter/storage_test.go index ff5623f778..3344215e90 100644 --- a/arbnode/dataposter/storage_test.go +++ b/arbnode/dataposter/storage_test.go @@ -369,9 +369,7 @@ func TestLength(t *testing.T) { } func TestTimeEncoding(t *testing.T) { - // RlpTime cuts off subsecond precision, so for this test, - // we'll use a time that doesn't have any subsecond precision. 
- now := storage.RlpTime(time.Unix(time.Now().Unix(), 0)) + now := storage.RlpTime(time.Now()) enc, err := rlp.EncodeToBytes(now) if err != nil { t.Fatal("failed to encode time", err) From 34cdd3645291fe29a6442e7ea001d65d31890940 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 12 Dec 2023 12:13:48 -0700 Subject: [PATCH 66/71] Comment that we should always get a float from expression --- arbnode/dataposter/data_poster.go | 1 + 1 file changed, 1 insertion(+) diff --git a/arbnode/dataposter/data_poster.go b/arbnode/dataposter/data_poster.go index f1ba185e32..c079ebca4d 100644 --- a/arbnode/dataposter/data_poster.go +++ b/arbnode/dataposter/data_poster.go @@ -386,6 +386,7 @@ func (p *DataPoster) evalMaxFeeCapExpr(backlogOfBatches uint64, elapsed time.Dur } resultFloat, ok := result.(float64) if !ok { + // This shouldn't be possible because we only pass in float64s as arguments return nil, fmt.Errorf("maxFeeCapExpression evaluated to non-float64: %v", result) } // 1e9 gwei gas price is practically speaking an infinite gas price, so we cap it there. 
From 2a059e78ed00ad9757913df834f6c18a1a40df66 Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Tue, 12 Dec 2023 12:14:04 -0700 Subject: [PATCH 67/71] Handle NaN in FloatToBig and comment on when it can return nil --- util/arbmath/math.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/util/arbmath/math.go b/util/arbmath/math.go index a5618438cc..3d1df0ce64 100644 --- a/util/arbmath/math.go +++ b/util/arbmath/math.go @@ -75,7 +75,11 @@ func UintToBig(value uint64) *big.Int { } // FloatToBig casts a float to a huge +// Returns nil when passed NaN or Infinity func FloatToBig(value float64) *big.Int { + if math.IsNaN(value) { + return nil + } result, _ := new(big.Float).SetFloat64(value).Int(nil) return result } From 0638a0b15bdaea8903b732ecf149712ec67ddf21 Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 13 Dec 2023 16:14:39 +0100 Subject: [PATCH 68/71] Add linters from prysm --- Makefile | 38 +++++++++++++++++++ arbos/l1pricing/l1pricing.go | 6 +-- go.mod | 2 + go.sum | 16 ++++++++ linter/comparesame/comparesame.go | 12 ++++++ linter/cryptorand/cryptorand.go | 13 +++++++ linter/errcheck/errcheck.go | 12 ++++++ linter/featureconfig/featureconfig.go | 12 ++++++ linter/gocognit/gocognit.go | 13 +++++++ linter/ineffassign/ineffassign.go | 12 ++++++ linter/interfacechecker/interfacechecker.go | 12 ++++++ linter/logruswitherror/logruswitherror.go | 12 ++++++ linter/maligned/maligned.go | 12 ++++++ linter/properpermissions/properpermissions.go | 12 ++++++ linter/recursivelock/recursivelock.go | 1 + linter/shadowpredecl/shadowpredecl.go | 12 ++++++ linter/slicedirect/slicedirect.go | 12 ++++++ linter/uintcast/uintcast.go | 12 ++++++ 18 files changed, 218 insertions(+), 3 deletions(-) create mode 100644 linter/comparesame/comparesame.go create mode 100644 linter/cryptorand/cryptorand.go create mode 100644 linter/errcheck/errcheck.go create mode 100644 linter/featureconfig/featureconfig.go create mode 100644 linter/gocognit/gocognit.go create mode 100644 
linter/ineffassign/ineffassign.go create mode 100644 linter/interfacechecker/interfacechecker.go create mode 100644 linter/logruswitherror/logruswitherror.go create mode 100644 linter/maligned/maligned.go create mode 100644 linter/properpermissions/properpermissions.go create mode 100644 linter/shadowpredecl/shadowpredecl.go create mode 100644 linter/slicedirect/slicedirect.go create mode 100644 linter/uintcast/uintcast.go diff --git a/Makefile b/Makefile index 42fd1e3595..ddb94d97ab 100644 --- a/Makefile +++ b/Makefile @@ -308,6 +308,44 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro .make/lint: $(DEP_PREDICATE) build-node-deps $(ORDER_ONLY_PREDICATE) .make go run ./linter/recursivelock ./... + go run ./linter/comparesame ./... + + # Disabled since we have a lot of use of math/rand package. + # We should probably move to crypto/rand at some point even though most of + # our uses doesn't seem to be security sensitive. + # TODO fix this and enable. + # go run ./linter/cryptorand ./... + + # This yields lot of legitimate warnings, most of which in practice would + # probably never happen. + # # TODO fix this and enable. + # go run ./linter/errcheck ./... + + go run ./linter/featureconfig ./... + + # Disabled since we have high cognitive complexity several places. + # TODO fix this and enable. + # go run ./linter/gocognit ./... + + go run ./linter/ineffassign ./... + go run ./linter/interfacechecker ./... + go run ./linter/logruswitherror ./... + + # Yields several non-optimally sorted structs, not sure this is very + # for us. Probably fine to not have this linter. + # go run ./linter/maligned ./... + + # Don't think this is really relevant for us. + # go run ./linter/properpermissions ./... + + go run ./linter/shadowpredecl ./... + go run ./linter/slicedirect ./... + + # Disabled since it fails many places, although ones I looked into seem + # to be false positives logically. 
+ # TODO fix this and enable and mark false positives with lint ignore. + # go run ./linter/uintcast ./... + go run ./linter/koanf ./... go run ./linter/pointercheck ./... golangci-lint run --fix diff --git a/arbos/l1pricing/l1pricing.go b/arbos/l1pricing/l1pricing.go index 27ecae8b85..be5540b601 100644 --- a/arbos/l1pricing/l1pricing.go +++ b/arbos/l1pricing/l1pricing.go @@ -259,11 +259,11 @@ func (ps *L1PricingState) AddToL1FeesAvailable(delta *big.Int) (*big.Int, error) if err != nil { return nil, err } - new := new(big.Int).Add(old, delta) - if err := ps.SetL1FeesAvailable(new); err != nil { + newFee := new(big.Int).Add(old, delta) + if err := ps.SetL1FeesAvailable(newFee); err != nil { return nil, err } - return new, nil + return newFee, nil } func (ps *L1PricingState) TransferFromL1FeesAvailable( diff --git a/go.mod b/go.mod index e504542a2f..f87887e18c 100644 --- a/go.mod +++ b/go.mod @@ -124,6 +124,7 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect github.com/gorilla/mux v1.8.0 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect github.com/h2non/filetype v1.0.6 // indirect @@ -253,6 +254,7 @@ require ( github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/urfave/cli/v2 v2.25.7 // indirect + github.com/uudashr/gocognit v1.0.5 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect diff --git a/go.sum b/go.sum index 0a9649c434..4e3f0f62b4 100644 --- a/go.sum +++ b/go.sum @@ -581,6 +581,9 @@ github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gorilla/websocket v1.4.2/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -625,6 +628,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -1420,6 +1424,11 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.4.0 h1:CtfRrOVZtbDj8rt1WXjklw0kqqJQwICrCKmlfUuBUUw= github.com/openzipkin/zipkin-go v0.4.0/go.mod h1:4c3sLeE8xjNqehmF5RpAFLPLJxXscc0R4l6Zg0P1tTQ= 
+github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1628,6 +1637,8 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= @@ -1658,6 +1669,8 @@ github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6f github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= github.com/urfave/negroni 
v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4= +github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= @@ -1863,6 +1876,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= @@ -2139,8 +2153,10 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo= diff --git a/linter/comparesame/comparesame.go b/linter/comparesame/comparesame.go new file mode 100644 index 0000000000..1d91592e99 --- /dev/null +++ b/linter/comparesame/comparesame.go @@ -0,0 +1,12 @@ +// Static Analyzer to ensure code does not contain comparisons of identical expressions. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/comparesame" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(comparesame.Analyzer) +} diff --git a/linter/cryptorand/cryptorand.go b/linter/cryptorand/cryptorand.go new file mode 100644 index 0000000000..a7cd1ca65a --- /dev/null +++ b/linter/cryptorand/cryptorand.go @@ -0,0 +1,13 @@ +// Static Analyzer to ensure the crypto/rand package is used for randomness +// throughout the codebase. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/cryptorand" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(cryptorand.Analyzer) +} diff --git a/linter/errcheck/errcheck.go b/linter/errcheck/errcheck.go new file mode 100644 index 0000000000..8275b2bab1 --- /dev/null +++ b/linter/errcheck/errcheck.go @@ -0,0 +1,12 @@ +// Static analyzer to ensure that errors are handled in go code. 
+package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/errcheck" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(errcheck.Analyzer) +} diff --git a/linter/featureconfig/featureconfig.go b/linter/featureconfig/featureconfig.go new file mode 100644 index 0000000000..bb9c2dd3a1 --- /dev/null +++ b/linter/featureconfig/featureconfig.go @@ -0,0 +1,12 @@ +// Static analyzer to prevent leaking globals in tests. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/featureconfig" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(featureconfig.Analyzer) +} diff --git a/linter/gocognit/gocognit.go b/linter/gocognit/gocognit.go new file mode 100644 index 0000000000..a50d0ac08d --- /dev/null +++ b/linter/gocognit/gocognit.go @@ -0,0 +1,13 @@ +// Static analyzer that checks for high cognitive complexity and complains when +// it's too high. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/gocognit" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(gocognit.Analyzer) +} diff --git a/linter/ineffassign/ineffassign.go b/linter/ineffassign/ineffassign.go new file mode 100644 index 0000000000..df986b57ee --- /dev/null +++ b/linter/ineffassign/ineffassign.go @@ -0,0 +1,12 @@ +// Static analyzer to prevent leaking globals in tests. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/ineffassign" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(ineffassign.Analyzer) +} diff --git a/linter/interfacechecker/interfacechecker.go b/linter/interfacechecker/interfacechecker.go new file mode 100644 index 0000000000..a12c71a611 --- /dev/null +++ b/linter/interfacechecker/interfacechecker.go @@ -0,0 +1,12 @@ +// Static analyzer to prevent leaking globals in tests. 
+package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/interfacechecker" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(interfacechecker.Analyzer) +} diff --git a/linter/logruswitherror/logruswitherror.go b/linter/logruswitherror/logruswitherror.go new file mode 100644 index 0000000000..a868cdbeee --- /dev/null +++ b/linter/logruswitherror/logruswitherror.go @@ -0,0 +1,12 @@ +// Static analyzer to prevent leaking globals in tests. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/logruswitherror" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(logruswitherror.Analyzer) +} diff --git a/linter/maligned/maligned.go b/linter/maligned/maligned.go new file mode 100644 index 0000000000..3a693fad88 --- /dev/null +++ b/linter/maligned/maligned.go @@ -0,0 +1,12 @@ +// Static analyzer to prevent leaking globals in tests. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/maligned" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(maligned.Analyzer) +} diff --git a/linter/properpermissions/properpermissions.go b/linter/properpermissions/properpermissions.go new file mode 100644 index 0000000000..e9047125b7 --- /dev/null +++ b/linter/properpermissions/properpermissions.go @@ -0,0 +1,12 @@ +// Static analyzer to prevent leaking globals in tests. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/properpermissions" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(properpermissions.Analyzer) +} diff --git a/linter/recursivelock/recursivelock.go b/linter/recursivelock/recursivelock.go index 9349759cf3..8c8caff382 100644 --- a/linter/recursivelock/recursivelock.go +++ b/linter/recursivelock/recursivelock.go @@ -1,3 +1,4 @@ +// Static Analyzer for detecting nested or recursive mutex read lock statements. 
package main import ( diff --git a/linter/shadowpredecl/shadowpredecl.go b/linter/shadowpredecl/shadowpredecl.go new file mode 100644 index 0000000000..d11c1c04c6 --- /dev/null +++ b/linter/shadowpredecl/shadowpredecl.go @@ -0,0 +1,12 @@ +// Static analyzer to prevent leaking globals in tests. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/shadowpredecl" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(shadowpredecl.Analyzer) +} diff --git a/linter/slicedirect/slicedirect.go b/linter/slicedirect/slicedirect.go new file mode 100644 index 0000000000..c51cd9da9e --- /dev/null +++ b/linter/slicedirect/slicedirect.go @@ -0,0 +1,12 @@ +// Static analyzer to prevent leaking globals in tests. +package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/slicedirect" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(slicedirect.Analyzer) +} diff --git a/linter/uintcast/uintcast.go b/linter/uintcast/uintcast.go new file mode 100644 index 0000000000..bce88a8b9d --- /dev/null +++ b/linter/uintcast/uintcast.go @@ -0,0 +1,12 @@ +// Static analyzer to prevent leaking globals in tests. 
+package main + +import ( + "github.com/prysmaticlabs/prysm/v4/tools/analyzers/uintcast" + + "golang.org/x/tools/go/analysis/singlechecker" +) + +func main() { + singlechecker.Main(uintcast.Analyzer) +} From a361d296f85fd8eaf443b4a966ef1dd8610b8e5d Mon Sep 17 00:00:00 2001 From: Nodar Ambroladze Date: Wed, 13 Dec 2023 16:24:12 +0100 Subject: [PATCH 69/71] Drop linters that do not seem too relevant for nitro --- Makefile | 7 ------- linter/ineffassign/ineffassign.go | 2 +- linter/interfacechecker/interfacechecker.go | 2 +- linter/logruswitherror/logruswitherror.go | 3 ++- linter/maligned/maligned.go | 12 ------------ linter/properpermissions/properpermissions.go | 12 ------------ linter/shadowpredecl/shadowpredecl.go | 3 ++- linter/slicedirect/slicedirect.go | 3 ++- linter/uintcast/uintcast.go | 3 ++- 9 files changed, 10 insertions(+), 37 deletions(-) delete mode 100644 linter/maligned/maligned.go delete mode 100644 linter/properpermissions/properpermissions.go diff --git a/Makefile b/Makefile index ddb94d97ab..d7f4599db3 100644 --- a/Makefile +++ b/Makefile @@ -331,13 +331,6 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(arbitrator_pro go run ./linter/interfacechecker ./... go run ./linter/logruswitherror ./... - # Yields several non-optimally sorted structs, not sure this is very - # for us. Probably fine to not have this linter. - # go run ./linter/maligned ./... - - # Don't think this is really relevant for us. - # go run ./linter/properpermissions ./... - go run ./linter/shadowpredecl ./... go run ./linter/slicedirect ./... diff --git a/linter/ineffassign/ineffassign.go b/linter/ineffassign/ineffassign.go index df986b57ee..697e104822 100644 --- a/linter/ineffassign/ineffassign.go +++ b/linter/ineffassign/ineffassign.go @@ -1,4 +1,4 @@ -// Static analyzer to prevent leaking globals in tests. +// Static analyzer for detecting ineffectual assignments. 
package main import ( diff --git a/linter/interfacechecker/interfacechecker.go b/linter/interfacechecker/interfacechecker.go index a12c71a611..50bb56f749 100644 --- a/linter/interfacechecker/interfacechecker.go +++ b/linter/interfacechecker/interfacechecker.go @@ -1,4 +1,4 @@ -// Static analyzer to prevent leaking globals in tests. +// Static analyzer to prevent conditional checks on select interfaces. package main import ( diff --git a/linter/logruswitherror/logruswitherror.go b/linter/logruswitherror/logruswitherror.go index a868cdbeee..d0da1fcb12 100644 --- a/linter/logruswitherror/logruswitherror.go +++ b/linter/logruswitherror/logruswitherror.go @@ -1,4 +1,5 @@ -// Static analyzer to prevent leaking globals in tests. +// Static analyzer to ensure that log statements do not use errors in +// templated log statements. Authors should use logrus.WithError(). package main import ( diff --git a/linter/maligned/maligned.go b/linter/maligned/maligned.go deleted file mode 100644 index 3a693fad88..0000000000 --- a/linter/maligned/maligned.go +++ /dev/null @@ -1,12 +0,0 @@ -// Static analyzer to prevent leaking globals in tests. -package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/maligned" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(maligned.Analyzer) -} diff --git a/linter/properpermissions/properpermissions.go b/linter/properpermissions/properpermissions.go deleted file mode 100644 index e9047125b7..0000000000 --- a/linter/properpermissions/properpermissions.go +++ /dev/null @@ -1,12 +0,0 @@ -// Static analyzer to prevent leaking globals in tests. 
-package main - -import ( - "github.com/prysmaticlabs/prysm/v4/tools/analyzers/properpermissions" - - "golang.org/x/tools/go/analysis/singlechecker" -) - -func main() { - singlechecker.Main(properpermissions.Analyzer) -} diff --git a/linter/shadowpredecl/shadowpredecl.go b/linter/shadowpredecl/shadowpredecl.go index d11c1c04c6..e51ff0da83 100644 --- a/linter/shadowpredecl/shadowpredecl.go +++ b/linter/shadowpredecl/shadowpredecl.go @@ -1,4 +1,5 @@ -// Static analyzer to prevent leaking globals in tests. +// Static analyzer which disallows declaring constructs that shadow predeclared +// Go identifiers by having the same name. package main import ( diff --git a/linter/slicedirect/slicedirect.go b/linter/slicedirect/slicedirect.go index c51cd9da9e..b31404013d 100644 --- a/linter/slicedirect/slicedirect.go +++ b/linter/slicedirect/slicedirect.go @@ -1,4 +1,5 @@ -// Static analyzer to prevent leaking globals in tests. +// Static analyzer to ensure that code does not contain applications of [:] +// on expressions which are already slices. package main import ( diff --git a/linter/uintcast/uintcast.go b/linter/uintcast/uintcast.go index bce88a8b9d..1f0eb78575 100644 --- a/linter/uintcast/uintcast.go +++ b/linter/uintcast/uintcast.go @@ -1,4 +1,5 @@ -// Static analyzer to prevent leaking globals in tests. +// Static analyzer for detecting unsafe uint to int casts. +// Use `lint:ignore uintcast` with proper justification to ignore this check. 
package main import ( From 69e8dde1dfc14758957ce4d3b45dd4e95ed49923 Mon Sep 17 00:00:00 2001 From: Christopher Lamb <37453482+lambchr@users.noreply.github.com> Date: Wed, 13 Dec 2023 16:19:09 +0000 Subject: [PATCH 70/71] remove recursive lock calls from backlogSegment object --- broadcaster/backlog/backlog.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/broadcaster/backlog/backlog.go b/broadcaster/backlog/backlog.go index dee3aa92e0..a1bd2c302f 100644 --- a/broadcaster/backlog/backlog.go +++ b/broadcaster/backlog/backlog.go @@ -294,6 +294,12 @@ func (s *backlogSegment) start() uint64 { func (s *backlogSegment) End() uint64 { s.messagesLock.RLock() defer s.messagesLock.RUnlock() + return s.end() +} + +// end allows the first message to be retrieved from functions that already +// have the messagesLock. +func (s *backlogSegment) end() uint64 { c := len(s.messages) if c == 0 { return uint64(0) @@ -328,7 +334,7 @@ func (s *backlogSegment) Get(start, end uint64) ([]*m.BroadcastFeedMessage, erro return noMsgs, errOutOfBounds } - if end > s.End() { + if end > s.end() { return noMsgs, errOutOfBounds } @@ -367,7 +373,7 @@ func (s *backlogSegment) Contains(i uint64) bool { s.messagesLock.RLock() defer s.messagesLock.RUnlock() start := s.start() - if i < start || i > s.End() { + if i < start || i > s.end() { return false } From 629c9e3e2e167efc7bf158cefa313cee864b862c Mon Sep 17 00:00:00 2001 From: Lee Bousfield Date: Wed, 13 Dec 2023 10:25:34 -0700 Subject: [PATCH 71/71] Make makeClient function a field instead of a global --- broadcastclients/broadcastclients.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/broadcastclients/broadcastclients.go b/broadcastclients/broadcastclients.go index 16c465b9bf..29f5e5192a 100644 --- a/broadcastclients/broadcastclients.go +++ b/broadcastclients/broadcastclients.go @@ -43,6 +43,7 @@ type BroadcastClients struct { primaryClients []*broadcastclient.BroadcastClient 
secondaryClients []*broadcastclient.BroadcastClient secondaryURL []string + makeClient func(string, *Router) (*broadcastclient.BroadcastClient, error) primaryRouter *Router secondaryRouter *Router @@ -51,8 +52,6 @@ type BroadcastClients struct { connected int32 } -var makeClient func(string, *Router) (*broadcastclient.BroadcastClient, error) - func NewBroadcastClients( configFetcher broadcastclient.ConfigFetcher, l2ChainId uint64, @@ -81,7 +80,7 @@ func NewBroadcastClients( secondaryClients: make([]*broadcastclient.BroadcastClient, 0, len(config.SecondaryURL)), secondaryURL: config.SecondaryURL, } - makeClient = func(url string, router *Router) (*broadcastclient.BroadcastClient, error) { + clients.makeClient = func(url string, router *Router) (*broadcastclient.BroadcastClient, error) { return broadcastclient.NewBroadcastClient( configFetcher, url, @@ -97,7 +96,7 @@ func NewBroadcastClients( var lastClientErr error for _, address := range config.URL { - client, err := makeClient(address, clients.primaryRouter) + client, err := clients.makeClient(address, clients.primaryRouter) if err != nil { lastClientErr = err log.Warn("init broadcast client failed", "address", address) @@ -243,7 +242,7 @@ func (bcs *BroadcastClients) startSecondaryFeed(ctx context.Context) { pos := len(bcs.secondaryClients) if pos < len(bcs.secondaryURL) { url := bcs.secondaryURL[pos] - client, err := makeClient(url, bcs.secondaryRouter) + client, err := bcs.makeClient(url, bcs.secondaryRouter) if err != nil { log.Warn("init broadcast secondary client failed", "address", url) bcs.secondaryURL = append(bcs.secondaryURL[:pos], bcs.secondaryURL[pos+1:]...)