diff --git a/.changeset/unlucky-lemons-learn.md b/.changeset/unlucky-lemons-learn.md new file mode 100644 index 00000000000..3e339639958 --- /dev/null +++ b/.changeset/unlucky-lemons-learn.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +Use the new log buffer in automation #changed diff --git a/core/services/job/helpers_test.go b/core/services/job/helpers_test.go index d69056ae819..6f9bee66a0c 100644 --- a/core/services/job/helpers_test.go +++ b/core/services/job/helpers_test.go @@ -71,7 +71,6 @@ maxServiceWorkers = 100 cacheEvictionInterval = "1s" mercuryCredentialName = "%s" contractVersion = "v2.1" -useBufferV1 = %v ` voterTurnoutDataSourceTemplate = ` // data source 1 @@ -277,7 +276,7 @@ func makeOCR2Keeper21JobSpec(t testing.TB, ks keystore.Master, transmitter commo _, registry := cltest.MustInsertRandomKey(t, ks.Eth()) ocr2Keeper21Job := fmt.Sprintf(ocr2Keeper21JobSpecTemplate, registry.String(), kb.ID(), transmitter, - fmt.Sprintf("%s127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), chainID, "mercury cred", false) + fmt.Sprintf("%s127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), chainID, "mercury cred") jobSpec := makeOCR2JobSpecFromToml(t, ocr2Keeper21Job) diff --git a/core/services/ocr2/delegate.go b/core/services/ocr2/delegate.go index a6843401aa7..db0f4e9725e 100644 --- a/core/services/ocr2/delegate.go +++ b/core/services/ocr2/delegate.go @@ -55,7 +55,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/autotelemetry21" ocr2keeper21core "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" - "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/validate" "github.com/smartcontractkit/chainlink/v2/core/services/ocrcommon" "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" @@ -1123,14 +1122,6 @@ func (d *Delegate) newServicesOCR2Keepers21( return nil, errors.New("could not coerce PluginProvider to AutomationProvider") } - // TODO: (AUTO-9355) remove once we remove v0 - if useBufferV1 := cfg.UseBufferV1 != nil && *cfg.UseBufferV1; useBufferV1 { - logProviderFeatures, ok := keeperProvider.LogEventProvider().(logprovider.LogEventProviderFeatures) - if ok { - logProviderFeatures.WithBufferVersion("v1") - } - } - services, err := ocr2keeper.EVMDependencies21(kb) if err != nil { return nil, errors.Wrap(err, "could not build dependencies for ocr2 keepers") diff --git a/core/services/ocr2/plugins/ocr2keeper/config.go b/core/services/ocr2/plugins/ocr2keeper/config.go index 4b41e5a0285..ec56f9c6993 100644 --- a/core/services/ocr2/plugins/ocr2keeper/config.go +++ b/core/services/ocr2/plugins/ocr2keeper/config.go @@ -60,9 +60,6 @@ type PluginConfig struct { ContractVersion string `json:"contractVersion"` // CaptureAutomationCustomTelemetry is a bool flag to toggle Custom Telemetry Service CaptureAutomationCustomTelemetry *bool `json:"captureAutomationCustomTelemetry,omitempty"` - // UseBufferV1 is a bool flag to toggle the new log buffer implementation - // TODO: (AUTO-9355) remove once we have a single version - UseBufferV1 *bool `json:"useBufferV1,omitempty"` } func ValidatePluginConfig(cfg PluginConfig) error { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer.go 
b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer.go deleted file mode 100644 index af934a08013..00000000000 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer.go +++ /dev/null @@ -1,388 +0,0 @@ -package logprovider - -import ( - "encoding/hex" - "math/big" - "sort" - "sync" - "sync/atomic" - - "github.com/smartcontractkit/chainlink-automation/pkg/v3/random" - ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" - - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" - "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/prommetrics" -) - -var ( - // defaultFastExecLogsHigh is the default upper bound / maximum number of logs that Automation is committed to process for each upkeep, - // based on available capacity, i.e. if there are no logs from other upkeeps. - // Used by Log buffer to limit the number of logs we are saving in memory for each upkeep in a block - defaultFastExecLogsHigh = 32 - // defaultNumOfLogUpkeeps is the default number of log upkeeps supported by the registry. - defaultNumOfLogUpkeeps = 50 -) - -// fetchedLog holds the log and the ID of the upkeep -type fetchedLog struct { - upkeepID *big.Int - log logpoller.Log - // cachedLogID is the cached log identifier, used for sorting. - // It is calculated lazily, and cached for performance. - cachedLogID string -} - -func (l *fetchedLog) getLogID() string { - if len(l.cachedLogID) == 0 { - ext := ocr2keepers.LogTriggerExtension{ - Index: uint32(l.log.LogIndex), - } - copy(ext.TxHash[:], l.log.TxHash[:]) - copy(ext.BlockHash[:], l.log.BlockHash[:]) - l.cachedLogID = hex.EncodeToString(ext.LogIdentifier()) - } - return l.cachedLogID -} - -// fetchedBlock holds the logs fetched for a block -type fetchedBlock struct { - blockNumber int64 - // logs is the logs fetched for the block and haven't been visited yet - logs []fetchedLog - // visited is the logs fetched for the block and have been visited. - // We keep them in order to avoid fetching them again. - visited []fetchedLog -} - -func (b *fetchedBlock) Append(lggr logger.Logger, fl fetchedLog, maxBlockLogs, maxUpkeepLogs int) (fetchedLog, bool) { - has, upkeepLogs := b.has(fl.upkeepID, fl.log) - if has { - // Skipping known logs - return fetchedLog{}, false - } - // lggr.Debugw("Adding log", "i", i, "blockBlock", currentBlock.blockNumber, "logBlock", log.BlockNumber, "id", id) - b.logs = append(b.logs, fl) - - // drop logs if we reached limits. - if upkeepLogs+1 > maxUpkeepLogs { - // in case we have logs overflow for a particular upkeep, we drop a log of that upkeep, - // based on shared, random (per block) order of the logs in the block. - b.Sort() - var dropped fetchedLog - currentLogs := make([]fetchedLog, 0, len(b.logs)-1) - for _, l := range b.logs { - if dropped.upkeepID == nil && l.upkeepID.Cmp(fl.upkeepID) == 0 { - dropped = l - continue - } - currentLogs = append(currentLogs, l) - } - b.logs = currentLogs - return dropped, true - } else if len(b.logs)+len(b.visited) > maxBlockLogs { - // in case we have logs overflow in the buffer level, we drop a log based on - // shared, random (per block) order of the logs in the block. - b.Sort() - dropped := b.logs[0] - b.logs = b.logs[1:] - return dropped, true - } - - return fetchedLog{}, true -} - -// Has returns true if the block has the log, -// and the number of logs for that upkeep in the block. 
-func (b fetchedBlock) has(id *big.Int, log logpoller.Log) (bool, int) { - allLogs := append(b.logs, b.visited...) - upkeepLogs := 0 - for _, l := range allLogs { - if l.upkeepID.Cmp(id) != 0 { - continue - } - upkeepLogs++ - if l.log.BlockHash == log.BlockHash && l.log.TxHash == log.TxHash && l.log.LogIndex == log.LogIndex { - return true, upkeepLogs - } - } - return false, upkeepLogs -} - -func (b fetchedBlock) Clone() fetchedBlock { - logs := make([]fetchedLog, len(b.logs)) - copy(logs, b.logs) - visited := make([]fetchedLog, len(b.visited)) - copy(visited, b.visited) - return fetchedBlock{ - blockNumber: b.blockNumber, - logs: logs, - visited: visited, - } -} - -// Sort by log identifiers, shuffled using a pseduorandom souce that is shared across all nodes -// for a given block. -func (b *fetchedBlock) Sort() { - randSeed := random.GetRandomKeySource(nil, uint64(b.blockNumber)) - - shuffledLogIDs := make(map[string]string, len(b.logs)) - for _, log := range b.logs { - logID := log.getLogID() - shuffledLogIDs[logID] = random.ShuffleString(logID, randSeed) - } - - sort.SliceStable(b.logs, func(i, j int) bool { - return shuffledLogIDs[b.logs[i].getLogID()] < shuffledLogIDs[b.logs[j].getLogID()] - }) -} - -// logEventBuffer is a circular/ring buffer of fetched logs. -// Each entry in the buffer represents a block, -// and holds the logs fetched for that block. -type logEventBuffer struct { - lggr logger.Logger - lock sync.RWMutex - // size is the number of blocks supported by the buffer - size int32 - - numOfLogUpkeeps, fastExecLogsHigh uint32 - // blocks is the circular buffer of fetched blocks - blocks []fetchedBlock - // latestBlock is the latest block number seen - latestBlock int64 -} - -func newLogEventBuffer(lggr logger.Logger, size, numOfLogUpkeeps, fastExecLogsHigh int) *logEventBuffer { - return &logEventBuffer{ - lggr: lggr.Named("KeepersRegistry.LogEventBuffer"), - size: int32(size), - blocks: make([]fetchedBlock, size), - numOfLogUpkeeps: uint32(numOfLogUpkeeps), - fastExecLogsHigh: uint32(fastExecLogsHigh), - } -} - -func (b *logEventBuffer) latestBlockSeen() int64 { - return atomic.LoadInt64(&b.latestBlock) -} - -func (b *logEventBuffer) bufferSize() int { - return int(atomic.LoadInt32(&b.size)) -} - -func (b *logEventBuffer) SetLimits(numOfLogUpkeeps, fastExecLogsHigh int) { - atomic.StoreUint32(&b.numOfLogUpkeeps, uint32(numOfLogUpkeeps)) - atomic.StoreUint32(&b.fastExecLogsHigh, uint32(fastExecLogsHigh)) -} - -// enqueue adds logs (if not exist) to the buffer, returning the number of logs added -// minus the number of logs dropped. 
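To make the capacity checks in the removed enqueue path concrete: the v0 buffer derived a per-upkeep cap and an overall per-block cap from its two defaults (`defaultFastExecLogsHigh = 32`, `defaultNumOfLogUpkeeps = 50`). A minimal illustrative computation of those removed limits; the constant names mirror the deleted code, everything else is just for illustration.

```go
package main

import "fmt"

func main() {
	// Defaults from the deleted v0 buffer (buffer.go).
	defaultFastExecLogsHigh := 32 // max logs kept per upkeep in a single block
	defaultNumOfLogUpkeeps := 50  // number of log upkeeps the registry is sized for

	// Limits applied on every enqueue call, as in the removed enqueue():
	maxUpkeepLogs := defaultFastExecLogsHigh                         // 32
	maxBlockLogs := defaultFastExecLogsHigh * defaultNumOfLogUpkeeps // 1600

	fmt.Printf("per-upkeep cap: %d, per-block cap: %d\n", maxUpkeepLogs, maxBlockLogs)
	// Exceeding either cap makes Append drop a log chosen via the shared
	// per-block shuffle, so nodes holding the same logs drop the same one.
}
```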
-func (b *logEventBuffer) enqueue(id *big.Int, logs ...logpoller.Log) int { - b.lock.Lock() - defer b.lock.Unlock() - - lggr := b.lggr.With("id", id.String()) - - maxBlockLogs := int(atomic.LoadUint32(&b.fastExecLogsHigh) * atomic.LoadUint32(&b.numOfLogUpkeeps)) - maxUpkeepLogs := int(atomic.LoadUint32(&b.fastExecLogsHigh)) - - latestBlock := b.latestBlockSeen() - added, dropped := 0, 0 - - for _, log := range logs { - if log.BlockNumber == 0 { - // invalid log - continue - } - i := b.blockNumberIndex(log.BlockNumber) - currentBlock := b.blocks[i] - if currentBlock.blockNumber < log.BlockNumber { - lggr.Debugw("Got log on a new block", "prevBlock", currentBlock.blockNumber, "newBlock", log.BlockNumber) - currentBlock.blockNumber = log.BlockNumber - currentBlock.logs = nil - currentBlock.visited = nil - } else if currentBlock.blockNumber > log.BlockNumber { - // not expected to happen - lggr.Debugw("Skipping log from old block", "currentBlock", currentBlock.blockNumber, "newBlock", log.BlockNumber) - continue - } - droppedLog, ok := currentBlock.Append(lggr, fetchedLog{upkeepID: id, log: log}, maxBlockLogs, maxUpkeepLogs) - if !ok { - // Skipping known logs - continue - } - if droppedLog.upkeepID != nil { - dropped++ - lggr.Debugw("Reached log buffer limits, dropping log", "blockNumber", droppedLog.log.BlockNumber, - "blockHash", droppedLog.log.BlockHash, "txHash", droppedLog.log.TxHash, "logIndex", droppedLog.log.LogIndex, - "upkeepID", droppedLog.upkeepID.String()) - } - added++ - b.blocks[i] = currentBlock - - if log.BlockNumber > latestBlock { - latestBlock = log.BlockNumber - } - } - - if latestBlock > b.latestBlockSeen() { - atomic.StoreInt64(&b.latestBlock, latestBlock) - } - if added > 0 { - lggr.Debugw("Added logs to buffer", "addedLogs", added, "dropped", dropped, "latestBlock", latestBlock) - prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionIngress).Add(float64(added)) - prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionDropped).Add(float64(dropped)) - } - - return added - dropped -} - -// peek returns the logs in range [latestBlock-blocks, latestBlock] -func (b *logEventBuffer) peek(blocks int) []fetchedLog { - latestBlock := b.latestBlockSeen() - if latestBlock == 0 { - return nil - } - if blocks > int(latestBlock) { - blocks = int(latestBlock) - 1 - } - - return b.peekRange(latestBlock-int64(blocks), latestBlock) -} - -// peekRange returns the logs between start and end inclusive. -func (b *logEventBuffer) peekRange(start, end int64) []fetchedLog { - b.lock.RLock() - defer b.lock.RUnlock() - - blocksInRange := b.getBlocksInRange(int(start), int(end)) - - var results []fetchedLog - for _, block := range blocksInRange { - // double checking that we don't have any gaps in the range - if block.blockNumber < start || block.blockNumber > end { - continue - } - results = append(results, block.logs...) - } - - sort.SliceStable(results, func(i, j int) bool { - return results[i].log.BlockNumber < results[j].log.BlockNumber - }) - - b.lggr.Debugw("Peeked logs", "results", len(results), "start", start, "end", end) - - return results -} - -// dequeueRange returns the logs between start and end inclusive. 
-func (b *logEventBuffer) dequeueRange(start, end int64, upkeepLimit, totalLimit int) []fetchedLog { - b.lock.Lock() - defer b.lock.Unlock() - - blocksInRange := b.getBlocksInRange(int(start), int(end)) - fetchedBlocks := make([]fetchedBlock, 0, len(blocksInRange)) - for _, block := range blocksInRange { - // Create clone of the blocks as they get processed and update underlying b.blocks - fetchedBlocks = append(fetchedBlocks, block.Clone()) - } - - // Sort the blocks in reverse order of block number so that latest logs - // are preferred while dequeueing. - sort.SliceStable(fetchedBlocks, func(i, j int) bool { - return fetchedBlocks[i].blockNumber > fetchedBlocks[j].blockNumber - }) - - logsCount := map[string]int{} - totalCount := 0 - var results []fetchedLog - for _, block := range fetchedBlocks { - if block.blockNumber < start || block.blockNumber > end { - // double checking that we don't have any gaps in the range - continue - } - if totalCount >= totalLimit { - // reached total limit, no need to process more blocks - break - } - // Sort the logs in random order that is shared across all nodes. - // This ensures that nodes across the network will process the same logs. - block.Sort() - var remainingLogs, blockResults []fetchedLog - for _, log := range block.logs { - if totalCount >= totalLimit { - remainingLogs = append(remainingLogs, log) - continue - } - if logsCount[log.upkeepID.String()] >= upkeepLimit { - remainingLogs = append(remainingLogs, log) - continue - } - blockResults = append(blockResults, log) - logsCount[log.upkeepID.String()]++ - totalCount++ - } - if len(blockResults) == 0 { - continue - } - results = append(results, blockResults...) - block.visited = append(block.visited, blockResults...) - block.logs = remainingLogs - b.blocks[b.blockNumberIndex(block.blockNumber)] = block - } - - if len(results) > 0 { - b.lggr.Debugw("Dequeued logs", "results", len(results), "start", start, "end", end) - prommetrics.AutomationLogBufferFlow.WithLabelValues(prommetrics.LogBufferFlowDirectionEgress).Add(float64(len(results))) - } - - return results -} - -// getBlocksInRange returns the blocks between start and end. -// NOTE: this function should be called with the lock held -func (b *logEventBuffer) getBlocksInRange(start, end int) []fetchedBlock { - var blocksInRange []fetchedBlock - start, end = b.blockRangeToIndices(start, end) - if start == -1 || end == -1 { - // invalid range - return blocksInRange - } - if start <= end { - // Normal range, need to return indices from start to end(inclusive) - return b.blocks[start : end+1] - } - // in case we get circular range such as [0, 1, end, ... , start, ..., size-1] - // we need to return the blocks in two ranges: [0, end](inclusive) and [start, size-1] - blocksInRange = append(blocksInRange, b.blocks[start:]...) - blocksInRange = append(blocksInRange, b.blocks[:end+1]...) - - return blocksInRange -} - -// blockRangeToIndices returns the normalized range of start to end block range, -// to indices aligned with buffer size. Note ranges inclusive of start, end indices. -func (b *logEventBuffer) blockRangeToIndices(start, end int) (int, int) { - latest := b.latestBlockSeen() - if end > int(latest) { - // Limit end of range to latest block seen - end = int(latest) - } - if end < start || start == 0 || end == 0 { - // invalid range - return -1, -1 - } - size := b.bufferSize() - if end-start >= size { - // If range requires more than buffer size blocks, only to return - // last size blocks as that's the max the buffer stores. 
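The `blockRangeToIndices` helper above normalizes a block range onto the ring, and the slot mapping itself (`blockNumberIndex`, defined just below) is `(bn-1) % size`. A small runnable sketch of that removed arithmetic, using the same size-3 buffer the deleted tests exercise; only the index math is reproduced here.

```go
package main

import "fmt"

// blockNumberIndex mirrors the deleted v0 helper: block bn occupies
// slot (bn-1) % size of the circular buffer.
func blockNumberIndex(bn int64, size int) int {
	return int(bn-1) % size
}

func main() {
	size := 3 // same buffer size as the deleted GetBlocksInRange test
	for bn := int64(1); bn <= 5; bn++ {
		fmt.Printf("block %d -> slot %d\n", bn, blockNumberIndex(bn, size))
	}
	// block 1 -> slot 0, block 2 -> slot 1, block 3 -> slot 2,
	// block 4 -> slot 0 (overwriting block 1), block 5 -> slot 1
}
```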
- start = (end - size) + 1 - } - return b.blockNumberIndex(int64(start)), b.blockNumberIndex(int64(end)) -} - -// blockNumberIndex returns the index of the block in the buffer -func (b *logEventBuffer) blockNumberIndex(bn int64) int { - return int(bn-1) % b.bufferSize() -} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_test.go deleted file mode 100644 index dca43ca14ac..00000000000 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/buffer_test.go +++ /dev/null @@ -1,892 +0,0 @@ -package logprovider - -import ( - "encoding/hex" - "fmt" - "math/big" - "testing" - - "github.com/smartcontractkit/chainlink-automation/pkg/v3/types" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" - - ocr2keepers "github.com/smartcontractkit/chainlink-common/pkg/types/automation" - - "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" - "github.com/smartcontractkit/chainlink/v2/core/logger" - "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/core" -) - -func TestLogEventBuffer_GetBlocksInRange(t *testing.T) { - size := 3 - maxSeenBlock := int64(4) - buf := newLogEventBuffer(logger.TestLogger(t), size, 10, 10) - - buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - ) - - buf.enqueue(big.NewInt(2), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 2}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 2}, - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - ) - - tests := []struct { - name string - from int64 - to int64 - want int - }{ - { - name: "all", - from: 2, - to: 4, - want: 3, - }, - { - name: "partial", - from: 2, - to: 3, - want: 2, - }, - { - name: "circular", - from: 3, - to: 4, - want: 2, - }, - { - name: "zero start", - from: 0, - to: 2, - }, - { - name: "invalid zero end", - from: 0, - to: 0, - }, - { - name: "invalid from larger than to", - from: 4, - to: 2, - }, - { - name: "outside max last seen", - from: 5, - to: 10, - }, - { - name: "limited by max last seen", - from: 2, - to: 5, - want: 3, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - blocks := buf.getBlocksInRange(int(tc.from), int(tc.to)) - require.Equal(t, tc.want, len(blocks)) - if tc.want > 0 { - from := tc.from - require.Equal(t, from, blocks[0].blockNumber) - to := tc.to - if to >= maxSeenBlock { - to = maxSeenBlock - } - require.Equal(t, to, blocks[len(blocks)-1].blockNumber) - } - }) - } -} - -func TestLogEventBuffer_GetBlocksInRange_Circular(t *testing.T) { - size := 4 - buf := newLogEventBuffer(logger.TestLogger(t), size, 10, 10) - - require.Equal(t, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - ), 3) - - require.Equal(t, buf.enqueue(big.NewInt(2), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 2}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 2}, - logpoller.Log{BlockNumber: 4, TxHash: 
common.HexToHash("0x1"), LogIndex: 1}, - ), 3) - - require.Equal(t, buf.enqueue(big.NewInt(3), - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 4}, - logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x3"), LogIndex: 2}, - logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x3"), LogIndex: 5}, - ), 3) - - tests := []struct { - name string - from int64 - to int64 - expectedBlocks []int64 - }{ - { - name: "happy flow", - from: 2, - to: 5, - expectedBlocks: []int64{2, 3, 4, 5}, - }, - { - name: "range overflow circular", - from: 1, - to: 6, - expectedBlocks: []int64{2, 3, 4, 5}, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - blocks := buf.getBlocksInRange(int(tc.from), int(tc.to)) - require.Equal(t, len(tc.expectedBlocks), len(blocks)) - expectedBlockNumbers := map[int64]bool{} - for _, b := range tc.expectedBlocks { - expectedBlockNumbers[b] = false - } - for _, b := range blocks { - expectedBlockNumbers[b.blockNumber] = true - } - for k, v := range expectedBlockNumbers { - require.True(t, v, "missing block %d", k) - } - }) - } -} - -func TestLogEventBuffer_EnqueueDequeue(t *testing.T) { - t.Run("dequeue empty", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) - - results := buf.peekRange(int64(1), int64(2)) - require.Equal(t, 0, len(results)) - results = buf.peek(2) - require.Equal(t, 0, len(results)) - }) - - t.Run("enqueue", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) - - buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - ) - buf.lock.Lock() - require.Equal(t, 2, len(buf.blocks[0].logs)) - buf.lock.Unlock() - }) - - t.Run("enqueue logs overflow", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 2, 2, 2) - - require.Equal(t, 2, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, - )) - buf.lock.Lock() - require.Equal(t, 2, len(buf.blocks[0].logs)) - buf.lock.Unlock() - }) - - t.Run("enqueue logs overflow with dynamic limits", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 2, 10, 2) - - require.Equal(t, 2, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, - )) - buf.SetLimits(10, 3) - require.Equal(t, 3, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 1}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 2}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 3}, - )) - - buf.lock.Lock() - defer buf.lock.Unlock() - require.Equal(t, 2, len(buf.blocks[0].logs)) - require.Equal(t, 3, len(buf.blocks[1].logs)) - }) - - t.Run("enqueue logs overflow with dynamic limits", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 2, 10, 2) - - require.Equal(t, 2, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - 
logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 3}, - )) - buf.SetLimits(10, 3) - require.Equal(t, 3, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 1}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 2}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x21"), LogIndex: 3}, - )) - - buf.lock.Lock() - defer buf.lock.Unlock() - require.Equal(t, 2, len(buf.blocks[0].logs)) - }) - - t.Run("enqueue block overflow", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 3, 2, 10) - - require.Equal(t, 5, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 0}, - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 1}, - )) - buf.lock.Lock() - require.Equal(t, 2, len(buf.blocks[0].logs)) - buf.lock.Unlock() - }) - - t.Run("enqueue upkeep block overflow", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 10, 10, 2) - - require.Equal(t, 2, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 2}, - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 3}, - )) - buf.lock.Lock() - require.Equal(t, 2, len(buf.blocks[0].logs)) - buf.lock.Unlock() - }) - - t.Run("peek range after dequeue", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) - - require.Equal(t, buf.enqueue(big.NewInt(10), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 10}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 11}, - ), 2) - require.Equal(t, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - ), 2) - results := buf.peekRange(int64(1), int64(2)) - require.Equal(t, 2, len(results)) - verifyBlockNumbers(t, results, 1, 2) - removed := buf.dequeueRange(int64(1), int64(2), 2, 10) - require.Equal(t, 2, len(removed)) - results = buf.peekRange(int64(1), int64(2)) - require.Equal(t, 0, len(results)) - }) - - t.Run("enqueue peek and dequeue", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 4, 10, 10) - - require.Equal(t, buf.enqueue(big.NewInt(10), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 10}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 11}, - ), 2) - require.Equal(t, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x1"), LogIndex: 1}, - ), 2) - results := buf.peek(8) - require.Equal(t, 4, len(results)) - verifyBlockNumbers(t, results, 1, 2, 3, 3) - removed := buf.dequeueRange(1, 3, 5, 5) - require.Equal(t, 4, 
len(removed)) - buf.lock.Lock() - require.Equal(t, 0, len(buf.blocks[0].logs)) - require.Equal(t, int64(2), buf.blocks[1].blockNumber) - require.Equal(t, 1, len(buf.blocks[1].visited)) - buf.lock.Unlock() - }) - - t.Run("enqueue and peek range circular", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 3, 10, 10) - - require.Equal(t, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - ), 3) - require.Equal(t, buf.enqueue(big.NewInt(10), - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 10}, - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 11}, - ), 2) - - results := buf.peekRange(int64(1), int64(1)) - require.Equal(t, 0, len(results)) - - results = buf.peekRange(int64(3), int64(5)) - require.Equal(t, 3, len(results)) - verifyBlockNumbers(t, results, 3, 4, 4) - }) - - t.Run("doesnt enqueue old blocks", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) - - require.Equal(t, buf.enqueue(big.NewInt(10), - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 10}, - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x1"), LogIndex: 11}, - ), 2) - require.Equal(t, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - ), 2) - results := buf.peekRange(int64(1), int64(5)) - fmt.Println(results) - verifyBlockNumbers(t, results, 2, 3, 4, 4) - }) - - t.Run("dequeue with limits returns latest block logs", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) - require.Equal(t, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 0}, - logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x5"), LogIndex: 0}, - ), 5) - - logs := buf.dequeueRange(1, 5, 2, 10) - require.Equal(t, 2, len(logs)) - require.Equal(t, int64(5), logs[0].log.BlockNumber) - require.Equal(t, int64(4), logs[1].log.BlockNumber) - - require.Equal(t, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 4, TxHash: common.HexToHash("0x4"), LogIndex: 1}, - logpoller.Log{BlockNumber: 5, TxHash: common.HexToHash("0x5"), LogIndex: 1}, - ), 2) - - logs = buf.dequeueRange(1, 5, 3, 2) - require.Equal(t, 2, len(logs)) - }) - - t.Run("dequeue doesn't return same logs again", func(t *testing.T) { - buf := newLogEventBuffer(logger.TestLogger(t), 3, 5, 10) - require.Equal(t, buf.enqueue(big.NewInt(1), - logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}, - logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}, - logpoller.Log{BlockNumber: 3, TxHash: common.HexToHash("0x3"), LogIndex: 0}, - ), 3) - - logs := buf.dequeueRange(3, 3, 2, 10) - fmt.Println(logs) - require.Equal(t, 1, len(logs)) - - logs = buf.dequeueRange(3, 3, 2, 10) - fmt.Println(logs) - require.Equal(t, 0, len(logs)) - }) -} - -func TestLogEventBuffer_FetchedBlock_Append(t *testing.T) { - 
type appendArgs struct { - fl fetchedLog - maxBlockLogs, maxUpkeepLogs int - added, dropped bool - } - - tests := []struct { - name string - blockNumber int64 - logs []fetchedLog - visited []fetchedLog - toAdd []appendArgs - expected []fetchedLog - added bool - }{ - { - name: "empty block", - blockNumber: 1, - logs: []fetchedLog{}, - visited: []fetchedLog{}, - toAdd: []appendArgs{ - { - fl: fetchedLog{ - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - maxBlockLogs: 10, - maxUpkeepLogs: 2, - added: true, - }, - }, - expected: []fetchedLog{ - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - }, - }, - { - name: "existing log", - blockNumber: 1, - logs: []fetchedLog{ - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - }, - visited: []fetchedLog{}, - toAdd: []appendArgs{ - { - fl: fetchedLog{ - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - maxBlockLogs: 10, - maxUpkeepLogs: 2, - added: false, - }, - }, - expected: []fetchedLog{ - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - }, - }, - { - name: "visited log", - blockNumber: 1, - logs: []fetchedLog{}, - visited: []fetchedLog{ - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - }, - toAdd: []appendArgs{ - { - fl: fetchedLog{ - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - maxBlockLogs: 10, - maxUpkeepLogs: 2, - added: false, - }, - }, - expected: []fetchedLog{}, - }, - { - name: "upkeep log limits", - blockNumber: 1, - logs: []fetchedLog{}, - visited: []fetchedLog{}, - toAdd: []appendArgs{ - { - fl: fetchedLog{ - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - maxBlockLogs: 10, - maxUpkeepLogs: 2, - added: true, - }, - { - fl: fetchedLog{ - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 1, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - maxBlockLogs: 10, - maxUpkeepLogs: 2, - added: true, - }, - { - fl: fetchedLog{ - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 2, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - maxBlockLogs: 10, - maxUpkeepLogs: 2, - added: true, - dropped: true, - }, - }, - expected: []fetchedLog{ - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 1, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 2, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - }, - }, - { - name: "block log limits", - blockNumber: 1, - logs: []fetchedLog{}, - visited: []fetchedLog{}, - toAdd: []appendArgs{ - { - fl: 
fetchedLog{ - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - maxBlockLogs: 2, - maxUpkeepLogs: 4, - added: true, - }, - { - fl: fetchedLog{ - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 1, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - maxBlockLogs: 2, - maxUpkeepLogs: 4, - added: true, - }, - { - fl: fetchedLog{ - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 2, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - maxBlockLogs: 2, - maxUpkeepLogs: 4, - added: true, - dropped: true, - }, - }, - expected: []fetchedLog{ - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 1, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 2, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - lggr := logger.TestLogger(t) - b := fetchedBlock{ - blockNumber: tc.blockNumber, - logs: make([]fetchedLog, len(tc.logs)), - visited: make([]fetchedLog, len(tc.visited)), - } - copy(b.logs, tc.logs) - copy(b.visited, tc.visited) - - for _, args := range tc.toAdd { - dropped, added := b.Append(lggr, args.fl, args.maxBlockLogs, args.maxUpkeepLogs) - require.Equal(t, args.added, added) - if args.dropped { - require.NotNil(t, dropped.upkeepID) - } else { - require.Nil(t, dropped.upkeepID) - } - } - // clear cached logIDs - for i := range b.logs { - b.logs[i].cachedLogID = "" - } - require.Equal(t, tc.expected, b.logs) - }) - } -} -func TestLogEventBuffer_FetchedBlock_Sort(t *testing.T) { - tests := []struct { - name string - blockNumber int64 - logs []fetchedLog - beforeSort []string - afterSort []string - iterations int - }{ - { - name: "no logs", - blockNumber: 10, - logs: []fetchedLog{}, - beforeSort: []string{}, - afterSort: []string{}, - }, - { - name: "single log", - blockNumber: 1, - logs: []fetchedLog{ - { - log: logpoller.Log{ - BlockHash: common.HexToHash("0x111"), - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - }, - }, - beforeSort: []string{ - "0000000000000000000000000000000000000000000000000000000000000111000000000000000000000000000000000000000000000000000000000000000100000000", - }, - afterSort: []string{ - "0000000000000000000000000000000000000000000000000000000000000111000000000000000000000000000000000000000000000000000000000000000100000000", - }, - }, - { - name: "multiple logs with 10 iterations", - blockNumber: 1, - logs: []fetchedLog{ - { - log: logpoller.Log{ - BlockNumber: 1, - BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), - TxHash: common.HexToHash("0xb711bd1103927611ee41152aa8ae27f3330"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), - TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "222").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), - TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), - LogIndex: 4, - }, - 
upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), - TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), - LogIndex: 3, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "222").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), - TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), - LogIndex: 2, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), - TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), - LogIndex: 5, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), - TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), - LogIndex: 3, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - BlockHash: common.HexToHash("0xa25ebae1099f3fbae2525ebae279f3ae25e"), - TxHash: common.HexToHash("0xa651bd1109922111ee411525ebae27f3fb6"), - LogIndex: 1, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - }, - beforeSort: []string{ - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000b711bd1103927611ee41152aa8ae27f333000000000", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000000", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000004", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000003", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000002", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000005", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000003", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000001", - }, - afterSort: []string{ - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000b711bd1103927611ee41152aa8ae27f333000000000", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000000", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000001", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000002", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000003", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000003", - 
"00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000004", - "00000000000000000000000000000a25ebae1099f3fbae2525ebae279f3ae25e00000000000000000000000000000a651bd1109922111ee411525ebae27f3fb600000005", - }, - iterations: 10, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - b := fetchedBlock{ - blockNumber: tc.blockNumber, - logs: make([]fetchedLog, len(tc.logs)), - } - if tc.iterations == 0 { - tc.iterations = 1 - } - // performing the same multiple times should yield the same result - // default is one iteration - for i := 0; i < tc.iterations; i++ { - copy(b.logs, tc.logs) - logIDs := getLogIds(b) - require.Equal(t, len(tc.beforeSort), len(logIDs)) - require.Equal(t, tc.beforeSort, logIDs) - b.Sort() - logIDsAfterSort := getLogIds(b) - require.Equal(t, len(tc.afterSort), len(logIDsAfterSort)) - require.Equal(t, tc.afterSort, logIDsAfterSort) - } - }) - } -} - -func TestLogEventBuffer_FetchedBlock_Clone(t *testing.T) { - b1 := fetchedBlock{ - blockNumber: 1, - logs: []fetchedLog{ - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 0, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - { - log: logpoller.Log{ - BlockNumber: 1, - TxHash: common.HexToHash("0x1"), - LogIndex: 2, - }, - upkeepID: core.GenUpkeepID(types.LogTrigger, "111").BigInt(), - }, - }, - } - - b2 := b1.Clone() - require.Equal(t, b1.blockNumber, b2.blockNumber) - require.Equal(t, len(b1.logs), len(b2.logs)) - require.Equal(t, b1.logs[0].log.BlockNumber, b2.logs[0].log.BlockNumber) - - b1.blockNumber = 2 - b1.logs[0].log.BlockNumber = 2 - require.NotEqual(t, b1.blockNumber, b2.blockNumber) - require.NotEqual(t, b1.logs[0].log.BlockNumber, b2.logs[0].log.BlockNumber) -} - -func verifyBlockNumbers(t *testing.T, logs []fetchedLog, bns ...int64) { - require.Equal(t, len(bns), len(logs), "expected length mismatch") - for i, log := range logs { - require.Equal(t, bns[i], log.log.BlockNumber, "wrong block number") - } -} - -func getLogIds(b fetchedBlock) []string { - logIDs := make([]string, len(b.logs)) - for i, l := range b.logs { - ext := ocr2keepers.LogTriggerExtension{ - TxHash: l.log.TxHash, - Index: uint32(l.log.LogIndex), - BlockHash: l.log.BlockHash, - } - logIDs[i] = hex.EncodeToString(ext.LogIdentifier()) - } - return logIDs -} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go index 19302624b49..7ec65ff4740 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/factory.go @@ -35,23 +35,12 @@ type LogTriggersOptions struct { // Finality depth is the number of blocks to wait before considering a block final. FinalityDepth int64 - // TODO: (AUTO-9355) remove once we have a single version - BufferVersion BufferVersion // LogLimit is the minimum number of logs to process in a single block window. LogLimit uint32 // BlockRate determines the block window for log processing. BlockRate uint32 } -// BufferVersion is the version of the log buffer. 
-// TODO: (AUTO-9355) remove once we have a single version -type BufferVersion string - -const ( - BufferVersionDefault BufferVersion = "" - BufferVersionV1 BufferVersion = "v1" -) - func NewOptions(finalityDepth int64, chainID *big.Int) LogTriggersOptions { opts := new(LogTriggersOptions) opts.chainID = chainID diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go index cdd800071da..99426093951 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go @@ -34,115 +34,92 @@ import ( ) func TestIntegration_LogEventProvider(t *testing.T) { - tests := []struct { - name string - bufferVersion logprovider.BufferVersion - logLimit uint32 - }{ - { - name: "default buffer", - bufferVersion: logprovider.BufferVersionDefault, - logLimit: 10, - }, - { - name: "buffer v1", - bufferVersion: logprovider.BufferVersionV1, - logLimit: 10, - }, - } + ctx, cancel := context.WithCancel(testutils.Context(t)) + defer cancel() + + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] + + db := setupDB(t) + defer db.Close() + + opts := logprovider.NewOptions(200, big.NewInt(1)) + opts.ReadInterval = time.Second / 2 + opts.LogLimit = 10 + + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) + logProvider := provider.(logprovider.LogEventProviderTest) + + n := 10 + + backend.Commit() + lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block - for _, tc := range tests { - bufferVersion, logLimit := tc.bufferVersion, tc.logLimit - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(testutils.Context(t)) - defer cancel() - - backend, stopMining, accounts := setupBackend(t) - defer stopMining() - carrol := accounts[2] - - db := setupDB(t) - defer db.Close() - - opts := logprovider.NewOptions(200, big.NewInt(1)) - opts.ReadInterval = time.Second / 2 - opts.BufferVersion = bufferVersion - opts.LogLimit = logLimit - - lp, ethClient := setupDependencies(t, db, backend) - filterStore := logprovider.NewUpkeepFilterStore() - provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) - logProvider := provider.(logprovider.LogEventProviderTest) - - n := 10 - - backend.Commit() - lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block - - ids, addrs, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) - lp.PollAndSaveLogs(ctx, int64(n)) - - go func() { - if err := logProvider.Start(ctx); err != nil { - t.Logf("error starting log provider: %s", err) - t.Fail() - } - }() - defer logProvider.Close() - - logsRounds := 10 - - poll := pollFn(ctx, t, lp, ethClient) - - triggerEvents(ctx, t, backend, carrol, logsRounds, poll, contracts...) 
- - poll(backend.Commit()) - - waitLogPoller(ctx, t, backend, lp, ethClient) - - waitLogProvider(ctx, t, logProvider, 3) - - allPayloads := collectPayloads(ctx, t, logProvider, n, logsRounds/2) - require.GreaterOrEqual(t, len(allPayloads), n, - "failed to get logs after restart") - - t.Run("Restart", func(t *testing.T) { - t.Log("restarting log provider") - // assuming that our service was closed and restarted, - // we should be able to backfill old logs and fetch new ones - filterStore := logprovider.NewUpkeepFilterStore() - logProvider2 := logprovider.NewLogProvider(logger.TestLogger(t), lp, big.NewInt(1), logprovider.NewLogEventsPacker(), filterStore, opts) - - poll(backend.Commit()) - go func() { - if err2 := logProvider2.Start(ctx); err2 != nil { - t.Logf("error starting log provider: %s", err2) - t.Fail() - } - }() - defer logProvider2.Close() - - // re-register filters - for i, id := range ids { - err := logProvider2.RegisterFilter(ctx, logprovider.FilterOptions{ - UpkeepID: id, - TriggerConfig: newPlainLogTriggerConfig(addrs[i]), - // using block number at which the upkeep was registered, - // before we emitted any logs - UpdateBlock: uint64(n), - }) - require.NoError(t, err) - } - - waitLogProvider(ctx, t, logProvider2, 2) - - t.Log("getting logs after restart") - logsAfterRestart := collectPayloads(ctx, t, logProvider2, n, 5) - require.GreaterOrEqual(t, len(logsAfterRestart), n, - "failed to get logs after restart") + ids, addrs, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) + lp.PollAndSaveLogs(ctx, int64(n)) + + go func() { + if err := logProvider.Start(ctx); err != nil { + t.Logf("error starting log provider: %s", err) + t.Fail() + } + }() + defer logProvider.Close() + + logsRounds := 10 + + poll := pollFn(ctx, t, lp, ethClient) + + triggerEvents(ctx, t, backend, carrol, logsRounds, poll, contracts...) 
+ + poll(backend.Commit()) + + waitLogPoller(ctx, t, backend, lp, ethClient) + + waitLogProvider(ctx, t, logProvider, 3) + + allPayloads := collectPayloads(ctx, t, logProvider, n, logsRounds/2) + require.GreaterOrEqual(t, len(allPayloads), n, + "failed to get logs after restart") + + t.Run("Restart", func(t *testing.T) { + t.Log("restarting log provider") + // assuming that our service was closed and restarted, + // we should be able to backfill old logs and fetch new ones + filterStore := logprovider.NewUpkeepFilterStore() + logProvider2 := logprovider.NewLogProvider(logger.TestLogger(t), lp, big.NewInt(1), logprovider.NewLogEventsPacker(), filterStore, opts) + + poll(backend.Commit()) + go func() { + if err2 := logProvider2.Start(ctx); err2 != nil { + t.Logf("error starting log provider: %s", err2) + t.Fail() + } + }() + defer logProvider2.Close() + + // re-register filters + for i, id := range ids { + err := logProvider2.RegisterFilter(ctx, logprovider.FilterOptions{ + UpkeepID: id, + TriggerConfig: newPlainLogTriggerConfig(addrs[i]), + // using block number at which the upkeep was registered, + // before we emitted any logs + UpdateBlock: uint64(n), }) - }) - } + require.NoError(t, err) + } + + waitLogProvider(ctx, t, logProvider2, 2) + + t.Log("getting logs after restart") + logsAfterRestart := collectPayloads(ctx, t, logProvider2, n, 5) + require.GreaterOrEqual(t, len(logsAfterRestart), n, + "failed to get logs after restart") + }) } func TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { @@ -219,78 +196,55 @@ func TestIntegration_LogEventProvider_UpdateConfig(t *testing.T) { } func TestIntegration_LogEventProvider_Backfill(t *testing.T) { - tests := []struct { - name string - bufferVersion logprovider.BufferVersion - logLimit uint32 - }{ - { - name: "default buffer", - bufferVersion: logprovider.BufferVersionDefault, - logLimit: 10, - }, - { - name: "buffer v1", - bufferVersion: logprovider.BufferVersionV1, - logLimit: 10, - }, - } - - for _, tc := range tests { - bufferVersion, limitLow := tc.bufferVersion, tc.logLimit - t.Run(tc.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) - defer cancel() + ctx, cancel := context.WithTimeout(testutils.Context(t), time.Second*60) + defer cancel() - backend, stopMining, accounts := setupBackend(t) - defer stopMining() - carrol := accounts[2] + backend, stopMining, accounts := setupBackend(t) + defer stopMining() + carrol := accounts[2] - db := setupDB(t) - defer db.Close() + db := setupDB(t) + defer db.Close() - opts := logprovider.NewOptions(200, big.NewInt(1)) - opts.ReadInterval = time.Second / 4 - opts.BufferVersion = bufferVersion - opts.LogLimit = limitLow + opts := logprovider.NewOptions(200, big.NewInt(1)) + opts.ReadInterval = time.Second / 4 + opts.LogLimit = 10 - lp, ethClient := setupDependencies(t, db, backend) - filterStore := logprovider.NewUpkeepFilterStore() - provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) - logProvider := provider.(logprovider.LogEventProviderTest) + lp, ethClient := setupDependencies(t, db, backend) + filterStore := logprovider.NewUpkeepFilterStore() + provider, _ := setup(logger.TestLogger(t), lp, nil, nil, filterStore, &opts) + logProvider := provider.(logprovider.LogEventProviderTest) - n := 10 + n := 10 - backend.Commit() - lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block - _, _, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) + backend.Commit() + 
lp.PollAndSaveLogs(ctx, 1) // Ensure log poller has a latest block + _, _, contracts := deployUpkeepCounter(ctx, t, n, ethClient, backend, carrol, logProvider) - poll := pollFn(ctx, t, lp, ethClient) + poll := pollFn(ctx, t, lp, ethClient) - rounds := 8 - for i := 0; i < rounds; i++ { - poll(backend.Commit()) - triggerEvents(ctx, t, backend, carrol, n, poll, contracts...) - poll(backend.Commit()) - } + rounds := 8 + for i := 0; i < rounds; i++ { + poll(backend.Commit()) + triggerEvents(ctx, t, backend, carrol, n, poll, contracts...) + poll(backend.Commit()) + } - waitLogPoller(ctx, t, backend, lp, ethClient) + waitLogPoller(ctx, t, backend, lp, ethClient) - // starting the log provider should backfill logs - go func() { - if startErr := logProvider.Start(ctx); startErr != nil { - t.Logf("error starting log provider: %s", startErr) - t.Fail() - } - }() - defer logProvider.Close() + // starting the log provider should backfill logs + go func() { + if startErr := logProvider.Start(ctx); startErr != nil { + t.Logf("error starting log provider: %s", startErr) + t.Fail() + } + }() + defer logProvider.Close() - waitLogProvider(ctx, t, logProvider, 3) + waitLogProvider(ctx, t, logProvider, 3) - allPayloads := collectPayloads(ctx, t, logProvider, n*rounds, 5) - require.GreaterOrEqual(t, len(allPayloads), len(contracts), "failed to backfill logs") - }) - } + allPayloads := collectPayloads(ctx, t, logProvider, n*rounds, 5) + require.GreaterOrEqual(t, len(allPayloads), len(contracts), "failed to backfill logs") } func TestIntegration_LogRecoverer_Backfill(t *testing.T) { diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go index 3609d0a4654..f1de1ef5129 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider.go @@ -81,13 +81,8 @@ type LogEventProviderTest interface { CurrentPartitionIdx() uint64 } -type LogEventProviderFeatures interface { - WithBufferVersion(v BufferVersion) -} - var _ LogEventProvider = &logEventProvider{} var _ LogEventProviderTest = &logEventProvider{} -var _ LogEventProviderFeatures = &logEventProvider{} // logEventProvider manages log filters for upkeeps and enables to read the log events. 
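With the version switch gone, the provider below holds a single `LogBuffer` and always runs the two-phase dequeue: a minimum-commitment pass per block window first, then a best-effort pass with any remaining capacity. A hedged, condensed sketch of that flow; the call shape of `Dequeue` follows the surrounding diff, while `maxPayloads`, the stand-in interface, and the single-window scope are simplifications, not the literal implementation.

```go
package sketch

// log stands in for the ocr2keepers payload type used by the real provider.
type log struct{}

// logBuffer is a stand-in with only the method used below, matching the
// Dequeue(startWindow, maxResults, minimumDequeue) call shape in provider.go.
type logBuffer interface {
	Dequeue(startWindow int64, maxResults int, minimumDequeue bool) (logs []log, remaining int)
}

const maxPayloads = 100 // assumption: stands in for the package's MaxPayloads

// getLatestLogs sketches the now-unconditional two-phase dequeue for one
// block window: guaranteed minimum first, then best effort.
func getLatestLogs(buf logBuffer, startWindow int64) []log {
	// Phase 1: minimum-commitment dequeue for the window.
	payloads, _ := buf.Dequeue(startWindow, maxPayloads, true)

	// Phase 2: best-effort dequeue if capacity remains.
	if len(payloads) < maxPayloads {
		more, _ := buf.Dequeue(startWindow, maxPayloads-len(payloads), false)
		payloads = append(payloads, more...)
	}
	return payloads
}
```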
type logEventProvider struct { @@ -104,8 +99,7 @@ type logEventProvider struct { registerLock sync.Mutex filterStore UpkeepFilterStore - buffer *logEventBuffer - bufferV1 LogBuffer + buffer LogBuffer opts LogTriggersOptions @@ -119,8 +113,7 @@ func NewLogProvider(lggr logger.Logger, poller logpoller.LogPoller, chainID *big threadCtrl: utils.NewThreadControl(), lggr: lggr.Named("KeepersRegistry.LogEventProvider"), packer: packer, - buffer: newLogEventBuffer(lggr, int(opts.LookbackBlocks), defaultNumOfLogUpkeeps, defaultFastExecLogsHigh), - bufferV1: NewLogBuffer(lggr, uint32(opts.LookbackBlocks), opts.BlockRate, opts.LogLimit), + buffer: NewLogBuffer(lggr, uint32(opts.LookbackBlocks), opts.BlockRate, opts.LogLimit), poller: poller, opts: opts, filterStore: filterStore, @@ -147,20 +140,7 @@ func (p *logEventProvider) SetConfig(cfg ocr2keepers.LogEventProviderConfig) { atomic.StoreUint32(&p.opts.BlockRate, blockRate) atomic.StoreUint32(&p.opts.LogLimit, logLimit) - switch p.opts.BufferVersion { - case BufferVersionV1: - p.bufferV1.SetConfig(uint32(p.opts.LookbackBlocks), blockRate, logLimit) - default: - } -} - -func (p *logEventProvider) WithBufferVersion(v BufferVersion) { - p.lock.Lock() - defer p.lock.Unlock() - - p.lggr.Debugw("with buffer version", "version", v) - - p.opts.BufferVersion = v + p.buffer.SetConfig(uint32(p.opts.LookbackBlocks), blockRate, logLimit) } func (p *logEventProvider) Start(context.Context) error { @@ -197,7 +177,7 @@ func (p *logEventProvider) Start(context.Context) error { for { select { case <-ticker.C: - if err := p.syncBufferFilters(); err != nil { + if err := p.buffer.SyncFilters(p.filterStore); err != nil { p.lggr.Warnw("failed to sync buffer filters", "err", err) } case <-ctx.Done(): @@ -259,22 +239,11 @@ func (p *logEventProvider) getLogsFromBuffer(latestBlock int64) []ocr2keepers.Up start = 1 } - switch p.opts.BufferVersion { - case BufferVersionV1: - payloads = p.minimumCommitmentDequeue(latestBlock, start) + payloads = p.minimumCommitmentDequeue(latestBlock, start) - // if we have remaining capacity following minimum commitment dequeue, perform a best effort dequeue - if len(payloads) < MaxPayloads { - payloads = p.bestEffortDequeue(latestBlock, start, payloads) - } - default: - logs := p.buffer.dequeueRange(start, latestBlock, AllowedLogsPerUpkeep, MaxPayloads) - for _, l := range logs { - payload, err := p.createPayload(l.upkeepID, l.log) - if err == nil { - payloads = append(payloads, payload) - } - } + // if we have remaining capacity following minimum commitment dequeue, perform a best effort dequeue + if len(payloads) < MaxPayloads { + payloads = p.bestEffortDequeue(latestBlock, start, payloads) } return payloads @@ -290,7 +259,7 @@ func (p *logEventProvider) minimumCommitmentDequeue(latestBlock, start int64) [] startWindow, _ := getBlockWindow(start, blockRate) // dequeue the minimum number logs (log limit, varies by chain) per upkeep for this block window - logs, remaining := p.bufferV1.Dequeue(startWindow, MaxPayloads-len(payloads), true) + logs, remaining := p.buffer.Dequeue(startWindow, MaxPayloads-len(payloads), true) if len(logs) > 0 { p.lggr.Debugw("minimum commitment dequeue", "start", start, "latestBlock", latestBlock, "logs", len(logs), "remaining", remaining) } @@ -316,7 +285,7 @@ func (p *logEventProvider) bestEffortDequeue(latestBlock, start int64, payloads startWindow, _ := getBlockWindow(start, blockRate) // dequeue as many logs as we can, based on remaining capacity, for this block window - logs, remaining := 
p.bufferV1.Dequeue(startWindow, MaxPayloads-len(payloads), false) + logs, remaining := p.buffer.Dequeue(startWindow, MaxPayloads-len(payloads), false) if len(logs) > 0 { p.lggr.Debugw("best effort dequeue", "start", start, "latestBlock", latestBlock, "logs", len(logs), "remaining", remaining) } @@ -522,12 +491,8 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ } filteredLogs := filter.Select(logs...) - switch p.opts.BufferVersion { - case BufferVersionV1: - p.bufferV1.Enqueue(filter.upkeepID, filteredLogs...) - default: - p.buffer.enqueue(filter.upkeepID, filteredLogs...) - } + p.buffer.Enqueue(filter.upkeepID, filteredLogs...) + // Update the lastPollBlock for filter in slice this is then // updated into filter store in updateFiltersLastPoll filters[i].lastPollBlock = latest @@ -535,16 +500,3 @@ func (p *logEventProvider) readLogs(ctx context.Context, latest int64, filters [ return merr } - -func (p *logEventProvider) syncBufferFilters() error { - p.lock.RLock() - buffVersion := p.opts.BufferVersion - p.lock.RUnlock() - - switch buffVersion { - case BufferVersionV1: - return p.bufferV1.SyncFilters(p.filterStore) - default: - return nil - } -} diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go index 282f89d370c..9536a24ce06 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/provider_test.go @@ -258,37 +258,14 @@ func TestLogEventProvider_ReadLogs(t *testing.T) { filterStore := NewUpkeepFilterStore() p := NewLogProvider(logger.TestLogger(t), mp, big.NewInt(1), &mockedPacker{}, filterStore, NewOptions(200, big.NewInt(1))) - var ids []*big.Int for i := 0; i < 10; i++ { cfg, f := newEntry(p, i+1) - ids = append(ids, f.upkeepID) require.NoError(t, p.RegisterFilter(ctx, FilterOptions{ UpkeepID: f.upkeepID, TriggerConfig: cfg, })) } - t.Run("no entries", func(t *testing.T) { - require.NoError(t, p.ReadLogs(ctx, big.NewInt(999999))) - logs := p.buffer.peek(10) - require.Len(t, logs, 0) - }) - - t.Run("has entries", func(t *testing.T) { - require.NoError(t, p.ReadLogs(ctx, ids[:2]...)) - logs := p.buffer.peek(10) - require.Len(t, logs, 2) - - var updatedFilters []upkeepFilter - filterStore.RangeFiltersByIDs(func(i int, f upkeepFilter) { - updatedFilters = append(updatedFilters, f.Clone()) - }, ids[:2]...) 
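For readers following the provider.go hunks above: with the v0 buffer gone, getLogsFromBuffer always runs the same two-phase flow, a minimum-commitment dequeue per block window followed by a best-effort dequeue that spends whatever capacity is left. The standalone Go sketch below illustrates only that shape; the buffer interface, maxPayloads constant, and window arithmetic are simplified stand-ins for the real LogBuffer, MaxPayloads, and getBlockWindow, not the actual implementation in this patch.

// Illustrative sketch (simplified stand-ins, not part of this patch).
package main

import "fmt"

const maxPayloads = 8 // stand-in for MaxPayloads

type payload struct{ block int64 }

// buffer is a toy stand-in for the LogBuffer interface used above: Dequeue
// returns up to limit logs for the window starting at startWindow, and the
// minimum flag selects the guaranteed per-upkeep pass vs. the best-effort pass.
type buffer interface {
	Dequeue(startWindow int64, limit int, minimum bool) ([]payload, int)
}

// getLatest mirrors the two-phase flow of getLogsFromBuffer: dequeue the
// minimum commitment for the block window first, then fill leftover capacity
// with a best-effort dequeue.
func getLatest(b buffer, start, blockRate int64) []payload {
	startWindow := start - start%blockRate // stand-in for getBlockWindow

	payloads, _ := b.Dequeue(startWindow, maxPayloads, true)
	if len(payloads) < maxPayloads {
		more, _ := b.Dequeue(startWindow, maxPayloads-len(payloads), false)
		payloads = append(payloads, more...)
	}
	return payloads
}

// fixedBuffer hands out a fixed number of payloads per call, just to exercise getLatest.
type fixedBuffer struct{ perCall int }

func (f fixedBuffer) Dequeue(startWindow int64, limit int, _ bool) ([]payload, int) {
	n := f.perCall
	if n > limit {
		n = limit
	}
	out := make([]payload, n)
	for i := range out {
		out[i] = payload{block: startWindow + int64(i)}
	}
	return out, 0
}

func main() {
	got := getLatest(fixedBuffer{perCall: 3}, 100, 4)
	fmt.Println("dequeued", len(got), "payloads") // 3 minimum-commitment + 3 best-effort = 6
}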
- for _, f := range updatedFilters { - // Last poll block should be updated - require.Equal(t, int64(1), f.lastPollBlock) - } - }) - // TODO: test rate limiting } @@ -342,7 +319,6 @@ func remainingBlockWindowCounts(queues map[string]*upkeepLogQueue, blockRate int func TestLogEventProvider_GetLatestPayloads(t *testing.T) { t.Run("dequeuing from an empty buffer returns 0 logs", func(t *testing.T) { opts := NewOptions(200, big.NewInt(42161)) - opts.BufferVersion = "v1" logPoller := &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -361,7 +337,6 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { t.Run("a single log for a single upkeep gets dequeued", func(t *testing.T) { opts := NewOptions(200, big.NewInt(42161)) - opts.BufferVersion = "v1" logPoller := &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -373,7 +348,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - buffer := provider.bufferV1 + buffer := provider.buffer buffer.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}) @@ -384,7 +359,6 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { t.Run("a log per upkeep for 4 upkeeps across 4 blocks (2 separate block windows) is dequeued, for a total of 4 payloads", func(t *testing.T) { opts := NewOptions(200, big.NewInt(42161)) - opts.BufferVersion = "v1" logPoller := &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -396,7 +370,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - buffer := provider.bufferV1 + buffer := provider.buffer buffer.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: 1, TxHash: common.HexToHash("0x1"), LogIndex: 0}) buffer.Enqueue(big.NewInt(2), logpoller.Log{BlockNumber: 2, TxHash: common.HexToHash("0x2"), LogIndex: 0}) @@ -410,7 +384,6 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { t.Run("100 logs are dequeued for a single upkeep, 1 log for every block window across 100 blocks followed by best effort", func(t *testing.T) { opts := NewOptions(200, big.NewInt(42161)) - opts.BufferVersion = "v1" logPoller := &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -422,7 +395,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - buffer := provider.bufferV1.(*logBuffer) + buffer := provider.buffer.(*logBuffer) for i := 0; i < 100; i++ { buffer.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: int64(i + 1), TxHash: common.HexToHash(fmt.Sprintf("0x%d", i+1)), LogIndex: 0}) @@ -439,7 +412,6 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { t.Run("100 logs are dequeued for two upkeeps, 25 logs each as min commitment (50 logs total best effort), followed by best effort", func(t *testing.T) { opts := NewOptions(200, big.NewInt(42161)) - opts.BufferVersion = "v1" logPoller := &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -451,7 +423,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - buffer := provider.bufferV1.(*logBuffer) + buffer := provider.buffer.(*logBuffer) for i := 0; i < 100; i++ { buffer.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: int64(i + 1), TxHash: common.HexToHash(fmt.Sprintf("0x1%d", i+1)), LogIndex: 0}) @@ -491,7 +463,6 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { t.Run("minimum guaranteed for all windows including an incomplete window followed 
by best effort", func(t *testing.T) { opts := NewOptions(200, big.NewInt(42161)) - opts.BufferVersion = "v1" logPoller := &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -503,7 +474,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - buffer := provider.bufferV1.(*logBuffer) + buffer := provider.buffer.(*logBuffer) for i := 0; i < 102; i++ { buffer.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: int64(i + 1), TxHash: common.HexToHash(fmt.Sprintf("0x1%d", i+1)), LogIndex: 0}) @@ -551,7 +522,6 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { t.Run("min dequeue followed by best effort followed by reorg followed by best effort", func(t *testing.T) { opts := NewOptions(200, big.NewInt(42161)) - opts.BufferVersion = "v1" logPoller := &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -563,7 +533,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - buffer := provider.bufferV1.(*logBuffer) + buffer := provider.buffer.(*logBuffer) for i := 0; i < 100; i++ { buffer.Enqueue(big.NewInt(1), logpoller.Log{BlockNumber: int64(i + 1), TxHash: common.HexToHash(fmt.Sprintf("0x1%d", i+1)), LogIndex: 0}) @@ -606,7 +576,6 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { t.Run("sparsely populated blocks", func(t *testing.T) { opts := NewOptions(200, big.NewInt(42161)) - opts.BufferVersion = "v1" logPoller := &mockLogPoller{ LatestBlockFn: func(ctx context.Context) (int64, error) { @@ -618,7 +587,7 @@ func TestLogEventProvider_GetLatestPayloads(t *testing.T) { ctx := context.Background() - buffer := provider.bufferV1.(*logBuffer) + buffer := provider.buffer.(*logBuffer) upkeepOmittedOnBlocks := map[int64][]int{ 1: {5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100}, // upkeep 1 won't have logs on 20 blocks diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go index cd992b1ea61..88e7e79bbf2 100644 --- a/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/integration_21_test.go @@ -54,7 +54,6 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/services/job" "github.com/smartcontractkit/chainlink/v2/core/services/keystore/keys/ethkey" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper" - "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury" "github.com/smartcontractkit/chainlink/v2/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/mercury/streams" "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" @@ -119,7 +118,7 @@ func TestIntegration_KeeperPluginConditionalUpkeep(t *testing.T) { require.NoError(t, err) registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) - setupNodes(t, nodeKeys, registry, backend, steve, false) + setupNodes(t, nodeKeys, registry, backend, steve) <-time.After(time.Second * 5) @@ -173,368 +172,314 @@ func TestIntegration_KeeperPluginConditionalUpkeep(t *testing.T) { } func TestIntegration_KeeperPluginLogUpkeep(t *testing.T) { - tests := []struct { - name string - logBufferVersion logprovider.BufferVersion - }{ - { - name: "default buffer", - logBufferVersion: logprovider.BufferVersionDefault, - }, - { - name: "buffer 
v1", - logBufferVersion: logprovider.BufferVersionV1, - }, - } + g := gomega.NewWithT(t) - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - g := gomega.NewWithT(t) - - // setup blockchain - sergey := testutils.MustNewSimTransactor(t) // owns all the link - steve := testutils.MustNewSimTransactor(t) // registry owner - carrol := testutils.MustNewSimTransactor(t) // upkeep owner - genesisData := core.GenesisAlloc{ - sergey.From: {Balance: assets.Ether(10000).ToInt()}, - steve.From: {Balance: assets.Ether(10000).ToInt()}, - carrol.From: {Balance: assets.Ether(10000).ToInt()}, - } - // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether - var nodeKeys [5]ethkey.KeyV2 - for i := int64(0); i < 5; i++ { - nodeKeys[i] = cltest.MustGenerateRandomKey(t) - genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} - } + // setup blockchain + sergey := testutils.MustNewSimTransactor(t) // owns all the link + steve := testutils.MustNewSimTransactor(t) // registry owner + carrol := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + sergey.From: {Balance: assets.Ether(10000).ToInt()}, + steve.From: {Balance: assets.Ether(10000).ToInt()}, + carrol.From: {Balance: assets.Ether(10000).ToInt()}, + } + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } - backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) - stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain - defer stopMining() + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() - // Deploy registry - linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) - require.NoError(t, err) - gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) - require.NoError(t, err) - linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) - require.NoError(t, err) + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) + require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(steve, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) - registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) - setupNodes(t, nodeKeys, registry, backend, steve, tc.logBufferVersion == logprovider.BufferVersionV1) - upkeeps := 1 + registry := deployKeeper21Registry(t, steve, backend, linkAddr, linkFeedAddr, gasFeedAddr) + setupNodes(t, nodeKeys, registry, backend, steve) + upkeeps := 1 - _, err = linkToken.Transfer(sergey, carrol.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeeps+1)))) - 
require.NoError(t, err) + _, err = linkToken.Transfer(sergey, carrol.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeeps+1)))) + require.NoError(t, err) - backend.Commit() + backend.Commit() - ids, addrs, contracts := deployUpkeeps(t, backend, carrol, steve, linkToken, registry, upkeeps) - require.Equal(t, upkeeps, len(ids)) - require.Equal(t, len(ids), len(contracts)) - require.Equal(t, len(ids), len(addrs)) + ids, addrs, contracts := deployUpkeeps(t, backend, carrol, steve, linkToken, registry, upkeeps) + require.Equal(t, upkeeps, len(ids)) + require.Equal(t, len(ids), len(contracts)) + require.Equal(t, len(ids), len(addrs)) - backend.Commit() + backend.Commit() - emits := 1 - go emitEvents(testutils.Context(t), t, emits, contracts, carrol, func() { - backend.Commit() - }) + emits := 1 + go emitEvents(testutils.Context(t), t, emits, contracts, carrol, func() { + backend.Commit() + }) - listener, done := listenPerformed(t, backend, registry, ids, int64(1)) - g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) - done() + listener, done := listenPerformed(t, backend, registry, ids, int64(1)) + g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) + done() - t.Run("recover logs", func(t *testing.T) { - addr, contract := addrs[0], contracts[0] - upkeepID := registerUpkeep(t, registry, addr, carrol, steve, backend) + t.Run("recover logs", func(t *testing.T) { + addr, contract := addrs[0], contracts[0] + upkeepID := registerUpkeep(t, registry, addr, carrol, steve, backend) + backend.Commit() + t.Logf("Registered new upkeep %s for address %s", upkeepID.String(), addr.String()) + // Emit 100 logs in a burst + recoverEmits := 100 + i := 0 + emitEvents(testutils.Context(t), t, 100, []*log_upkeep_counter_wrapper.LogUpkeepCounter{contract}, carrol, func() { + i++ + if i%(recoverEmits/4) == 0 { backend.Commit() - t.Logf("Registered new upkeep %s for address %s", upkeepID.String(), addr.String()) - // Emit 100 logs in a burst - recoverEmits := 100 - i := 0 - emitEvents(testutils.Context(t), t, 100, []*log_upkeep_counter_wrapper.LogUpkeepCounter{contract}, carrol, func() { - i++ - if i%(recoverEmits/4) == 0 { - backend.Commit() - time.Sleep(time.Millisecond * 250) // otherwise we get "invalid transaction nonce" errors - } - }) + time.Sleep(time.Millisecond * 250) // otherwise we get "invalid transaction nonce" errors + } + }) - beforeDummyBlocks := backend.Blockchain().CurrentBlock().Number.Uint64() + beforeDummyBlocks := backend.Blockchain().CurrentBlock().Number.Uint64() - // Mine enough blocks to ensure these logs don't fall into log provider range - dummyBlocks := 500 - for i := 0; i < dummyBlocks; i++ { - backend.Commit() - time.Sleep(time.Millisecond * 10) - } + // Mine enough blocks to ensure these logs don't fall into log provider range + dummyBlocks := 500 + for i := 0; i < dummyBlocks; i++ { + backend.Commit() + time.Sleep(time.Millisecond * 10) + } - t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks) + t.Logf("Mined %d blocks, waiting for logs to be recovered", dummyBlocks) - listener, done := listenPerformedN(t, backend, registry, ids, int64(beforeDummyBlocks), recoverEmits) - defer done() - g.Eventually(listener, testutils.WaitTimeout(t), cltest.DBPollingInterval).Should(gomega.BeTrue()) - }) - }) - } + listener, done := listenPerformedN(t, backend, registry, ids, int64(beforeDummyBlocks), recoverEmits) + defer done() + g.Eventually(listener, testutils.WaitTimeout(t), 
cltest.DBPollingInterval).Should(gomega.BeTrue()) + }) } func TestIntegration_KeeperPluginLogUpkeep_Retry(t *testing.T) { - tests := []struct { - name string - logBufferVersion logprovider.BufferVersion - }{ - { - name: "default buffer", - logBufferVersion: logprovider.BufferVersionDefault, - }, - { - name: "buffer v1", - logBufferVersion: logprovider.BufferVersionV1, - }, + g := gomega.NewWithT(t) + + // setup blockchain + linkOwner := testutils.MustNewSimTransactor(t) // owns all the link + registryOwner := testutils.MustNewSimTransactor(t) // registry owner + upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, + registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, + upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - g := gomega.NewWithT(t) - - // setup blockchain - linkOwner := testutils.MustNewSimTransactor(t) // owns all the link - registryOwner := testutils.MustNewSimTransactor(t) // registry owner - upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner - genesisData := core.GenesisAlloc{ - linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, - registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, - upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, - } + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } - // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether - var nodeKeys [5]ethkey.KeyV2 - for i := int64(0); i < 5; i++ { - nodeKeys[i] = cltest.MustGenerateRandomKey(t) - genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} - } + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() - backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) - stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain - defer stopMining() + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) + require.NoError(t, err) - // Deploy registry - linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) - require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) - gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) - require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) - linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) - require.NoError(t, err) + registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, 
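A note on the large test hunks above and below: these integration tests were previously table-driven over the two buffer versions, wrapping the whole body in a for-range over a tests slice with t.Run per entry. With only one buffer left, the wrapper carries no information, so the body is inlined and re-indented, which is why the hunks are big but mechanical. The sketch below shows the general before/after shape with hypothetical names; it is not the Chainlink test code itself.

// Illustrative sketch of collapsing a table-driven test (hypothetical names).
package example

import "testing"

// Before: one subtest per buffer variant.
func TestFeatureTableDriven(t *testing.T) {
	tests := []struct {
		name    string
		variant string
	}{
		{name: "default buffer", variant: "v0"},
		{name: "buffer v1", variant: "v1"},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			runScenario(t, tc.variant)
		})
	}
}

// After: a single variant remains, so the wrapper is dropped and the body inlined.
func TestFeature(t *testing.T) {
	runScenario(t, "v1")
}

// runScenario stands in for the shared test body.
func runScenario(t *testing.T, variant string) {
	t.Helper()
	if variant == "" {
		t.Fatal("variant must be set")
	}
}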
linkFeedAddr, gasFeedAddr) - registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) + _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner) - _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner, tc.logBufferVersion == logprovider.BufferVersionV1) + const upkeepCount = 10 + const mercuryFailCount = upkeepCount * 3 * 2 - const upkeepCount = 10 - const mercuryFailCount = upkeepCount * 3 * 2 + // testing with the mercury server involves mocking responses. currently, + // there is not a way to connect a mercury call to an upkeep id (though we + // could add custom headers) so the test must be fairly basic and just + // count calls before switching to successes + var ( + mu sync.Mutex + count int + ) - // testing with the mercury server involves mocking responses. currently, - // there is not a way to connect a mercury call to an upkeep id (though we - // could add custom headers) so the test must be fairly basic and just - // count calls before switching to successes - var ( - mu sync.Mutex - count int - ) + mercuryServer.RegisterHandler(func(w http.ResponseWriter, r *http.Request) { + mu.Lock() + defer mu.Unlock() - mercuryServer.RegisterHandler(func(w http.ResponseWriter, r *http.Request) { - mu.Lock() - defer mu.Unlock() + count++ - count++ + _ = r.ParseForm() - _ = r.ParseForm() + t.Logf("MercuryHTTPServe:RequestURI: %s", r.RequestURI) - t.Logf("MercuryHTTPServe:RequestURI: %s", r.RequestURI) + for key, value := range r.Form { + t.Logf("MercuryHTTPServe:FormValue: key: %s; value: %s;", key, value) + } - for key, value := range r.Form { - t.Logf("MercuryHTTPServe:FormValue: key: %s; value: %s;", key, value) - } + // the streams lookup retries against the remote server 3 times before + // returning a result as retryable. + // the simulation here should force the streams lookup process to return + // retryable 2 times. + // the total count of failures should be (upkeepCount * 3 * tryCount) + if count <= mercuryFailCount { + w.WriteHeader(http.StatusNotFound) - // the streams lookup retries against the remote server 3 times before - // returning a result as retryable. - // the simulation here should force the streams lookup process to return - // retryable 2 times. 
- // the total count of failures should be (upkeepCount * 3 * tryCount) - if count <= mercuryFailCount { - w.WriteHeader(http.StatusNotFound) + return + } - return - } + // start sending success messages + output := `{"chainlinkBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}` - // start sending success messages - output := `{"chainlinkBlob":"0x0001c38d71fed6c320b90e84b6f559459814d068e2a1700adc931ca9717d4fe70000000000000000000000000000000000000000000000000000000001a80b52b4bf1233f9cb71144a253a1791b202113c4ab4a92fa1b176d684b4959666ff8200000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001004254432d5553442d415242495452554d2d544553544e4554000000000000000000000000000000000000000000000000000000000000000000000000645570be000000000000000000000000000000000000000000000000000002af2b818dc5000000000000000000000000000000000000000000000000000002af2426faf3000000000000000000000000000000000000000000000000000002af32dc209700000000000000000000000000000000000000000000000000000000012130f8df0a9745bb6ad5e2df605e158ba8ad8a33ef8a0acf9851f0f01668a3a3f2b68600000000000000000000000000000000000000000000000000000000012130f60000000000000000000000000000000000000000000000000000000000000002c4a7958dce105089cf5edb68dad7dcfe8618d7784eb397f97d5a5fade78c11a58275aebda478968e545f7e3657aba9dcbe8d44605e4c6fde3e24edd5e22c94270000000000000000000000000000000000000000000000000000000000000002459c12d33986018a8959566d145225f0c4a4e61a9a3f50361ccff397899314f0018162cf10cd89897635a0bb62a822355bd199d09f4abe76e4d05261bb44733d"}` + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(output)) + }) - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte(output)) - }) + defer mercuryServer.Stop() - defer mercuryServer.Stop() + _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) + require.NoError(t, err) - _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, 
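To make the retry arithmetic in the handler above concrete: each streams lookup retries the mercury server 3 times, and the test wants each of the 10 upkeeps to come back retryable twice before succeeding, so mercuryFailCount = upkeepCount * 3 * 2 = 60 requests receive a 404 before the handler switches to the canned blob. A stripped-down version of that count-then-succeed handler is sketched below; it uses only the standard library and is not the test's simulated mercury server.

// Illustrative sketch (hypothetical handler; not the test's mercury server).
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"sync"
)

// countingHandler returns failUntil 404s and then a fixed success body,
// mirroring the count-then-succeed logic in the handler above.
func countingHandler(failUntil int, successBody []byte) http.HandlerFunc {
	var (
		mu    sync.Mutex
		count int
	)
	return func(w http.ResponseWriter, r *http.Request) {
		mu.Lock()
		defer mu.Unlock()

		count++
		if count <= failUntil {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write(successBody)
	}
}

func main() {
	const upkeepCount, retriesPerLookup, retryableRounds = 10, 3, 2
	failUntil := upkeepCount * retriesPerLookup * retryableRounds // 60, as in mercuryFailCount

	srv := httptest.NewServer(countingHandler(failUntil, []byte(`{"chainlinkBlob":"0x"}`)))
	defer srv.Close()

	for i := 0; i < failUntil+1; i++ {
		resp, err := http.Get(srv.URL)
		if err != nil {
			panic(err)
		}
		if i == failUntil { // request 61 is the first success
			fmt.Println("status after failures exhausted:", resp.StatusCode) // 200
		}
		resp.Body.Close()
	}
}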
big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) - require.NoError(t, err) + backend.Commit() - backend.Commit() + feeds, err := newFeedLookupUpkeepController(backend, registryOwner) + require.NoError(t, err, "no error expected from creating a feed lookup controller") + + // deploy multiple upkeeps that listen to a log emitter and need to be + // performed for each log event + _ = feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, func(int) bool { + return false + }) + _ = feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken) + _ = feeds.EnableMercury(t, backend, registry, registryOwner) + _ = feeds.VerifyEnv(t, backend, registry, registryOwner) - feeds, err := newFeedLookupUpkeepController(backend, registryOwner) - require.NoError(t, err, "no error expected from creating a feed lookup controller") - - // deploy multiple upkeeps that listen to a log emitter and need to be - // performed for each log event - _ = feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, func(int) bool { - return false - }) - _ = feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken) - _ = feeds.EnableMercury(t, backend, registry, registryOwner) - _ = feeds.VerifyEnv(t, backend, registry, registryOwner) - - // start emitting events in a separate go-routine - // feed lookup relies on a single contract event log to perform multiple - // listener contracts - go func() { - // only 1 event is necessary to make all 10 upkeeps eligible - _ = feeds.EmitEvents(t, backend, 1, func() { - // pause per emit for expected block production time - time.Sleep(3 * time.Second) - }) - }() - - listener, done := listenPerformed(t, backend, registry, feeds.UpkeepsIds(), int64(1)) - defer done() - g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) + // start emitting events in a separate go-routine + // feed lookup relies on a single contract event log to perform multiple + // listener contracts + go func() { + // only 1 event is necessary to make all 10 upkeeps eligible + _ = feeds.EmitEvents(t, backend, 1, func() { + // pause per emit for expected block production time + time.Sleep(3 * time.Second) }) - } + }() + + listener, done := listenPerformed(t, backend, registry, feeds.UpkeepsIds(), int64(1)) + defer done() + g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) } func TestIntegration_KeeperPluginLogUpkeep_ErrHandler(t *testing.T) { - tests := []struct { - name string - logBufferVersion logprovider.BufferVersion - }{ - { - name: "default buffer", - logBufferVersion: logprovider.BufferVersionDefault, - }, - { - name: "buffer v1", - logBufferVersion: logprovider.BufferVersionV1, - }, + g := gomega.NewWithT(t) + + // setup blockchain + linkOwner := testutils.MustNewSimTransactor(t) // owns all the link + registryOwner := testutils.MustNewSimTransactor(t) // registry owner + upkeepOwner := testutils.MustNewSimTransactor(t) // upkeep owner + genesisData := core.GenesisAlloc{ + linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, + registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, + upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - g := gomega.NewWithT(t) - - // setup blockchain - linkOwner := testutils.MustNewSimTransactor(t) // owns all the link - registryOwner := testutils.MustNewSimTransactor(t) // registry owner - upkeepOwner := 
testutils.MustNewSimTransactor(t) // upkeep owner - genesisData := core.GenesisAlloc{ - linkOwner.From: {Balance: assets.Ether(10000).ToInt()}, - registryOwner.From: {Balance: assets.Ether(10000).ToInt()}, - upkeepOwner.From: {Balance: assets.Ether(10000).ToInt()}, - } + // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether + var nodeKeys [5]ethkey.KeyV2 + for i := int64(0); i < 5; i++ { + nodeKeys[i] = cltest.MustGenerateRandomKey(t) + genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} + } - // Generate 5 keys for nodes (1 bootstrap + 4 ocr nodes) and fund them with ether - var nodeKeys [5]ethkey.KeyV2 - for i := int64(0); i < 5; i++ { - nodeKeys[i] = cltest.MustGenerateRandomKey(t) - genesisData[nodeKeys[i].Address] = core.GenesisAccount{Balance: assets.Ether(1000).ToInt()} - } + backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) + stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain + defer stopMining() - backend := cltest.NewSimulatedBackend(t, genesisData, uint32(ethconfig.Defaults.Miner.GasCeil)) - stopMining := cltest.Mine(backend, 3*time.Second) // Should be greater than deltaRound since we cannot access old blocks on simulated blockchain - defer stopMining() + // Deploy registry + linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) + require.NoError(t, err) - // Deploy registry - linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(linkOwner, backend) - require.NoError(t, err) + gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) + require.NoError(t, err) - gasFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(60000000000)) - require.NoError(t, err) + linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) + require.NoError(t, err) - linkFeedAddr, _, _, err := mock_v3_aggregator_contract.DeployMockV3AggregatorContract(registryOwner, backend, 18, big.NewInt(2000000000000000000)) - require.NoError(t, err) + registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) - registry := deployKeeper21Registry(t, registryOwner, backend, linkAddr, linkFeedAddr, gasFeedAddr) + _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner) - _, mercuryServer := setupNodes(t, nodeKeys, registry, backend, registryOwner, tc.logBufferVersion == logprovider.BufferVersionV1) + upkeepCount := 10 - upkeepCount := 10 + errResponses := []int{ + http.StatusUnauthorized, + http.StatusBadRequest, + http.StatusInternalServerError, + http.StatusNotFound, + http.StatusNotFound, + http.StatusNotFound, + http.StatusUnauthorized, + } + startMercuryServer(t, mercuryServer, func(i int) (int, []byte) { + var resp int + if i < len(errResponses) { + resp = errResponses[i] + } + if resp == 0 { + resp = http.StatusNotFound + } + return resp, nil + }) + defer mercuryServer.Stop() - errResponses := []int{ - http.StatusUnauthorized, - http.StatusBadRequest, - http.StatusInternalServerError, - http.StatusNotFound, - http.StatusNotFound, - http.StatusNotFound, - http.StatusUnauthorized, - } - startMercuryServer(t, mercuryServer, func(i int) (int, []byte) { - var resp int - if 
i < len(errResponses) { - resp = errResponses[i] - } - if resp == 0 { - resp = http.StatusNotFound - } - return resp, nil - }) - defer mercuryServer.Stop() + _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) + require.NoError(t, err) - _, err = linkToken.Transfer(linkOwner, upkeepOwner.From, big.NewInt(0).Mul(oneHunEth, big.NewInt(int64(upkeepCount+1)))) - require.NoError(t, err) + backend.Commit() - backend.Commit() + feeds, err := newFeedLookupUpkeepController(backend, registryOwner) + require.NoError(t, err, "no error expected from creating a feed lookup controller") - feeds, err := newFeedLookupUpkeepController(backend, registryOwner) - require.NoError(t, err, "no error expected from creating a feed lookup controller") + // deploy multiple upkeeps that listen to a log emitter and need to be + // performed for each log event + checkResultsProvider := func(i int) bool { + return i%2 == 1 + } + require.NoError(t, feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, checkResultsProvider)) + require.NoError(t, feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken)) + require.NoError(t, feeds.EnableMercury(t, backend, registry, registryOwner)) + require.NoError(t, feeds.VerifyEnv(t, backend, registry, registryOwner)) + + startBlock := backend.Blockchain().CurrentBlock().Number.Int64() + // start emitting events in a separate go-routine + // feed lookup relies on a single contract event log to perform multiple + // listener contracts + go func() { + // only 1 event is necessary to make all 10 upkeeps eligible + _ = feeds.EmitEvents(t, backend, 1, func() { + // pause per emit for expected block production time + time.Sleep(3 * time.Second) + }) + }() - // deploy multiple upkeeps that listen to a log emitter and need to be - // performed for each log event - checkResultsProvider := func(i int) bool { - return i%2 == 1 - } - require.NoError(t, feeds.DeployUpkeeps(t, backend, upkeepOwner, upkeepCount, checkResultsProvider)) - require.NoError(t, feeds.RegisterAndFund(t, registry, registryOwner, backend, linkToken)) - require.NoError(t, feeds.EnableMercury(t, backend, registry, registryOwner)) - require.NoError(t, feeds.VerifyEnv(t, backend, registry, registryOwner)) - - startBlock := backend.Blockchain().CurrentBlock().Number.Int64() - // start emitting events in a separate go-routine - // feed lookup relies on a single contract event log to perform multiple - // listener contracts - go func() { - // only 1 event is necessary to make all 10 upkeeps eligible - _ = feeds.EmitEvents(t, backend, 1, func() { - // pause per emit for expected block production time - time.Sleep(3 * time.Second) - }) - }() - - go makeDummyBlocks(t, backend, 3*time.Second, 1000) - - idsToCheck := make([]*big.Int, 0) - for i, uid := range feeds.UpkeepsIds() { - if checkResultsProvider(i) { - idsToCheck = append(idsToCheck, uid) - } - } + go makeDummyBlocks(t, backend, 3*time.Second, 1000) - listener, done := listenPerformed(t, backend, registry, idsToCheck, startBlock) - defer done() - g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), cltest.DBPollingInterval).Should(gomega.BeTrue()) - }) + idsToCheck := make([]*big.Int, 0) + for i, uid := range feeds.UpkeepsIds() { + if checkResultsProvider(i) { + idsToCheck = append(idsToCheck, uid) + } } + + listener, done := listenPerformed(t, backend, registry, idsToCheck, startBlock) + defer done() + g.Eventually(listener, testutils.WaitTimeout(t)-(5*time.Second), 
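The ErrHandler test above drives startMercuryServer with a responder that walks a fixed slice of status codes and falls back to 404 once the slice is exhausted; because the zero value of an int is 0, the resp == 0 check doubles as the out-of-range default. A minimal standalone version of that responder shape follows, with hypothetical names rather than the test helper itself.

// Illustrative sketch (hypothetical names; not the test helper itself).
package main

import (
	"fmt"
	"net/http"
)

// indexedResponder returns the i-th canned status code and falls back to 404
// once the slice is exhausted, matching the responder passed to
// startMercuryServer above.
func indexedResponder(errResponses []int) func(i int) (int, []byte) {
	return func(i int) (int, []byte) {
		var resp int
		if i < len(errResponses) {
			resp = errResponses[i]
		}
		if resp == 0 { // zero value doubles as the out-of-range default
			resp = http.StatusNotFound
		}
		return resp, nil
	}
}

func main() {
	responder := indexedResponder([]int{
		http.StatusUnauthorized,
		http.StatusBadRequest,
		http.StatusInternalServerError,
	})
	for i := 0; i < 5; i++ {
		code, _ := responder(i)
		fmt.Println(i, code) // 401, 400, 500, then 404, 404
	}
}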
cltest.DBPollingInterval).Should(gomega.BeTrue()) } func startMercuryServer(t *testing.T, mercuryServer *mercury.SimulatedMercuryServer, responder func(i int) (int, []byte)) { @@ -644,7 +589,7 @@ func listenPerformed(t *testing.T, backend *backends.SimulatedBackend, registry return listenPerformedN(t, backend, registry, ids, startBlock, 0) } -func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IKeeperRegistryMaster, backend *backends.SimulatedBackend, usr *bind.TransactOpts, useBufferV1 bool) ([]Node, *mercury.SimulatedMercuryServer) { +func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IKeeperRegistryMaster, backend *backends.SimulatedBackend, usr *bind.TransactOpts) ([]Node, *mercury.SimulatedMercuryServer) { lggr := logger.TestLogger(t) mServer := mercury.NewSimulatedMercuryServer() mServer.Start() @@ -727,8 +672,7 @@ func setupNodes(t *testing.T, nodeKeys [5]ethkey.KeyV2, registry *iregistry21.IK cacheEvictionInterval = "1s" mercuryCredentialName = "%s" contractVersion = "v2.1" - useBufferV1 = %v - `, i, registry.Address(), node.KeyBundle.ID(), node.Transmitter, fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), MercuryCredName, useBufferV1)) + `, i, registry.Address(), node.KeyBundle.ID(), node.Transmitter, fmt.Sprintf("%s@127.0.0.1:%d", bootstrapPeerID, bootstrapNodePort), MercuryCredName)) } // Setup config on contract diff --git a/integration-tests/actions/automationv2/actions.go b/integration-tests/actions/automationv2/actions.go index f7d495bda5a..40caf15917b 100644 --- a/integration-tests/actions/automationv2/actions.go +++ b/integration-tests/actions/automationv2/actions.go @@ -88,8 +88,7 @@ type AutomationTest struct { mercuryCredentialName string TransmitterKeyIndex int - Logger zerolog.Logger - useLogBufferV1 bool + Logger zerolog.Logger } type UpkeepConfig struct { @@ -117,7 +116,6 @@ func NewAutomationTestK8s( TransmitterKeyIndex: 0, UpkeepPrivilegeManager: chainClient.MustGetRootKeyAddress(), mercuryCredentialName: "", - useLogBufferV1: false, Logger: l, } } @@ -134,7 +132,6 @@ func NewAutomationTestDocker( TransmitterKeyIndex: 0, UpkeepPrivilegeManager: chainClient.MustGetRootKeyAddress(), mercuryCredentialName: "", - useLogBufferV1: false, Logger: l, } } @@ -147,10 +144,6 @@ func (a *AutomationTest) SetMercuryCredentialName(name string) { a.mercuryCredentialName = name } -func (a *AutomationTest) SetUseLogBufferV1(flag bool) { - a.useLogBufferV1 = flag -} - func (a *AutomationTest) SetTransmitterKeyIndex(index int) { a.TransmitterKeyIndex = index } @@ -422,9 +415,6 @@ func (a *AutomationTest) AddAutomationJobs() error { if a.mercuryCredentialName != "" { pluginCfg["mercuryCredentialName"] = "\"" + a.mercuryCredentialName + "\"" } - if a.useLogBufferV1 { - pluginCfg["useBufferV1"] = "true" - } } for i := 1; i < len(a.ChainlinkNodes); i++ { autoOCR2JobSpec := client.OCR2TaskJobSpec{ diff --git a/integration-tests/load/automationv2_1/automationv2_1_test.go b/integration-tests/load/automationv2_1/automationv2_1_test.go index b0555cdd3f8..0fe13d4bc73 100644 --- a/integration-tests/load/automationv2_1/automationv2_1_test.go +++ b/integration-tests/load/automationv2_1/automationv2_1_test.go @@ -368,10 +368,6 @@ Load Config: a.SetMercuryCredentialName("cred1") } - if *conf.UseLogBufferV1 { - a.SetUseLogBufferV1(true) - } - startTimeTestSetup := time.Now() l.Info().Str("START_TIME", startTimeTestSetup.String()).Msg("Test setup started") diff --git a/integration-tests/testconfig/automation/config.go 
b/integration-tests/testconfig/automation/config.go index bde989f544b..e6df7714af7 100644 --- a/integration-tests/testconfig/automation/config.go +++ b/integration-tests/testconfig/automation/config.go @@ -160,7 +160,6 @@ type AutomationConfig struct { PluginConfig *PluginConfig `toml:"PluginConfig"` PublicConfig *PublicConfig `toml:"PublicConfig"` RegistrySettings *RegistrySettings `toml:"RegistrySettings"` - UseLogBufferV1 *bool `toml:"use_log_buffer_v1"` } func (c *AutomationConfig) Validate() error { @@ -170,13 +169,7 @@ func (c *AutomationConfig) Validate() error { if err := c.PublicConfig.Validate(); err != nil { return err } - if err := c.RegistrySettings.Validate(); err != nil { - return err - } - if c.UseLogBufferV1 == nil { - return errors.New("use_log_buffer_v1 must be set") - } - return nil + return c.RegistrySettings.Validate() } type PluginConfig struct {