Fix logpoller integration tests #11440

Closed · wants to merge 6 commits
Changes from 4 commits
16 changes: 16 additions & 0 deletions config-patch.txt
@@ -0,0 +1,16 @@
diff --git a/docs/CONFIG.md b/docs/CONFIG.md
index 63c20bdf4a..fe30c7b945 100644
--- a/docs/CONFIG.md
+++ b/docs/CONFIG.md
@@ -5435,8 +5435,9 @@ A re-org occurs at height 47 starting at block 41, transaction is NOT marked for
```toml
FinalityTagEnabled = false # Default
```
-FinalityTagEnabled means that the chain supports the finalized block tag when querying for a block. If FinalityTagEnabled is set to true for a chain, then FinalityDepth field is ignored.
-Finality for a block is solely defined by the finality related tags provided by the chain's RPC API. This is a placeholder and hasn't been implemented yet.
+FinalityTagEnabled means that the chain supports the finalized block tag when querying for a block. If FinalityTagEnabled is set to true for a chain, then the FinalityDepth field will be ignored (once
+fully implemented): finality for a block will be defined solely by the finality tags provided by the chain's RPC API. This feature is new and still only partially implemented. LogPoller respects it,
+but HeadTracker still relies on FinalityDepth, which must be set to something sensible even if FinalityTagEnabled = true.

### FlagsContractAddress
:warning: **_ADVANCED_**: _Do not change this setting unless you know what you are doing._
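Per the patched doc text above, a chain config that enables the tag should still carry a depth for HeadTracker. A hypothetical illustration in the same style as the doc's own snippet (the depth value is illustrative, not a recommendation from the PR):

```toml
# Finality tags drive LogPoller, but HeadTracker still reads FinalityDepth,
# so both are set while the feature is only partially implemented.
FinalityTagEnabled = true
FinalityDepth = 50 # illustrative; pick something sensible for the chain
```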
37 changes: 16 additions & 21 deletions integration-tests/universal/log_poller/scenarios.go
@@ -83,18 +83,18 @@ func ExecuteBasicLogPollerTest(t *testing.T, cfg *Config) {
expectedFilters := getExpectedFilters(logEmitters, cfg)
gom := gomega.NewGomegaWithT(t)
gom.Eventually(func(g gomega.Gomega) {
+hasFilters := false
for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
nodeName := testEnv.ClCluster.Nodes[i].ContainerName
l.Info().Str("Node name", nodeName).Msg("Fetching filters from log poller's DB")

-hasFilters, err := nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
+hasFilters, err = nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
if err != nil {
l.Warn().Err(err).Msg("Error checking if node has expected filters. Retrying...")
-return
+break
}

-g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
}
+g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
}, "30s", "1s").Should(gomega.Succeed())
l.Info().Msg("All nodes have expected filters registered")
l.Info().Int("Count", len(expectedFilters)).Msg("Expected filters count")
@@ -191,6 +191,7 @@ func ExecuteLogPollerReplay(t *testing.T, cfg *Config, consistencyTimeout string
}

l.Info().Msg("Starting replay log poller test")
l.Info().Msg("( hasFilters bug fixed )")
Contributor: remove?

Contributor Author: Could have sworn I'd removed that... gone now

var (
err error
@@ -232,6 +233,7 @@ func ExecuteLogPollerReplay(t *testing.T, cfg *Config, consistencyTimeout string
sb, err := testEnv.EVMClient.LatestBlockNumber(testcontext.Get(t))
require.NoError(t, err, "Error getting latest block number")
startBlock := int64(sb)
l.Info().Int64("Starting Block: ", startBlock)

l.Info().Msg("STARTING EVENT EMISSION")
startTime := time.Now()
@@ -249,6 +251,8 @@ func ExecuteLogPollerReplay(t *testing.T, cfg *Config, consistencyTimeout string
endBlock, err := GetEndBlockToWaitFor(int64(eb), testEnv.EVMClient.GetChainID().Int64(), cfg)
require.NoError(t, err, "Error getting end block to wait for")

l.Info().Int64("Ending Block: ", endBlock)

// Let's make sure no logs are in DB yet
expectedFilters := getExpectedFilters(logEmitters, cfg)
logCountMatches, err := clNodesHaveExpectedLogCount(startBlock, endBlock, testEnv.EVMClient.GetChainID(), 0, expectedFilters, l, coreLogger, testEnv.ClCluster)
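A detail behind the `.Msg(...)` terminators on the two new log lines above: zerolog event chains are lazy, and nothing is written until the chain ends in `Msg`, `Msgf`, or `Send`, so a bare `l.Info().Int64(...)` builds an event that is never emitted. A minimal sketch with a plain zerolog logger (assuming the test framework's `l` wraps a standard zerolog.Logger, which is an assumption here):

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	l := zerolog.New(os.Stdout).With().Timestamp().Logger()
	startBlock := int64(12345) // illustrative value

	l.Info().Int64("Starting Block", startBlock)                           // builds an event but never writes it
	l.Info().Int64("Starting Block", startBlock).Msg("Got starting block") // actually emits the log line
}
```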
@@ -274,31 +278,22 @@ func ExecuteLogPollerReplay(t *testing.T, cfg *Config, consistencyTimeout string
// Make sure that all nodes have expected filters registered before starting to emit events
gom := gomega.NewGomegaWithT(t)
gom.Eventually(func(g gomega.Gomega) {
+hasFilters := false
for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
nodeName := testEnv.ClCluster.Nodes[i].ContainerName
l.Info().Str("Node name", nodeName).Msg("Fetching filters from log poller's DB")

-hasFilters, err := nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
+hasFilters, err = nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
if err != nil {
l.Warn().Err(err).Msg("Error checking if node has expected filters. Retrying...")
-return
+break
}

-g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
}
+g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
}, "30s", "1s").Should(gomega.Succeed())
l.Info().Msg("All nodes have expected filters registered")
l.Info().Int("Count", len(expectedFilters)).Msg("Expected filters count")

l.Warn().Str("Duration", "1m").Msg("Waiting for all CL nodes to have end block finalised")
gom.Eventually(func(g gomega.Gomega) {
hasFinalised, err := logPollerHasFinalisedEndBlock(endBlock, testEnv.EVMClient.GetChainID(), l, coreLogger, testEnv.ClCluster)
if err != nil {
l.Warn().Err(err).Msg("Error checking if nodes have finalised end block. Retrying...")
}
g.Expect(hasFinalised).To(gomega.BeTrue(), "Some nodes have not finalised end block")
}, "1m", "30s").Should(gomega.Succeed())

// Trigger replay
l.Info().Msg("Triggering log poller's replay")
for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
@@ -402,18 +397,18 @@ func ExecuteCILogPollerTest(t *testing.T, cfg *Config) {
expectedFilters := getExpectedFilters(logEmitters, cfg)
gom := gomega.NewGomegaWithT(t)
gom.Eventually(func(g gomega.Gomega) {
+hasFilters := false
for i := 1; i < len(testEnv.ClCluster.Nodes); i++ {
nodeName := testEnv.ClCluster.Nodes[i].ContainerName
l.Info().Str("Node name", nodeName).Msg("Fetching filters from log poller's DB")

-hasFilters, err := nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
+hasFilters, err = nodeHasExpectedFilters(expectedFilters, coreLogger, testEnv.EVMClient.GetChainID(), testEnv.ClCluster.Nodes[i].PostgresDb)
if err != nil {
l.Warn().Err(err).Msg("Error checking if node has expected filters. Retrying...")
-return
+break
}

-g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
}
+g.Expect(hasFilters).To(gomega.BeTrue(), "Not all expected filters were found in the DB")
}, "1m", "1s").Should(gomega.Succeed())
l.Info().Msg("All nodes have expected filters registered")
l.Info().Int("Count", len(expectedFilters)).Msg("Expected filters count")