From e9a97ad3588d32dd3cfc062acb435f5fa9a30abd Mon Sep 17 00:00:00 2001 From: Makram Date: Mon, 16 Oct 2023 12:29:42 +0300 Subject: [PATCH 1/9] update bhs wrapper generation (#10944) Update BHS wrapper generation to use 0.8.6 rather than 0.6 since it's using the more updated version of ChainSpecificUtil. --- .../generated/blockhash_store/blockhash_store.go | 2 +- .../generated-wrapper-dependency-versions-do-not-edit.txt | 2 +- core/gethwrappers/go_generate.go | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/core/gethwrappers/generated/blockhash_store/blockhash_store.go b/core/gethwrappers/generated/blockhash_store/blockhash_store.go index 449df2715ea..8711f13b2d6 100644 --- a/core/gethwrappers/generated/blockhash_store/blockhash_store.go +++ b/core/gethwrappers/generated/blockhash_store/blockhash_store.go @@ -30,7 +30,7 @@ var ( var BlockhashStoreMetaData = &bind.MetaData{ ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"getBlockhash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"}],\"name\":\"store\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"storeEarliest\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"n\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"header\",\"type\":\"bytes\"}],\"name\":\"storeVerifyHeader\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: 
"0x608060405234801561001057600080fd5b506104b1806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c80636057361d1461005157806383b6d6b714610070578063e9413d3814610078578063fadff0e1146100a7575b600080fd5b61006e6004803603602081101561006757600080fd5b5035610154565b005b61006e6101df565b6100956004803603602081101561008e57600080fd5b50356101f5565b60408051918252519081900360200190f35b61006e600480360360408110156100bd57600080fd5b813591908101906040810160208201356401000000008111156100df57600080fd5b8201836020820111156100f157600080fd5b8035906020019184600183028401116401000000008311171561011357600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550610278945050505050565b600061015f82610318565b9050806101cd57604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f626c6f636b68617368286e29206661696c656400000000000000000000000000604482015290519081900360640190fd5b60009182526020829052604090912055565b6101f36101006101ed6103f8565b03610154565b565b6000818152602081905260408120548061027057604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f626c6f636b68617368206e6f7420666f756e6420696e2073746f726500000000604482015290519081900360640190fd5b90505b919050565b6000808360010181526020019081526020016000205481805190602001201461030257604080517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f6865616465722068617320756e6b6e6f776e20626c6f636b6861736800000000604482015290519081900360640190fd5b6024015160009182526020829052604090912055565b6000806103236104a0565b905061a4b1811480610337575062066eed81145b156103f257610100836103486103f8565b03118061035c57506103586103f8565b8310155b1561036b576000915050610273565b606473ffffffffffffffffffffffffffffffffffffffff16632b407a82846040518263ffffffff1660e01b81526004018082815260200191505060206040518083038186803b1580156103bd57600080fd5b505afa1580156103d
1573d6000803e3d6000fd5b505050506040513d60208110156103e757600080fd5b505191506102739050565b50504090565b6000806104036104a0565b905061a4b1811480610417575062066eed81145b1561049857606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b15801561046357600080fd5b505afa158015610477573d6000803e3d6000fd5b505050506040513d602081101561048d57600080fd5b5051915061049d9050565b439150505b90565b469056fea164736f6c6343000606000a", + Bin: "0x608060405234801561001057600080fd5b506105d3806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c80636057361d1461005157806383b6d6b714610066578063e9413d381461006e578063fadff0e114610093575b600080fd5b61006461005f366004610447565b6100a6565b005b610064610131565b61008161007c366004610447565b61014b565b60405190815260200160405180910390f35b6100646100a1366004610460565b6101c7565b60006100b182610269565b90508061011f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601360248201527f626c6f636b68617368286e29206661696c65640000000000000000000000000060448201526064015b60405180910390fd5b60009182526020829052604090912055565b61014961010061013f61036e565b61005f9190610551565b565b600081815260208190526040812054806101c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f626c6f636b68617368206e6f7420666f756e6420696e2073746f7265000000006044820152606401610116565b92915050565b6000806101d5846001610539565b815260200190815260200160002054818051906020012014610253576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f6865616465722068617320756e6b6e6f776e20626c6f636b68617368000000006044820152606401610116565b6024015160009182526020829052604090912055565b6000466102758161040b565b1561035e576101008367ffffffffffffffff1661029061036e565b61029a9190610551565b11806102b757506102a961036e565b8367ffffffffffffffff1610155b156102c55750600092915050565b6040517f2b407a82000000000000000000
00000000000000000000000000000000000000815267ffffffffffffffff84166004820152606490632b407a829060240160206040518083038186803b15801561031f57600080fd5b505afa158015610333573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610357919061042e565b9392505050565b505067ffffffffffffffff164090565b60004661037a8161040b565b1561040457606473ffffffffffffffffffffffffffffffffffffffff1663a3b1b31d6040518163ffffffff1660e01b815260040160206040518083038186803b1580156103c657600080fd5b505afa1580156103da573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906103fe919061042e565b91505090565b4391505090565b600061a4b182148061041f575062066eed82145b806101c157505062066eee1490565b60006020828403121561044057600080fd5b5051919050565b60006020828403121561045957600080fd5b5035919050565b6000806040838503121561047357600080fd5b82359150602083013567ffffffffffffffff8082111561049257600080fd5b818501915085601f8301126104a657600080fd5b8135818111156104b8576104b8610597565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156104fe576104fe610597565b8160405282815288602084870101111561051757600080fd5b8260208601602083013760006020848301015280955050505050509250929050565b6000821982111561054c5761054c610568565b500190565b60008282101561056357610563610568565b500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c6343000806000a", } var BlockhashStoreABI = BlockhashStoreMetaData.ABI diff --git a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt index e6809f3aa00..91bd67ca505 100644 --- a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt +++ b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -10,7 +10,7 @@ 
automation_utils_2_1: ../../contracts/solc/v0.8.16/AutomationUtils2_1.abi ../../ batch_blockhash_store: ../../contracts/solc/v0.8.6/BatchBlockhashStore.abi ../../contracts/solc/v0.8.6/BatchBlockhashStore.bin 14356c48ef70f66ef74f22f644450dbf3b2a147c1b68deaa7e7d1eb8ffab15db batch_vrf_coordinator_v2: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.bin d0a54963260d8c1f1bbd984b758285e6027cfb5a7e42701bcb562ab123219332 batch_vrf_coordinator_v2plus: ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2Plus.bin 7bb76ae241cf1b37b41920830b836cb99f1ad33efd7435ca2398ff6cd2fe5d48 -blockhash_store: ../../contracts/solc/v0.6/BlockhashStore.abi ../../contracts/solc/v0.6/BlockhashStore.bin a0dc60bcc4bf071033d23fddf7ae936c6a4d1dd81488434b7e24b7aa1fabc37c +blockhash_store: ../../contracts/solc/v0.8.6/BlockhashStore.abi ../../contracts/solc/v0.8.6/BlockhashStore.bin 12b0662f1636a341c8863bdec7a20f2ddd97c3a4fd1a7ae353fe316609face4e chain_specific_util_helper: ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper.abi ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper.bin 5f10664e31abc768f4a37901cae7a3bef90146180f97303e5a1bde5a08d84595 consumer_wrapper: ../../contracts/solc/v0.7/Consumer.abi ../../contracts/solc/v0.7/Consumer.bin 894d1cbd920dccbd36d92918c1037c6ded34f66f417ccb18ec3f33c64ef83ec5 cron_upkeep_factory_wrapper: ../../contracts/solc/v0.8.6/CronUpkeepFactory.abi - dacb0f8cdf54ae9d2781c5e720fc314b32ed5e58eddccff512c75d6067292cd7 diff --git a/core/gethwrappers/go_generate.go b/core/gethwrappers/go_generate.go index 0dc1aaef825..2680b899b1e 100644 --- a/core/gethwrappers/go_generate.go +++ b/core/gethwrappers/go_generate.go @@ -13,7 +13,6 @@ package gethwrappers //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper.abi ../../contracts/solc/v0.6/VRFRequestIDBaseTestHelper.bin VRFRequestIDBaseTestHelper solidity_vrf_request_id 
//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/Flags.abi ../../contracts/solc/v0.6/Flags.bin Flags flags_wrapper //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/Oracle.abi ../../contracts/solc/v0.6/Oracle.bin Oracle oracle_wrapper -//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/BlockhashStore.abi ../../contracts/solc/v0.6/BlockhashStore.bin BlockhashStore blockhash_store //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/TestAPIConsumer.abi ../../contracts/solc/v0.6/TestAPIConsumer.bin TestAPIConsumer test_api_consumer_wrapper //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/MockETHLINKAggregator.abi ../../contracts/solc/v0.6/MockETHLINKAggregator.bin MockETHLINKAggregator mock_ethlink_aggregator_wrapper //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.6/MockGASAggregator.abi ../../contracts/solc/v0.6/MockGASAggregator.bin MockGASAggregator mock_gas_aggregator_wrapper @@ -24,8 +23,6 @@ package gethwrappers //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/OperatorFactory.abi ../../contracts/solc/v0.7/OperatorFactory.bin OperatorFactory operator_factory //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/AuthorizedForwarder.abi ../../contracts/solc/v0.7/AuthorizedForwarder.bin AuthorizedForwarder authorized_forwarder //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.7/AuthorizedReceiver.abi ../../contracts/solc/v0.7/AuthorizedReceiver.bin AuthorizedReceiver authorized_receiver -//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchBlockhashStore.abi ../../contracts/solc/v0.8.6/BatchBlockhashStore.bin BatchBlockhashStore batch_blockhash_store -//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.abi 
../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.bin BatchVRFCoordinatorV2 batch_vrf_coordinator_v2 //go:generate go run ./generation/generate/wrap.go OffchainAggregator/OffchainAggregator.abi - OffchainAggregator offchain_aggregator_wrapper // Automation @@ -78,6 +75,9 @@ package gethwrappers //go:generate go run ./generation/generate_link/wrap_link.go // VRF V2 +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BlockhashStore.abi ../../contracts/solc/v0.8.6/BlockhashStore.bin BlockhashStore blockhash_store +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchBlockhashStore.abi ../../contracts/solc/v0.8.6/BatchBlockhashStore.bin BatchBlockhashStore batch_blockhash_store +//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/BatchVRFCoordinatorV2.bin BatchVRFCoordinatorV2 batch_vrf_coordinator_v2 //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFOwner.abi ../../contracts/solc/v0.8.6/VRFOwner.bin VRFOwner vrf_owner //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFCoordinatorV2.abi ../../contracts/solc/v0.8.6/VRFCoordinatorV2.bin VRFCoordinatorV2 vrf_coordinator_v2 //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/VRFConsumerV2.abi ../../contracts/solc/v0.8.6/VRFConsumerV2.bin VRFConsumerV2 vrf_consumer_v2 From 96155bc2d3eab22c6ba46881348567194abc065e Mon Sep 17 00:00:00 2001 From: Lukasz <120112546+lukaszcl@users.noreply.github.com> Date: Mon, 16 Oct 2023 11:38:45 +0200 Subject: [PATCH 2/9] Refactor E2E docker wrappers to use ClCluster (#10945) * Use E2E wrapper for node and cluster from CTF * Fix * Fix go mod * Fix * Move back node and cluster wrappers to core * Update go mod * Update go mod --- integration-tests/actions/actions_local.go | 1 - .../actions/vrfv2_actions/vrfv2_steps.go | 8 +- .../actions/vrfv2plus/vrfv2plus_steps.go | 
17 +-- integration-tests/docker/test_env/cl_node.go | 15 ++- .../docker/test_env/cl_node_cluster.go | 68 ++++++++++++ integration-tests/docker/test_env/test_env.go | 101 ++++-------------- .../docker/test_env/test_env_builder.go | 6 +- .../docker/test_env/test_env_config.go | 7 +- integration-tests/go.mod | 29 +++-- integration-tests/go.sum | 48 ++++----- integration-tests/load/vrfv2/vrfv2_test.go | 5 +- .../migration/upgrade_version_test.go | 2 +- integration-tests/smoke/automation_test.go | 4 +- integration-tests/smoke/cron_test.go | 6 +- integration-tests/smoke/flux_test.go | 4 +- integration-tests/smoke/forwarder_ocr_test.go | 2 +- .../smoke/forwarders_ocr2_test.go | 2 +- integration-tests/smoke/keeper_test.go | 2 +- integration-tests/smoke/ocr2_test.go | 2 +- integration-tests/smoke/ocr_test.go | 2 +- integration-tests/smoke/runlog_test.go | 4 +- integration-tests/smoke/vrf_test.go | 4 +- integration-tests/smoke/vrfv2_test.go | 10 +- integration-tests/smoke/vrfv2plus_test.go | 10 +- 24 files changed, 182 insertions(+), 177 deletions(-) create mode 100644 integration-tests/docker/test_env/cl_node_cluster.go diff --git a/integration-tests/actions/actions_local.go b/integration-tests/actions/actions_local.go index d2e2fde3217..b65bac43bb1 100644 --- a/integration-tests/actions/actions_local.go +++ b/integration-tests/actions/actions_local.go @@ -3,7 +3,6 @@ package actions import ( "github.com/pkg/errors" - "github.com/smartcontractkit/chainlink/integration-tests/docker/test_env" ) diff --git a/integration-tests/actions/vrfv2_actions/vrfv2_steps.go b/integration-tests/actions/vrfv2_actions/vrfv2_steps.go index 96886eb8493..24ac217a334 100644 --- a/integration-tests/actions/vrfv2_actions/vrfv2_steps.go +++ b/integration-tests/actions/vrfv2_actions/vrfv2_steps.go @@ -212,21 +212,21 @@ func SetupLocalLoadTestEnv(nodesFunding *big.Float, subFundingLINK *big.Int) (*t if err != nil { return nil, nil, [32]byte{}, err } - jobs, err := CreateVRFV2Jobs(env.GetAPIs(), 
vrfv2Contracts.Coordinator, env.EVMClient, vrfConst.MinimumConfirmations) + jobs, err := CreateVRFV2Jobs(env.ClCluster.NodeAPIs(), vrfv2Contracts.Coordinator, env.EVMClient, vrfConst.MinimumConfirmations) if err != nil { return nil, nil, [32]byte{}, err } // this part is here because VRFv2 can work with only a specific key // [[EVM.KeySpecific]] // Key = '...' - addr, err := env.CLNodes[0].API.PrimaryEthAddress() + addr, err := env.ClCluster.Nodes[0].API.PrimaryEthAddress() if err != nil { return nil, nil, [32]byte{}, err } - nodeConfig := node.NewConfig(env.CLNodes[0].NodeConfig, + nodeConfig := node.NewConfig(env.ClCluster.Nodes[0].NodeConfig, node.WithVRFv2EVMEstimator(addr), ) - err = env.CLNodes[0].Restart(nodeConfig) + err = env.ClCluster.Nodes[0].Restart(nodeConfig) if err != nil { return nil, nil, [32]byte{}, err } diff --git a/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go b/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go index 5d4b963f763..17e321ea63a 100644 --- a/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go +++ b/integration-tests/actions/vrfv2plus/vrfv2plus_steps.go @@ -3,11 +3,12 @@ package vrfv2plus import ( "context" "fmt" - "github.com/smartcontractkit/chainlink/v2/core/assets" - "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer" "math/big" "time" + "github.com/smartcontractkit/chainlink/v2/core/assets" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/vrfv2plus_wrapper_load_test_consumer" + "github.com/ethereum/go-ethereum/common" "github.com/google/uuid" "github.com/pkg/errors" @@ -288,13 +289,13 @@ func SetupVRFV2_5Environment( return nil, nil, nil, err } - vrfKey, err := env.GetAPIs()[0].MustCreateVRFKey() + vrfKey, err := env.ClCluster.NodeAPIs()[0].MustCreateVRFKey() if err != nil { return nil, nil, nil, errors.Wrap(err, ErrCreatingVRFv2PlusKey) } pubKeyCompressed := vrfKey.Data.ID - nativeTokenPrimaryKeyAddress, err := 
env.GetAPIs()[0].PrimaryEthAddress() + nativeTokenPrimaryKeyAddress, err := env.ClCluster.NodeAPIs()[0].PrimaryEthAddress() if err != nil { return nil, nil, nil, errors.Wrap(err, ErrNodePrimaryKey) } @@ -310,7 +311,7 @@ func SetupVRFV2_5Environment( chainID := env.EVMClient.GetChainID() job, err := CreateVRFV2PlusJob( - env.GetAPIs()[0], + env.ClCluster.NodeAPIs()[0], vrfv2_5Contracts.Coordinator.Address(), nativeTokenPrimaryKeyAddress, pubKeyCompressed, @@ -324,14 +325,14 @@ func SetupVRFV2_5Environment( // this part is here because VRFv2 can work with only a specific key // [[EVM.KeySpecific]] // Key = '...' - addr, err := env.CLNodes[0].API.PrimaryEthAddress() + addr, err := env.ClCluster.Nodes[0].API.PrimaryEthAddress() if err != nil { return nil, nil, nil, errors.Wrap(err, ErrGetPrimaryKey) } - nodeConfig := node.NewConfig(env.CLNodes[0].NodeConfig, + nodeConfig := node.NewConfig(env.ClCluster.Nodes[0].NodeConfig, node.WithVRFv2EVMEstimator(addr), ) - err = env.CLNodes[0].Restart(nodeConfig) + err = env.ClCluster.Nodes[0].Restart(nodeConfig) if err != nil { return nil, nil, nil, errors.Wrap(err, ErrRestartCLNode) } diff --git a/integration-tests/docker/test_env/cl_node.go b/integration-tests/docker/test_env/cl_node.go index c3618d10ad9..c9193d9095b 100644 --- a/integration-tests/docker/test_env/cl_node.go +++ b/integration-tests/docker/test_env/cl_node.go @@ -45,15 +45,13 @@ var ( type ClNode struct { test_env.EnvComponent - API *client.ChainlinkClient - NodeConfig *chainlink.Config - NodeSecretsConfigTOML string - PostgresDb *test_env.PostgresDb - lw *logwatch.LogWatch - ContainerImage string - ContainerVersion string + API *client.ChainlinkClient `json:"-"` + NodeConfig *chainlink.Config `json:"-"` + NodeSecretsConfigTOML string `json:"-"` + PostgresDb *test_env.PostgresDb `json:"postgresDb"` t *testing.T l zerolog.Logger + lw *logwatch.LogWatch } type ClNodeOption = func(c *ClNode) @@ -107,11 +105,10 @@ func NewClNode(networks []string, nodeConfig 
*chainlink.Config, opts ...ClNodeOp return n } -func (n *ClNode) WithTestLogger(t *testing.T) *ClNode { +func (n *ClNode) SetTestLogger(t *testing.T) { n.l = logging.GetTestLogger(t) n.t = t n.PostgresDb.WithTestLogger(t) - return n } // Restart restarts only CL node, DB container is reused diff --git a/integration-tests/docker/test_env/cl_node_cluster.go b/integration-tests/docker/test_env/cl_node_cluster.go new file mode 100644 index 00000000000..a717a192649 --- /dev/null +++ b/integration-tests/docker/test_env/cl_node_cluster.go @@ -0,0 +1,68 @@ +package test_env + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/pkg/errors" + "github.com/smartcontractkit/chainlink/integration-tests/client" + "golang.org/x/sync/errgroup" +) + +var ( + ErrGetNodeCSAKeys = "failed get CL node CSA keys" +) + +type ClCluster struct { + Nodes []*ClNode `json:"nodes"` +} + +// Start all nodes in the cluster./docker/tests/functional/api +func (c *ClCluster) Start() error { + eg := &errgroup.Group{} + nodes := c.Nodes + + for i := 0; i < len(nodes); i++ { + nodeIndex := i + eg.Go(func() error { + err := nodes[nodeIndex].StartContainer() + if err != nil { + return err + } + return nil + }) + } + + return eg.Wait() +} + +func (c *ClCluster) NodeAPIs() []*client.ChainlinkClient { + clients := make([]*client.ChainlinkClient, 0) + for _, c := range c.Nodes { + clients = append(clients, c.API) + } + return clients +} + +// Return all the on-chain wallet addresses for a set of Chainlink nodes +func (c *ClCluster) NodeAddresses() ([]common.Address, error) { + addresses := make([]common.Address, 0) + for _, n := range c.Nodes { + primaryAddress, err := n.ChainlinkNodeAddress() + if err != nil { + return nil, err + } + addresses = append(addresses, primaryAddress) + } + return addresses, nil +} + +func (c *ClCluster) NodeCSAKeys() ([]string, error) { + var keys []string + for _, n := range c.Nodes { + csaKeys, err := n.GetNodeCSAKeys() + if err != nil { + return nil, 
errors.Wrap(err, ErrGetNodeCSAKeys) + } + keys = append(keys, csaKeys.Data[0].ID) + } + return keys, nil +} diff --git a/integration-tests/docker/test_env/test_env.go b/integration-tests/docker/test_env/test_env.go index f7a37cc3606..9eb9ed9f39b 100644 --- a/integration-tests/docker/test_env/test_env.go +++ b/integration-tests/docker/test_env/test_env.go @@ -12,7 +12,6 @@ import ( "time" "github.com/ethereum/go-ethereum/accounts/keystore" - "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -25,16 +24,14 @@ import ( "github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/logwatch" - "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" - "github.com/smartcontractkit/chainlink/integration-tests/client" "github.com/smartcontractkit/chainlink/integration-tests/contracts" "github.com/smartcontractkit/chainlink/integration-tests/utils" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" ) var ( - ErrFundCLNode = "failed to fund CL node" - ErrGetNodeCSAKeys = "failed get CL node CSA keys" + ErrFundCLNode = "failed to fund CL node" ) type CLClusterTestEnv struct { @@ -43,7 +40,7 @@ type CLClusterTestEnv struct { LogWatch *logwatch.LogWatch /* components */ - CLNodes []*ClNode + ClCluster *ClCluster Geth *test_env.Geth // for tests using --dev networks PrivateChain []test_env.PrivateChain // for tests using non-dev networks MockAdapter *test_env.Killgrave @@ -139,73 +136,33 @@ func (te *CLClusterTestEnv) StartMockAdapter() error { return te.MockAdapter.StartContainer() } -func (te *CLClusterTestEnv) GetAPIs() []*client.ChainlinkClient { - clients := make([]*client.ChainlinkClient, 0) - for _, c := range te.CLNodes { - clients = append(clients, c.API) - } - return clients -} - -// StartClNodes start one bootstrap node and {count} OCR nodes -func (te *CLClusterTestEnv) StartClNodes(nodeConfig *chainlink.Config, 
count int, secretsConfig string) error { - eg := &errgroup.Group{} - nodes := make(chan *ClNode, count) - - // Start nodes - for i := 0; i < count; i++ { - nodeIndex := i - eg.Go(func() error { - var nodeContainerName, dbContainerName string - if te.Cfg != nil { - nodeContainerName = te.Cfg.Nodes[nodeIndex].NodeContainerName - dbContainerName = te.Cfg.Nodes[nodeIndex].DbContainerName - } - n := NewClNode([]string{te.Network.Name}, nodeConfig, +func (te *CLClusterTestEnv) StartClCluster(nodeConfig *chainlink.Config, count int, secretsConfig string) error { + if te.Cfg != nil && te.Cfg.ClCluster != nil { + te.ClCluster = te.Cfg.ClCluster + } else { + te.ClCluster = &ClCluster{} + for i := 0; i < count; i++ { + ocrNode := NewClNode([]string{te.Network.Name}, nodeConfig, WithSecrets(secretsConfig), - WithNodeContainerName(nodeContainerName), - WithDbContainerName(dbContainerName), ) - if te.t != nil { - n.WithTestLogger(te.t) - } - err := n.StartContainer() - if err != nil { - return err - } - nodes <- n - return nil - }) - } - - if err := eg.Wait(); err != nil { - return err - } - close(nodes) - - for node := range nodes { - te.CLNodes = append(te.CLNodes, node) + te.ClCluster.Nodes = append(te.ClCluster.Nodes, ocrNode) + } } - return nil -} - -// ChainlinkNodeAddresses will return all the on-chain wallet addresses for a set of Chainlink nodes -func (te *CLClusterTestEnv) ChainlinkNodeAddresses() ([]common.Address, error) { - addresses := make([]common.Address, 0) - for _, n := range te.CLNodes { - primaryAddress, err := n.ChainlinkNodeAddress() - if err != nil { - return nil, err + // Set test logger + if te.t != nil { + for _, n := range te.ClCluster.Nodes { + n.SetTestLogger(te.t) } - addresses = append(addresses, primaryAddress) } - return addresses, nil + + // Start/attach node containers + return te.ClCluster.Start() } // FundChainlinkNodes will fund all the provided Chainlink nodes with a set amount of native currency func (te *CLClusterTestEnv) 
FundChainlinkNodes(amount *big.Float) error { - for _, cl := range te.CLNodes { + for _, cl := range te.ClCluster.Nodes { if err := cl.Fund(te.EVMClient, amount); err != nil { return errors.Wrap(err, ErrFundCLNode) } @@ -213,18 +170,6 @@ func (te *CLClusterTestEnv) FundChainlinkNodes(amount *big.Float) error { return te.EVMClient.WaitForEvents() } -func (te *CLClusterTestEnv) GetNodeCSAKeys() ([]string, error) { - var keys []string - for _, n := range te.CLNodes { - csaKeys, err := n.GetNodeCSAKeys() - if err != nil { - return nil, errors.Wrap(err, ErrGetNodeCSAKeys) - } - keys = append(keys, csaKeys.Data[0].ID) - } - return keys, nil -} - func (te *CLClusterTestEnv) Terminate() error { // TESTCONTAINERS_RYUK_DISABLED=false by default so ryuk will remove all // the containers and the Network @@ -237,7 +182,7 @@ func (te *CLClusterTestEnv) Cleanup() error { if te.t == nil { return errors.New("cannot cleanup test environment without a testing.T") } - if te.CLNodes == nil { + if te.ClCluster == nil || len(te.ClCluster.Nodes) == 0 { return errors.New("chainlink nodes are nil, unable cleanup chainlink nodes") } @@ -279,7 +224,7 @@ func (te *CLClusterTestEnv) collectTestLogs() error { } eg := &errgroup.Group{} - for _, n := range te.CLNodes { + for _, n := range te.ClCluster.Nodes { node := n eg.Go(func() error { logFileName := filepath.Join(folder, fmt.Sprintf("node-%s.log", node.ContainerName)) @@ -311,7 +256,7 @@ func (te *CLClusterTestEnv) collectTestLogs() error { func (te *CLClusterTestEnv) returnFunds() error { te.l.Info().Msg("Attempting to return Chainlink node funds to default network wallets") - for _, chainlinkNode := range te.CLNodes { + for _, chainlinkNode := range te.ClCluster.Nodes { fundedKeys, err := chainlinkNode.API.ExportEVMKeysForChain(te.EVMClient.GetChainID().String()) if err != nil { return err diff --git a/integration-tests/docker/test_env/test_env_builder.go b/integration-tests/docker/test_env/test_env_builder.go index c9c68f0ea9c..cdae5a9d72b 
100644 --- a/integration-tests/docker/test_env/test_env_builder.go +++ b/integration-tests/docker/test_env/test_env_builder.go @@ -197,7 +197,7 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { return nil, errors.New("cannot create nodes with custom config without nonDevNetworks") } - err = b.te.StartClNodes(b.clNodeConfig, b.clNodesCount, b.secretsConfig) + err = b.te.StartClCluster(b.clNodeConfig, b.clNodesCount, b.secretsConfig) if err != nil { return nil, err } @@ -261,12 +261,12 @@ func (b *CLTestEnvBuilder) Build() (*CLClusterTestEnv, error) { node.SetChainConfig(cfg, wsUrls, httpUrls, networkConfig, b.hasForwarders) } - err := b.te.StartClNodes(cfg, b.clNodesCount, b.secretsConfig) + err := b.te.StartClCluster(cfg, b.clNodesCount, b.secretsConfig) if err != nil { return nil, err } - nodeCsaKeys, err = b.te.GetNodeCSAKeys() + nodeCsaKeys, err = b.te.ClCluster.NodeCSAKeys() if err != nil { return nil, err } diff --git a/integration-tests/docker/test_env/test_env_config.go b/integration-tests/docker/test_env/test_env_config.go index dfac6f9520a..1a0c8d5c86a 100644 --- a/integration-tests/docker/test_env/test_env_config.go +++ b/integration-tests/docker/test_env/test_env_config.go @@ -10,7 +10,7 @@ type TestEnvConfig struct { Networks []string `json:"networks"` Geth GethConfig `json:"geth"` MockAdapter MockAdapterConfig `json:"mock_adapter"` - Nodes []ClNodeConfig `json:"nodes"` + ClCluster *ClCluster `json:"clCluster"` } type MockAdapterConfig struct { @@ -22,11 +22,6 @@ type GethConfig struct { ContainerName string `json:"container_name"` } -type ClNodeConfig struct { - NodeContainerName string `json:"container_name"` - DbContainerName string `json:"db_container_name"` -} - func NewTestEnvConfigFromFile(path string) (*TestEnvConfig, error) { c := &TestEnvConfig{} err := env.ParseJSONFile(path, c) diff --git a/integration-tests/go.mod b/integration-tests/go.mod index 4d37e25f668..4bad08d5ab6 100644 --- a/integration-tests/go.mod +++ 
b/integration-tests/go.mod @@ -20,7 +20,7 @@ require ( github.com/rs/zerolog v1.30.0 github.com/slack-go/slack v0.12.2 github.com/smartcontractkit/chainlink-env v0.38.3 - github.com/smartcontractkit/chainlink-testing-framework v1.17.11 + github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231016091231-25809996fbd4 github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000 github.com/smartcontractkit/libocr v0.0.0-20230922131214-122accb19ea6 github.com/smartcontractkit/ocr2keepers v0.7.27 @@ -70,7 +70,7 @@ require ( github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/avast/retry-go/v4 v4.5.0 // indirect - github.com/aws/aws-sdk-go v1.44.276 // indirect + github.com/aws/aws-sdk-go v1.44.302 // indirect github.com/aws/constructs-go/constructs/v10 v10.1.255 // indirect github.com/aws/jsii-runtime-go v1.75.0 // indirect github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect @@ -129,7 +129,7 @@ require ( github.com/dustin/go-humanize v1.0.1 // indirect github.com/dvsekhvalnov/jose2go v1.5.0 // indirect github.com/edsrzf/mmap-go v1.1.0 // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/emicklei/go-restful/v3 v3.10.2 // indirect github.com/esote/minmaxheap v1.0.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect @@ -158,7 +158,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/analysis v0.21.4 // indirect - github.com/go-openapi/errors v0.20.3 // indirect + github.com/go-openapi/errors v0.20.4 // indirect github.com/go-openapi/jsonpointer v0.20.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/loads v0.21.2 // indirect @@ -213,7 +213,7 @@ require ( github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // 
indirect github.com/gtank/merlin v0.1.1 // indirect github.com/gtank/ristretto255 v0.1.2 // indirect - github.com/hashicorp/consul/api v1.21.0 // indirect + github.com/hashicorp/consul/api v1.22.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect @@ -312,7 +312,7 @@ require ( github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.54 // indirect + github.com/miekg/dns v1.1.55 // indirect github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 // indirect github.com/minio/sha256-simd v1.0.0 // indirect @@ -368,7 +368,6 @@ require ( github.com/prometheus/exporter-toolkit v0.10.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/prometheus/prometheus v0.46.0 // indirect - github.com/pyroscope-io/client v0.7.1 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect @@ -433,7 +432,7 @@ require ( go.etcd.io/etcd/api/v3 v3.5.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect go.etcd.io/etcd/client/v3 v3.5.7 // indirect - go.mongodb.org/mongo-driver v1.11.3 // indirect + go.mongodb.org/mongo-driver v1.12.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.42.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect @@ -473,23 +472,23 @@ require ( gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.26.2 // indirect + k8s.io/api v0.27.3 // indirect 
k8s.io/apiextensions-apiserver v0.25.3 // indirect - k8s.io/apimachinery v0.26.2 // indirect + k8s.io/apimachinery v0.27.3 // indirect k8s.io/cli-runtime v0.25.11 // indirect - k8s.io/client-go v0.26.2 // indirect + k8s.io/client-go v0.27.3 // indirect k8s.io/component-base v0.26.2 // indirect - k8s.io/klog/v2 v2.90.1 // indirect - k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 // indirect k8s.io/kubectl v0.25.11 // indirect - k8s.io/utils v0.0.0-20230308161112-d77c459e9343 // indirect + k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect nhooyr.io/websocket v1.8.7 // indirect pgregory.net/rapid v0.5.5 // indirect sigs.k8s.io/controller-runtime v0.13.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.12.1 // indirect sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/integration-tests/go.sum b/integration-tests/go.sum index e3dc28f45cb..75beea52e8f 100644 --- a/integration-tests/go.sum +++ b/integration-tests/go.sum @@ -668,8 +668,8 @@ github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.276 h1:ywPlx9C5Yc482dUgAZ9bHpQ6onVvJvYE9FJWsNDCEy0= -github.com/aws/aws-sdk-go v1.44.276/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.44.302 h1:ST3ko6GrJKn3Xi+nAvxjG3uk/V1pW8KC52WLeIxqqNk= +github.com/aws/aws-sdk-go v1.44.302/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= 
github.com/aws/constructs-go/constructs/v10 v10.1.255 h1:5hARfEmhBqHSTQf/C3QLA3sWOxO2Dfja0iA1W7ZcI7g= github.com/aws/constructs-go/constructs/v10 v10.1.255/go.mod h1:DCdBSjN04Ck2pajCacTD4RKFqSA7Utya8d62XreYctI= github.com/aws/jsii-runtime-go v1.75.0 h1:NhpUfyiL7/wsRuUekFsz8FFBCYLfPD/l61kKg9kL/a4= @@ -921,10 +921,8 @@ github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= +github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -1071,8 +1069,8 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2/go.mod 
h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= -github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= +github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= @@ -1397,8 +1395,8 @@ github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIv github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.21.0 h1:WMR2JiyuaQWRAMFaOGiYfY4Q4HRpyYRe/oYQofjyduM= -github.com/hashicorp/consul/api v1.21.0/go.mod h1:f8zVJwBcLdr1IQnfdfszjUM0xzp31Zl3bpws3pL9uFM= +github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE= +github.com/hashicorp/consul/api v1.22.0/go.mod h1:zHpYgZ7TeYqS6zaszjwSt128OwESRpnhU9aGa6ue3Eg= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs= github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg= @@ -1990,8 +1988,8 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.31/go.mod 
h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI= -github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= +github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= @@ -2368,8 +2366,8 @@ github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231010203454-896f5c3c04d github.com/smartcontractkit/chainlink-solana v1.0.3-0.20231010203454-896f5c3c04d1/go.mod h1:RIUJXn7EVp24TL2p4FW79dYjyno23x5mjt1nKN+5WEk= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20230901115736-bbabe542a918 h1:ByVauKFXphRlSNG47lNuxZ9aicu+r8AoNp933VRPpCw= github.com/smartcontractkit/chainlink-starknet/relayer v0.0.1-beta-test.0.20230901115736-bbabe542a918/go.mod h1:/yp/sqD8Iz5GU5fcercjrw0ivJF7HDcupYg+Gjr7EPg= -github.com/smartcontractkit/chainlink-testing-framework v1.17.11 h1:snDGgkNc7Epv6xzFnIsQ19lZ23NYFFvqkWCRG35G65A= -github.com/smartcontractkit/chainlink-testing-framework v1.17.11/go.mod h1:QQxildwii+vl1L2764PzQ0A1vxfYFmhpWjdnYGn/BrM= +github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231016091231-25809996fbd4 h1:yidN70ecUI3Ydweggk9eHyCSLX7N39YDmYt8GoYx2XU= +github.com/smartcontractkit/chainlink-testing-framework v1.17.12-0.20231016091231-25809996fbd4/go.mod h1:RWlmjwnjIGbQAnRfKwe02Ife82nNI3rZmdI0zgkfbyk= github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306 h1:ko88+ZznniNJZbZPWAvHQU8SwKAdHngdDZ+pvVgB5ss= 
github.com/smartcontractkit/go-plugin v0.0.0-20231003134350-e49dad63b306/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= github.com/smartcontractkit/grpc-proxy v0.0.0-20230731113816-f1be6620749f h1:hgJif132UCdjo8u43i7iPN1/MFnu49hv7lFGFftCHKU= @@ -2545,8 +2543,10 @@ github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcY github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -2601,8 +2601,8 @@ go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/ go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= -go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.mongodb.org/mongo-driver v1.12.0 h1:aPx33jmn/rQuJXPQLZQ8NtfPQG8CaqgLThFtqRb0PiE= +go.mongodb.org/mongo-driver v1.12.0/go.mod 
h1:AZkxhPnFJUoH7kZlFkVKucV20K387miPfm7oimrSmK0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= @@ -3521,22 +3521,22 @@ k8s.io/api v0.25.11 h1:4mjYDfE3yp22jrytjH0knwgzjXKkxHX4D01ZCAazvZM= k8s.io/api v0.25.11/go.mod h1:bK4UvD4bthtutNlvensrfBX21PRQ/vs2cIYggHkOOAo= k8s.io/apiextensions-apiserver v0.25.3 h1:bfI4KS31w2f9WM1KLGwnwuVlW3RSRPuIsfNF/3HzR0k= k8s.io/apiextensions-apiserver v0.25.3/go.mod h1:ZJqwpCkxIx9itilmZek7JgfUAM0dnTsA48I4krPqRmo= -k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= -k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM= +k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E= k8s.io/cli-runtime v0.25.11 h1:GE2yNZm1tN+MJtw1SGMOLesLF7Kp7NVAVqRSTbXfu4o= k8s.io/cli-runtime v0.25.11/go.mod h1:r/nEINuHVEpgGhcd2WamU7hD1t/lMnSz8XM44Autltc= k8s.io/client-go v0.25.11 h1:DJQ141UsbNRI6wYSlcYLP5J5BW5Wq7Bgm42Ztq2SW70= k8s.io/client-go v0.25.11/go.mod h1:41Xs7p1SfhoReUnmjjYCfCNWFiq4xSkexwJfbxF2F7A= k8s.io/component-base v0.26.2 h1:IfWgCGUDzrD6wLLgXEstJKYZKAFS2kO+rBRi0p3LqcI= k8s.io/component-base v0.26.2/go.mod h1:DxbuIe9M3IZPRxPIzhch2m1eT7uFrSBJUBuVCQEBivs= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= k8s.io/kubectl v0.25.11 
h1:6bsft5Gan6BCvQ7cJbDRFjTm4Zfq8GuUYpsWAdVngYE= k8s.io/kubectl v0.25.11/go.mod h1:8mIfgkFgT+yJ8/TlmPW1qoRh46H2si9q5nW8id7i9iM= -k8s.io/utils v0.0.0-20230308161112-d77c459e9343 h1:m7tbIjXGcGIAtpmQr7/NAi7RsWoW3E7Zcm4jI1HicTc= -k8s.io/utils v0.0.0-20230308161112-d77c459e9343/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc= +k8s.io/utils v0.0.0-20230711102312-30195339c3c7/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= @@ -3587,7 +3587,7 @@ sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/integration-tests/load/vrfv2/vrfv2_test.go b/integration-tests/load/vrfv2/vrfv2_test.go index 97a455dc714..a9fb80a72ad 100644 --- a/integration-tests/load/vrfv2/vrfv2_test.go +++ b/integration-tests/load/vrfv2/vrfv2_test.go @@ -1,10 +1,11 @@ package loadvrfv2 import ( + "testing" + 
"github.com/smartcontractkit/chainlink/integration-tests/actions/vrfv2_actions" "github.com/smartcontractkit/wasp" "github.com/stretchr/testify/require" - "testing" ) func TestVRFV2Load(t *testing.T) { @@ -31,7 +32,7 @@ func TestVRFV2Load(t *testing.T) { T: t, LoadType: wasp.VU, GenName: "vu", - VU: NewJobVolumeVU(cfg.SoakVolume.Pace.Duration(), 1, env.GetAPIs(), env.EVMClient, vrfv2Contracts), + VU: NewJobVolumeVU(cfg.SoakVolume.Pace.Duration(), 1, env.ClCluster.NodeAPIs(), env.EVMClient, vrfv2Contracts), Labels: labels, LokiConfig: wasp.NewEnvLokiConfig(), } diff --git a/integration-tests/migration/upgrade_version_test.go b/integration-tests/migration/upgrade_version_test.go index d1d79de5eed..bf97f43d058 100644 --- a/integration-tests/migration/upgrade_version_test.go +++ b/integration-tests/migration/upgrade_version_test.go @@ -34,6 +34,6 @@ func TestVersionUpgrade(t *testing.T) { // MigrateOnStartup = true // // by default - err = env.CLNodes[0].Restart(env.CLNodes[0].NodeConfig) + err = env.ClCluster.Nodes[0].Restart(env.ClCluster.Nodes[0].NodeConfig) require.NoError(t, err) } diff --git a/integration-tests/smoke/automation_test.go b/integration-tests/smoke/automation_test.go index ca5db25cd97..0a1ba14e1ce 100644 --- a/integration-tests/smoke/automation_test.go +++ b/integration-tests/smoke/automation_test.go @@ -162,7 +162,7 @@ func SetupAutomationBasic(t *testing.T, nodeUpgrade bool) { expect := 5 // Upgrade the nodes one at a time and check that the upkeeps are still being performed for i := 0; i < 5; i++ { - actions.UpgradeChainlinkNodeVersionsLocal(upgradeImage, upgradeVersion, testEnv.CLNodes[i]) + actions.UpgradeChainlinkNodeVersionsLocal(upgradeImage, upgradeVersion, testEnv.ClCluster.Nodes[i]) time.Sleep(time.Second * 10) expect = expect + 5 gom.Eventually(func(g gomega.Gomega) { @@ -1028,7 +1028,7 @@ func setupAutomationTestDocker( txCost, err := env.EVMClient.EstimateCostForChainlinkOperations(1000) require.NoError(t, err, "Error estimating cost 
for Chainlink Operations") - nodeClients := env.GetAPIs() + nodeClients := env.ClCluster.NodeAPIs() workerNodes := nodeClients[1:] err = actions.FundChainlinkNodesLocal(workerNodes, env.EVMClient, txCost) require.NoError(t, err, "Error funding Chainlink nodes") diff --git a/integration-tests/smoke/cron_test.go b/integration-tests/smoke/cron_test.go index dee37af3540..0da9d6461ca 100644 --- a/integration-tests/smoke/cron_test.go +++ b/integration-tests/smoke/cron_test.go @@ -35,10 +35,10 @@ func TestCronBasic(t *testing.T) { URL: fmt.Sprintf("%s/variable", env.MockAdapter.InternalEndpoint), RequestData: "{}", } - err = env.CLNodes[0].API.MustCreateBridge(bta) + err = env.ClCluster.Nodes[0].API.MustCreateBridge(bta) require.NoError(t, err, "Creating bridge in chainlink node shouldn't fail") - job, err := env.CLNodes[0].API.MustCreateJob(&client.CronJobSpec{ + job, err := env.ClCluster.Nodes[0].API.MustCreateJob(&client.CronJobSpec{ Schedule: "CRON_TZ=UTC * * * * * *", ObservationSource: client.ObservationSourceSpecBridge(bta), }) @@ -46,7 +46,7 @@ func TestCronBasic(t *testing.T) { gom := gomega.NewGomegaWithT(t) gom.Eventually(func(g gomega.Gomega) { - jobRuns, err := env.CLNodes[0].API.MustReadRunsByJob(job.Data.ID) + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID) if err != nil { l.Info().Err(err).Msg("error while waiting for job runs") } diff --git a/integration-tests/smoke/flux_test.go b/integration-tests/smoke/flux_test.go index b517cb5b57a..a9a9e848111 100644 --- a/integration-tests/smoke/flux_test.go +++ b/integration-tests/smoke/flux_test.go @@ -33,7 +33,7 @@ func TestFluxBasic(t *testing.T) { Build() require.NoError(t, err) - nodeAddresses, err := env.ChainlinkNodeAddresses() + nodeAddresses, err := env.ClCluster.NodeAddresses() require.NoError(t, err, "Retrieving on-chain wallet addresses for chainlink nodes shouldn't fail") env.EVMClient.ParallelTransactions(true) @@ -83,7 +83,7 @@ func TestFluxBasic(t *testing.T) { Name: 
fmt.Sprintf("variable-%s", adapterUUID), URL: adapterFullURL, } - for i, n := range env.CLNodes { + for i, n := range env.ClCluster.Nodes { err = n.API.MustCreateBridge(bta) require.NoError(t, err, "Creating bridge shouldn't fail for node %d", i+1) diff --git a/integration-tests/smoke/forwarder_ocr_test.go b/integration-tests/smoke/forwarder_ocr_test.go index 21f108c732f..705ae7cd1a6 100644 --- a/integration-tests/smoke/forwarder_ocr_test.go +++ b/integration-tests/smoke/forwarder_ocr_test.go @@ -30,7 +30,7 @@ func TestForwarderOCRBasic(t *testing.T) { env.ParallelTransactions(true) - nodeClients := env.GetAPIs() + nodeClients := env.ClCluster.NodeAPIs() bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] workerNodeAddresses, err := actions.ChainlinkNodeAddressesLocal(workerNodes) diff --git a/integration-tests/smoke/forwarders_ocr2_test.go b/integration-tests/smoke/forwarders_ocr2_test.go index 994a1038f12..be26fd6213b 100644 --- a/integration-tests/smoke/forwarders_ocr2_test.go +++ b/integration-tests/smoke/forwarders_ocr2_test.go @@ -39,7 +39,7 @@ func TestForwarderOCR2Basic(t *testing.T) { env.ParallelTransactions(true) - nodeClients := env.GetAPIs() + nodeClients := env.ClCluster.NodeAPIs() bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] workerNodeAddresses, err := actions.ChainlinkNodeAddressesLocal(workerNodes) diff --git a/integration-tests/smoke/keeper_test.go b/integration-tests/smoke/keeper_test.go index 3280a7a23a6..4f1a4fde143 100644 --- a/integration-tests/smoke/keeper_test.go +++ b/integration-tests/smoke/keeper_test.go @@ -1123,5 +1123,5 @@ func setupKeeperTest(t *testing.T) ( err = env.EVMClient.WaitForEvents() require.NoError(t, err, "Error waiting for events") - return env.EVMClient, env.GetAPIs(), env.ContractDeployer, linkTokenContract, env + return env.EVMClient, env.ClCluster.NodeAPIs(), env.ContractDeployer, linkTokenContract, env } diff --git a/integration-tests/smoke/ocr2_test.go 
b/integration-tests/smoke/ocr2_test.go index ac5ab3fdbb2..e32d86bef82 100644 --- a/integration-tests/smoke/ocr2_test.go +++ b/integration-tests/smoke/ocr2_test.go @@ -47,7 +47,7 @@ func TestOCRv2Basic(t *testing.T) { env.ParallelTransactions(true) - nodeClients := env.GetAPIs() + nodeClients := env.ClCluster.NodeAPIs() bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] linkToken, err := env.ContractDeployer.DeployLinkTokenContract() diff --git a/integration-tests/smoke/ocr_test.go b/integration-tests/smoke/ocr_test.go index 75c31e4aa2e..cba5935cd01 100644 --- a/integration-tests/smoke/ocr_test.go +++ b/integration-tests/smoke/ocr_test.go @@ -28,7 +28,7 @@ func TestOCRBasic(t *testing.T) { env.ParallelTransactions(true) - nodeClients := env.GetAPIs() + nodeClients := env.ClCluster.NodeAPIs() bootstrapNode, workerNodes := nodeClients[0], nodeClients[1:] linkTokenContract, err := env.ContractDeployer.DeployLinkTokenContract() diff --git a/integration-tests/smoke/runlog_test.go b/integration-tests/smoke/runlog_test.go index 14b467764ba..99d3a4f6c27 100644 --- a/integration-tests/smoke/runlog_test.go +++ b/integration-tests/smoke/runlog_test.go @@ -51,7 +51,7 @@ func TestRunLogBasic(t *testing.T) { Name: fmt.Sprintf("five-%s", jobUUID.String()), URL: fmt.Sprintf("%s/variable", env.MockAdapter.InternalEndpoint), } - err = env.CLNodes[0].API.MustCreateBridge(&bta) + err = env.ClCluster.Nodes[0].API.MustCreateBridge(&bta) require.NoError(t, err, "Creating bridge shouldn't fail") os := &client.DirectRequestTxPipelineSpec{ @@ -61,7 +61,7 @@ func TestRunLogBasic(t *testing.T) { ost, err := os.String() require.NoError(t, err, "Building observation source spec shouldn't fail") - _, err = env.CLNodes[0].API.MustCreateJob(&client.DirectRequestJobSpec{ + _, err = env.ClCluster.Nodes[0].API.MustCreateJob(&client.DirectRequestJobSpec{ Name: fmt.Sprintf("direct-request-%s", uuid.NewString()), MinIncomingConfirmations: "1", ContractAddress: oracle.Address(), diff --git 
a/integration-tests/smoke/vrf_test.go b/integration-tests/smoke/vrf_test.go index d375506a776..ba7e2f17a4a 100644 --- a/integration-tests/smoke/vrf_test.go +++ b/integration-tests/smoke/vrf_test.go @@ -44,7 +44,7 @@ func TestVRFBasic(t *testing.T) { err = env.EVMClient.WaitForEvents() require.NoError(t, err, "Waiting for event subscriptions in nodes shouldn't fail") - for _, n := range env.CLNodes { + for _, n := range env.ClCluster.Nodes { nodeKey, err := n.API.MustCreateVRFKey() require.NoError(t, err, "Creating VRF key shouldn't fail") l.Debug().Interface("Key JSON", nodeKey).Msg("Created proving key") @@ -88,7 +88,7 @@ func TestVRFBasic(t *testing.T) { gom := gomega.NewGomegaWithT(t) timeout := time.Minute * 2 gom.Eventually(func(g gomega.Gomega) { - jobRuns, err := env.CLNodes[0].API.MustReadRunsByJob(job.Data.ID) + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(job.Data.ID) g.Expect(err).ShouldNot(gomega.HaveOccurred(), "Job execution shouldn't fail") out, err := contracts.Consumer.RandomnessOutput(context.Background()) diff --git a/integration-tests/smoke/vrfv2_test.go b/integration-tests/smoke/vrfv2_test.go index 1b431397574..8d3868f8510 100644 --- a/integration-tests/smoke/vrfv2_test.go +++ b/integration-tests/smoke/vrfv2_test.go @@ -64,18 +64,18 @@ func TestVRFv2Basic(t *testing.T) { err = vrfv2_actions.FundVRFCoordinatorV2Subscription(lt, vrfv2Contracts.Coordinator, env.EVMClient, vrfConst.SubID, vrfConst.VRFSubscriptionFundingAmountLink) require.NoError(t, err) - vrfV2jobs, err := vrfv2_actions.CreateVRFV2Jobs(env.GetAPIs(), vrfv2Contracts.Coordinator, env.EVMClient, vrfConst.MinimumConfirmations) + vrfV2jobs, err := vrfv2_actions.CreateVRFV2Jobs(env.ClCluster.NodeAPIs(), vrfv2Contracts.Coordinator, env.EVMClient, vrfConst.MinimumConfirmations) require.NoError(t, err) // this part is here because VRFv2 can work with only a specific key // [[EVM.KeySpecific]] // Key = '...' 
- addr, err := env.CLNodes[0].API.PrimaryEthAddress() + addr, err := env.ClCluster.Nodes[0].API.PrimaryEthAddress() require.NoError(t, err) - nodeConfig := node.NewConfig(env.CLNodes[0].NodeConfig, + nodeConfig := node.NewConfig(env.ClCluster.Nodes[0].NodeConfig, node.WithVRFv2EVMEstimator(addr), ) - err = env.CLNodes[0].Restart(nodeConfig) + err = env.ClCluster.Nodes[0].Restart(nodeConfig) require.NoError(t, err) // test and assert @@ -93,7 +93,7 @@ func TestVRFv2Basic(t *testing.T) { timeout := time.Minute * 2 var lastRequestID *big.Int gom.Eventually(func(g gomega.Gomega) { - jobRuns, err := env.CLNodes[0].API.MustReadRunsByJob(vrfV2jobs[0].Job.Data.ID) + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(vrfV2jobs[0].Job.Data.ID) g.Expect(err).ShouldNot(gomega.HaveOccurred()) g.Expect(len(jobRuns.Data)).Should(gomega.BeNumerically("==", 1)) lastRequestID, err = vrfv2Contracts.LoadTestConsumer.GetLastRequestId(context.Background()) diff --git a/integration-tests/smoke/vrfv2plus_test.go b/integration-tests/smoke/vrfv2plus_test.go index 659616330b5..bb363ed2eb4 100644 --- a/integration-tests/smoke/vrfv2plus_test.go +++ b/integration-tests/smoke/vrfv2plus_test.go @@ -57,7 +57,7 @@ func TestVRFv2Plus(t *testing.T) { var isNativeBilling = false subBalanceBeforeRequest := subscription.Balance - jobRunsBeforeTest, err := env.CLNodes[0].API.MustReadRunsByJob(vrfv2PlusData.VRFJob.Data.ID) + jobRunsBeforeTest, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(vrfv2PlusData.VRFJob.Data.ID) require.NoError(t, err, "error reading job runs") // test and assert @@ -78,7 +78,7 @@ func TestVRFv2Plus(t *testing.T) { subBalanceAfterRequest := subscription.Balance require.Equal(t, expectedSubBalanceJuels, subBalanceAfterRequest) - jobRuns, err := env.CLNodes[0].API.MustReadRunsByJob(vrfv2PlusData.VRFJob.Data.ID) + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(vrfv2PlusData.VRFJob.Data.ID) require.NoError(t, err, "error reading job runs") require.Equal(t, 
len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data)) @@ -98,7 +98,7 @@ func TestVRFv2Plus(t *testing.T) { var isNativeBilling = true subNativeTokenBalanceBeforeRequest := subscription.NativeBalance - jobRunsBeforeTest, err := env.CLNodes[0].API.MustReadRunsByJob(vrfv2PlusData.VRFJob.Data.ID) + jobRunsBeforeTest, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(vrfv2PlusData.VRFJob.Data.ID) require.NoError(t, err, "error reading job runs") // test and assert @@ -118,7 +118,7 @@ func TestVRFv2Plus(t *testing.T) { subBalanceAfterRequest := subscription.NativeBalance require.Equal(t, expectedSubBalanceWei, subBalanceAfterRequest) - jobRuns, err := env.CLNodes[0].API.MustReadRunsByJob(vrfv2PlusData.VRFJob.Data.ID) + jobRuns, err := env.ClCluster.Nodes[0].API.MustReadRunsByJob(vrfv2PlusData.VRFJob.Data.ID) require.NoError(t, err, "error reading job runs") require.Equal(t, len(jobRunsBeforeTest.Data)+1, len(jobRuns.Data)) @@ -309,7 +309,7 @@ func TestVRFv2PlusMigration(t *testing.T) { require.NoError(t, err, vrfv2plus.ErrWaitTXsComplete) _, err = vrfv2plus.CreateVRFV2PlusJob( - env.GetAPIs()[0], + env.ClCluster.NodeAPIs()[0], newCoordinator.Address(), vrfv2PlusData.PrimaryEthAddress, vrfv2PlusData.VRFKey.Data.ID, From a2c91ae9cd39aad24bb2aecf92945f5aac10892e Mon Sep 17 00:00:00 2001 From: Jordan Krage Date: Mon, 16 Oct 2023 05:21:57 -0500 Subject: [PATCH 3/9] handle unchecked errors (#10964) --- common/txmgr/txmgr.go | 29 ++++++++++++++++------------- integration-tests/testsetups/ocr.go | 6 +++--- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/common/txmgr/txmgr.go b/common/txmgr/txmgr.go index feb3218bfc6..6ca75e45fb8 100644 --- a/common/txmgr/txmgr.go +++ b/common/txmgr/txmgr.go @@ -3,13 +3,14 @@ package txmgr import ( "context" "database/sql" + "errors" "fmt" "math/big" "sync" "time" "github.com/google/uuid" - "github.com/pkg/errors" + pkgerrors "github.com/pkg/errors" feetypes "github.com/smartcontractkit/chainlink/v2/common/fee/types" txmgrtypes 
"github.com/smartcontractkit/chainlink/v2/common/txmgr/types" @@ -164,14 +165,14 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(ctx return b.StartOnce("Txm", func() error { var ms services.MultiStart if err := ms.Start(ctx, b.broadcaster); err != nil { - return errors.Wrap(err, "Txm: Broadcaster failed to start") + return pkgerrors.Wrap(err, "Txm: Broadcaster failed to start") } if err := ms.Start(ctx, b.confirmer); err != nil { - return errors.Wrap(err, "Txm: Confirmer failed to start") + return pkgerrors.Wrap(err, "Txm: Confirmer failed to start") } if err := ms.Start(ctx, b.txAttemptBuilder); err != nil { - return errors.Wrap(err, "Txm: Estimator failed to start") + return pkgerrors.Wrap(err, "Txm: Estimator failed to start") } b.wg.Add(1) @@ -188,7 +189,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Start(ctx if b.fwdMgr != nil { if err := ms.Start(ctx, b.fwdMgr); err != nil { - return errors.Wrap(err, "Txm: ForwarderManager failed to start") + return pkgerrors.Wrap(err, "Txm: ForwarderManager failed to start") } } @@ -222,7 +223,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) abandon(ad ctx, cancel := utils.StopChan(b.chStop).NewCtx() defer cancel() err = b.txStore.Abandon(ctx, b.chainID, addr) - return errors.Wrapf(err, "abandon failed to update txes for key %s", addr.String()) + return pkgerrors.Wrapf(err, "abandon failed to update txes for key %s", addr.String()) } func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() (merr error) { @@ -239,13 +240,15 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) Close() (m } if b.fwdMgr != nil { if err := b.fwdMgr.Close(); err != nil { - return errors.Wrap(err, "Txm: failed to stop ForwarderManager") + merr = errors.Join(merr, pkgerrors.Wrap(err, "Txm: failed to stop ForwarderManager")) } } b.wg.Wait() - b.txAttemptBuilder.Close() + if err := b.txAttemptBuilder.Close(); err != nil { + 
merr = errors.Join(merr, pkgerrors.Wrap(err, "Txm: failed to close TxAttemptBuilder")) + } return nil }) @@ -440,7 +443,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CreateTran var existingTx *txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] existingTx, err = b.txStore.FindTxWithIdempotencyKey(ctx, *txRequest.IdempotencyKey, b.chainID) if err != nil && !errors.Is(err, sql.ErrNoRows) { - return tx, errors.Wrap(err, "Failed to search for transaction with IdempotencyKey") + return tx, pkgerrors.Wrap(err, "Failed to search for transaction with IdempotencyKey") } if existingTx != nil { b.logger.Infow("Found a Tx with IdempotencyKey. Returning existing Tx without creating a new one.", "IdempotencyKey", *txRequest.IdempotencyKey) @@ -472,7 +475,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CreateTran err = b.txStore.CheckTxQueueCapacity(ctx, txRequest.FromAddress, b.txConfig.MaxQueued(), b.chainID) if err != nil { - return tx, errors.Wrap(err, "Txm#CreateTransaction") + return tx, pkgerrors.Wrap(err, "Txm#CreateTransaction") } tx, err = b.txStore.CreateTransaction(ctx, txRequest, b.chainID) @@ -482,7 +485,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) CreateTran // Calls forwarderMgr to get a proper forwarder for a given EOA. 
func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) GetForwarderForEOA(eoa ADDR) (forwarder ADDR, err error) { if !b.txConfig.ForwardersEnabled() { - return forwarder, errors.Errorf("Forwarding is not enabled, to enable set Transactions.ForwardersEnabled =true") + return forwarder, pkgerrors.Errorf("Forwarding is not enabled, to enable set Transactions.ForwardersEnabled =true") } forwarder, err = b.fwdMgr.ForwarderFor(eoa) return @@ -490,7 +493,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) GetForward func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) checkEnabled(addr ADDR) error { err := b.keyStore.CheckEnabled(addr, b.chainID) - return errors.Wrapf(err, "cannot send transaction from %s on chain ID %s", addr, b.chainID.String()) + return pkgerrors.Wrapf(err, "cannot send transaction from %s on chain ID %s", addr, b.chainID.String()) } // SendNativeToken creates a transaction that transfers the given value of native tokens @@ -507,7 +510,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) SendNative Strategy: NewSendEveryStrategy(), } etx, err = b.txStore.CreateTransaction(ctx, txRequest, chainID) - return etx, errors.Wrap(err, "SendNativeToken failed to insert tx") + return etx, pkgerrors.Wrap(err, "SendNativeToken failed to insert tx") } type NullTxManager[ diff --git a/integration-tests/testsetups/ocr.go b/integration-tests/testsetups/ocr.go index 0eae4b52b63..048f3124ad9 100644 --- a/integration-tests/testsetups/ocr.go +++ b/integration-tests/testsetups/ocr.go @@ -24,6 +24,8 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/require" + "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator" + "github.com/smartcontractkit/chainlink-env/environment" "github.com/smartcontractkit/chainlink-env/pkg/helm/chainlink" "github.com/smartcontractkit/chainlink-env/pkg/helm/ethereum" @@ -34,7 +36,6 @@ import ( 
"github.com/smartcontractkit/chainlink-testing-framework/logging" "github.com/smartcontractkit/chainlink-testing-framework/networks" reportModel "github.com/smartcontractkit/chainlink-testing-framework/testreporters" - "github.com/smartcontractkit/libocr/gethwrappers/offchainaggregator" "github.com/smartcontractkit/chainlink/integration-tests/actions" "github.com/smartcontractkit/chainlink/integration-tests/client" @@ -120,8 +121,7 @@ func NewOCRSoakTest(t *testing.T, forwarderFlow bool) (*OCRSoakTest, error) { ocrRoundStates: make([]*testreporters.OCRRoundState, 0), ocrInstanceMap: make(map[string]contracts.OffchainAggregator), } - test.ensureInputValues() - return test, nil + return test, test.ensureInputValues() } // DeployEnvironment deploys the test environment, starting all Chainlink nodes and other components for the test From 4e07f4f0aa72ddb76bd317000a3c82f6bffad2ac Mon Sep 17 00:00:00 2001 From: Jordan Krage Date: Mon, 16 Oct 2023 05:24:58 -0500 Subject: [PATCH 4/9] core/services/keystore: *memoryORM.saveEncryptedKeyRing - handle callback errors (#10958) --- core/services/keystore/keystoretest.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/core/services/keystore/keystoretest.go b/core/services/keystore/keystoretest.go index 7e0280f974a..0b5ce4e0057 100644 --- a/core/services/keystore/keystoretest.go +++ b/core/services/keystore/keystoretest.go @@ -1,6 +1,7 @@ package keystore import ( + "errors" "sync" "github.com/smartcontractkit/sqlx" @@ -26,14 +27,14 @@ func (o *memoryORM) isEmpty() (bool, error) { return false, nil } -func (o *memoryORM) saveEncryptedKeyRing(kr *encryptedKeyRing, callbacks ...func(pg.Queryer) error) error { +func (o *memoryORM) saveEncryptedKeyRing(kr *encryptedKeyRing, callbacks ...func(pg.Queryer) error) (err error) { o.mu.Lock() defer o.mu.Unlock() o.keyRing = kr for _, c := range callbacks { - c(o.q) + err = errors.Join(err, c(o.q)) } - return nil + return } func (o *memoryORM) getEncryptedKeyRing() 
(encryptedKeyRing, error) { From 7bede8e0758307a17be51b46a2b0e10b129c9630 Mon Sep 17 00:00:00 2001 From: Rens Rooimans Date: Mon, 16 Oct 2023 13:01:20 +0200 Subject: [PATCH 5/9] rm derived price feed (#10926) --- contracts/package.json | 2 +- .../scripts/native_solc_compile_all_feeds | 3 - contracts/src/v0.8/dev/DerivedPriceFeed.sol | 87 ---- .../test/v0.8/dev/DerivedPriceFeed.test.ts | 141 ------- .../derived_price_feed_wrapper.go | 395 ------------------ ...rapper-dependency-versions-do-not-edit.txt | 1 - core/gethwrappers/go_generate.go | 1 - core/scripts/chaincli/command/feed/deploy.go | 21 - core/scripts/chaincli/command/feed/root.go | 16 - core/scripts/chaincli/command/root.go | 2 - core/scripts/chaincli/handler/feed.go | 55 --- 11 files changed, 1 insertion(+), 723 deletions(-) delete mode 100644 contracts/src/v0.8/dev/DerivedPriceFeed.sol delete mode 100644 contracts/test/v0.8/dev/DerivedPriceFeed.test.ts delete mode 100644 core/gethwrappers/generated/derived_price_feed_wrapper/derived_price_feed_wrapper.go delete mode 100644 core/scripts/chaincli/command/feed/deploy.go delete mode 100644 core/scripts/chaincli/command/feed/root.go delete mode 100644 core/scripts/chaincli/handler/feed.go diff --git a/contracts/package.json b/contracts/package.json index 686140a5dce..f4a646f11b7 100644 --- a/contracts/package.json +++ b/contracts/package.json @@ -18,7 +18,7 @@ "prepublishOnly": "pnpm compile && ./scripts/prepublish_generate_abi_folder", "publish-beta": "pnpm publish --tag beta", "publish-prod": "npm dist-tag add @chainlink/contracts@0.8.0 latest", - "solhint": "solhint --max-warnings 502 \"./src/v0.8/**/*.sol\"" + "solhint": "solhint --max-warnings 493 \"./src/v0.8/**/*.sol\"" }, "files": [ "src/v0.8", diff --git a/contracts/scripts/native_solc_compile_all_feeds b/contracts/scripts/native_solc_compile_all_feeds index e37ca833ac6..2bbd9fe869c 100755 --- a/contracts/scripts/native_solc_compile_all_feeds +++ b/contracts/scripts/native_solc_compile_all_feeds @@ 
-30,6 +30,3 @@ compileContract () { compileContract interfaces/AggregatorV2V3Interface.sol compileContract Chainlink.sol compileContract ChainlinkClient.sol - -# Feeds -compileContract dev/DerivedPriceFeed.sol \ No newline at end of file diff --git a/contracts/src/v0.8/dev/DerivedPriceFeed.sol b/contracts/src/v0.8/dev/DerivedPriceFeed.sol deleted file mode 100644 index 73f8e8c2202..00000000000 --- a/contracts/src/v0.8/dev/DerivedPriceFeed.sol +++ /dev/null @@ -1,87 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.6; - -import "../interfaces/AggregatorV3Interface.sol"; - -/** - * Network: Fantom Testnet - * Base: LINK/USD - * Base Address: 0x6d5689Ad4C1806D1BA0c70Ab95ebe0Da6B204fC5 - * Quote: FTM/USD - * Quote Address: 0xe04676B9A9A2973BCb0D1478b5E1E9098BBB7f3D - * Decimals: 18 - * - * Network: AVAX Testnet - * Base: LINK/USD - * Base Address: 0x34C4c526902d88a3Aa98DB8a9b802603EB1E3470 - * Quote: AVAX/USD - * Quote Address: 0x5498BB86BC934c8D34FDA08E81D444153d0D06aD - * Decimals: 18 - * - * Chainlink Data Feeds can be used in combination to derive denominated price pairs in other - * currencies. - * - * If you require a denomination other than what is provided, you can use two data feeds to derive - * the pair that you need. - * - * For example, if you needed a LINK / FTM price, you could take the LINK / USD feed and the - * FTM / USD feed and derive LINK / FTM using division. 
- * (LINK/USD)/(FTM/USD) = LINK/FTM - */ -contract DerivedPriceFeed is AggregatorV3Interface { - uint256 public constant override version = 0; - - AggregatorV3Interface public immutable BASE; - AggregatorV3Interface public immutable QUOTE; - uint8 public immutable DECIMALS; - - constructor(address _base, address _quote, uint8 _decimals) { - require(_decimals > uint8(0) && _decimals <= uint8(18), "Invalid _decimals"); - DECIMALS = _decimals; - BASE = AggregatorV3Interface(_base); - QUOTE = AggregatorV3Interface(_quote); - } - - function decimals() external view override returns (uint8) { - return DECIMALS; - } - - function getRoundData(uint80) external pure override returns (uint80, int256, uint256, uint256, uint80) { - revert("not implemented - use latestRoundData()"); - } - - function description() external pure override returns (string memory) { - return "DerivedPriceFeed.sol"; - } - - function latestRoundData() - external - view - override - returns (uint80 roundId, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound) - { - return (uint80(0), getDerivedPrice(), block.timestamp, block.timestamp, uint80(0)); - } - - // https://docs.chain.link/docs/get-the-latest-price/#getting-a-different-price-denomination - function getDerivedPrice() internal view returns (int256) { - (, int256 basePrice, , , ) = BASE.latestRoundData(); - uint8 baseDecimals = BASE.decimals(); - basePrice = scalePrice(basePrice, baseDecimals, DECIMALS); - - (, int256 quotePrice, , , ) = QUOTE.latestRoundData(); - uint8 quoteDecimals = QUOTE.decimals(); - quotePrice = scalePrice(quotePrice, quoteDecimals, DECIMALS); - - return (basePrice * int256(10 ** uint256(DECIMALS))) / quotePrice; - } - - function scalePrice(int256 _price, uint8 _priceDecimals, uint8 _decimals) internal pure returns (int256) { - if (_priceDecimals < _decimals) { - return _price * int256(10 ** uint256(_decimals - _priceDecimals)); - } else if (_priceDecimals > _decimals) { - return _price / int256(10 ** 
uint256(_priceDecimals - _decimals)); - } - return _price; - } -} diff --git a/contracts/test/v0.8/dev/DerivedPriceFeed.test.ts b/contracts/test/v0.8/dev/DerivedPriceFeed.test.ts deleted file mode 100644 index 5171a44d1f5..00000000000 --- a/contracts/test/v0.8/dev/DerivedPriceFeed.test.ts +++ /dev/null @@ -1,141 +0,0 @@ -import { ethers } from 'hardhat' -import { BigNumber, ContractFactory } from 'ethers' -import { expect } from 'chai' -import { describe } from 'mocha' - -describe('DerivedPriceFeed', () => { - let mockAggFactory: ContractFactory - let derivedFeedFactory: ContractFactory - before(async () => { - const accounts = await ethers.getSigners() - mockAggFactory = await ethers.getContractFactory( - 'src/v0.7/tests/MockV3Aggregator.sol:MockV3Aggregator', - accounts[0], - ) - derivedFeedFactory = await ethers.getContractFactory( - 'src/v0.8/dev/DerivedPriceFeed.sol:DerivedPriceFeed', - accounts[0], - ) - }) - - it('reverts on getRoundData', async () => { - let base = await mockAggFactory.deploy(8, 10e8) // Price = 10 - let quote = await mockAggFactory.deploy(8, 5e8) // Price = 5 - - let derived = await derivedFeedFactory.deploy( - base.address, - quote.address, - 8, - ) - - await expect(derived.getRoundData(1)).to.be.reverted - }) - - it('returns decimals', async () => { - let base = await mockAggFactory.deploy(8, 10e8) // Price = 10 - let quote = await mockAggFactory.deploy(8, 5e8) // Price = 5 - - let derived = await derivedFeedFactory.deploy( - base.address, - quote.address, - 9, - ) - - await expect(await derived.decimals()).to.equal(9) - }) - - describe('calculates price', async () => { - it('when all decimals are the same', async () => { - let base = await mockAggFactory.deploy(8, 10e8) // 10 - let quote = await mockAggFactory.deploy(8, 5e8) // 5 - - let derived = await derivedFeedFactory.deploy( - base.address, - quote.address, - 8, - ) - - await expect((await derived.latestRoundData()).answer).to.equal( - 2e8 /* 2 */, - ) - }) - - it('when all 
decimals are the same 2', async () => { - let base = await mockAggFactory.deploy(8, 3e8) // 3 - let quote = await mockAggFactory.deploy(8, 15e8) // 15 - - let derived = await derivedFeedFactory.deploy( - base.address, - quote.address, - 8, - ) - - await expect((await derived.latestRoundData()).answer).to.equal( - 0.2e8 /* 0.2 */, - ) - }) - - it('when result decimals are higher', async () => { - let base = await mockAggFactory.deploy(8, 10e8) // Price = 10 - let quote = await mockAggFactory.deploy(8, 5e8) // Price = 5 - - let derived = await derivedFeedFactory.deploy( - base.address, - quote.address, - 12, - ) - - await expect((await derived.latestRoundData()).answer).to.equal( - 2e12 /* 2 */, - ) - }) - - it('when result decimals are lower', async () => { - let base = await mockAggFactory.deploy(8, 10e8) // Price = 10 - let quote = await mockAggFactory.deploy(8, 5e8) // Price = 5 - - let derived = await derivedFeedFactory.deploy( - base.address, - quote.address, - 6, - ) - - await expect((await derived.latestRoundData()).answer).to.equal( - 2e6 /* 2 */, - ) - }) - - it('base decimals are higher', async () => { - let base = await mockAggFactory.deploy( - 16, - BigNumber.from('100000000000000000'), - ) // Price = 10 - let quote = await mockAggFactory.deploy(8, 5e8) // Price = 5 - - let derived = await derivedFeedFactory.deploy( - base.address, - quote.address, - 10, - ) - - await expect((await derived.latestRoundData()).answer).to.equal( - 2e10 /* 2 */, - ) - }) - - it('base decimals are lower', async () => { - let base = await mockAggFactory.deploy(6, 10e6) // Price = 10 - let quote = await mockAggFactory.deploy(8, 5e8) // Price = 5 - - let derived = await derivedFeedFactory.deploy( - base.address, - quote.address, - 10, - ) - - await expect((await derived.latestRoundData()).answer).to.equal( - 2e10 /* 2 */, - ) - }) - }) -}) diff --git a/core/gethwrappers/generated/derived_price_feed_wrapper/derived_price_feed_wrapper.go 
b/core/gethwrappers/generated/derived_price_feed_wrapper/derived_price_feed_wrapper.go deleted file mode 100644 index 5e4e4be0e36..00000000000 --- a/core/gethwrappers/generated/derived_price_feed_wrapper/derived_price_feed_wrapper.go +++ /dev/null @@ -1,395 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package derived_price_feed_wrapper - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription - _ = abi.ConvertType -) - -var DerivedPriceFeedMetaData = &bind.MetaData{ - ABI: 
"[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_base\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_quote\",\"type\":\"address\"},{\"internalType\":\"uint8\",\"name\":\"_decimals\",\"type\":\"uint8\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"BASE\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"DECIMALS\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"QUOTE\",\"outputs\":[{\"internalType\":\"contractAggregatorV3Interface\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"decimals\",\"outputs\":[{\"internalType\":\"uint8\",\"name\":\"\",\"type\":\"uint8\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"description\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint80\",\"name\":\"\",\"type\":\"uint80\"}],\"name\":\"getRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint80\",\"name\":\"\",\"type\":\"uint80\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"latestRoundData\",\"outputs\":[{\"internalType\":\"uint80\",\"name\":\"roundId\",\"type\":\"uint80\"},{\"internalType\":\"int256\",\"name\":\"answer\",\"type\":\"int256\"},{\"internalType\":\"uint256\",\"name\":\"startedAt\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"updatedAt\",\"type\":\"uin
t256\"},{\"internalType\":\"uint80\",\"name\":\"answeredInRound\",\"type\":\"uint80\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", - Bin: "0x60e060405234801561001057600080fd5b50604051610c14380380610c1483398101604081905261002f916100ec565b60ff8116158015906100455750601260ff821611155b6100895760405162461bcd60e51b8152602060048201526011602482015270496e76616c6964205f646563696d616c7360781b604482015260640160405180910390fd5b60f81b7fff000000000000000000000000000000000000000000000000000000000000001660c052606091821b6001600160601b0319908116608052911b1660a052610139565b80516001600160a01b03811681146100e757600080fd5b919050565b60008060006060848603121561010157600080fd5b61010a846100d0565b9250610118602085016100d0565b9150604084015160ff8116811461012e57600080fd5b809150509250925092565b60805160601c60a05160601c60c05160f81c610a6d6101a76000396000818160920152818160cd0152818161041a0152818161058f01526105bd0152600081816101950152818161044401526104ea0152600081816101e1015281816102cf01526103750152610a6d6000f3fe608060405234801561001057600080fd5b50600436106100885760003560e01c80639a6fc8f51161005b5780639a6fc8f5146101465780639c57983914610190578063ec342ad0146101dc578063feaf968c1461020357600080fd5b80632e0f26251461008d578063313ce567146100cb57806354fd4d50146100f15780637284e41614610107575b600080fd5b6100b47f000000000000000000000000000000000000000000000000000000000000000081565b60405160ff90911681526020015b60405180910390f35b7f00000000000000000000000000000000000000000000000000000000000000006100b4565b6100f9600081565b6040519081526020016100c2565b604080518082018252601481527f446572697665645072696365466565642e736f6c000000000000000000000000602082015290516100c2919061070c565b610159610154366004610674565b61020b565b6040805169ffffffffffffffffffff968716815260208101959095528401929092526060830152909116608082015260a0016100c2565b6101b77f00000000000000000000000
0000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100c2565b6101b77f000000000000000000000000000000000000000000000000000000000000000081565b6101596102a6565b60008060008060006040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161029d9060208082526027908201527f6e6f7420696d706c656d656e746564202d20757365206c6174657374526f756e60408201527f6444617461282900000000000000000000000000000000000000000000000000606082015260800190565b60405180910390fd5b6000806000806000806102b76102ca565b9096909550429450849350600092509050565b6000807f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b15801561033357600080fd5b505afa158015610347573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061036b9190610691565b50505091505060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663313ce5676040518163ffffffff1660e01b815260040160206040518083038186803b1580156103d957600080fd5b505afa1580156103ed573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061041191906106e9565b905061043e82827f0000000000000000000000000000000000000000000000000000000000000000610601565b915060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663feaf968c6040518163ffffffff1660e01b815260040160a06040518083038186803b1580156104a857600080fd5b505afa1580156104bc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104e09190610691565b50505091505060007f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff1663313ce5676040518163ffffffff1660e01b815260040160206040518083038186803b15801561054e57600080fd5b505afa158015610562573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061058691906106e9565b905
06105b382827f0000000000000000000000000000000000000000000000000000000000000000610601565b9150816105e460ff7f000000000000000000000000000000000000000000000000000000000000000016600a61086f565b6105ee9086610937565b6105f8919061077f565b94505050505090565b60008160ff168360ff16101561063a5761061b83836109f3565b6106299060ff16600a61086f565b6106339085610937565b905061066d565b8160ff168360ff16111561066a5761065282846109f3565b6106609060ff16600a61086f565b610633908561077f565b50825b9392505050565b60006020828403121561068657600080fd5b813561066d81610a45565b600080600080600060a086880312156106a957600080fd5b85516106b481610a45565b8095505060208601519350604086015192506060860151915060808601516106db81610a45565b809150509295509295909350565b6000602082840312156106fb57600080fd5b815160ff8116811461066d57600080fd5b600060208083528351808285015260005b818110156107395785810183015185820160400152820161071d565b8181111561074b576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b6000826107b5577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff83147f80000000000000000000000000000000000000000000000000000000000000008314161561080957610809610a16565b500590565b600181815b8085111561086757817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0482111561084d5761084d610a16565b8085161561085a57918102915b93841c9390800290610813565b509250929050565b600061066d838360008261088557506001610931565b8161089257506000610931565b81600181146108a857600281146108b2576108ce565b6001915050610931565b60ff8411156108c3576108c3610a16565b50506001821b610931565b5060208310610133831016604e8410600b84101617156108f1575081810a610931565b6108fb838361080e565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0482111561092d5761092d610a16565b0290505b92915050565b60007f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6000841360008413858304851182821616156109785
7610978610a16565b7f800000000000000000000000000000000000000000000000000000000000000060008712868205881281841616156109b3576109b3610a16565b600087129250878205871284841616156109cf576109cf610a16565b878505871281841616156109e5576109e5610a16565b505050929093029392505050565b600060ff821660ff841680821015610a0d57610a0d610a16565b90039392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b69ffffffffffffffffffff81168114610a5d57600080fd5b5056fea164736f6c6343000806000a", -} - -var DerivedPriceFeedABI = DerivedPriceFeedMetaData.ABI - -var DerivedPriceFeedBin = DerivedPriceFeedMetaData.Bin - -func DeployDerivedPriceFeed(auth *bind.TransactOpts, backend bind.ContractBackend, _base common.Address, _quote common.Address, _decimals uint8) (common.Address, *types.Transaction, *DerivedPriceFeed, error) { - parsed, err := DerivedPriceFeedMetaData.GetAbi() - if err != nil { - return common.Address{}, nil, nil, err - } - if parsed == nil { - return common.Address{}, nil, nil, errors.New("GetABI returned nil") - } - - address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(DerivedPriceFeedBin), backend, _base, _quote, _decimals) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &DerivedPriceFeed{DerivedPriceFeedCaller: DerivedPriceFeedCaller{contract: contract}, DerivedPriceFeedTransactor: DerivedPriceFeedTransactor{contract: contract}, DerivedPriceFeedFilterer: DerivedPriceFeedFilterer{contract: contract}}, nil -} - -type DerivedPriceFeed struct { - address common.Address - abi abi.ABI - DerivedPriceFeedCaller - DerivedPriceFeedTransactor - DerivedPriceFeedFilterer -} - -type DerivedPriceFeedCaller struct { - contract *bind.BoundContract -} - -type DerivedPriceFeedTransactor struct { - contract *bind.BoundContract -} - -type DerivedPriceFeedFilterer struct { - contract *bind.BoundContract -} - -type DerivedPriceFeedSession struct { - Contract *DerivedPriceFeed - CallOpts bind.CallOpts - 
TransactOpts bind.TransactOpts -} - -type DerivedPriceFeedCallerSession struct { - Contract *DerivedPriceFeedCaller - CallOpts bind.CallOpts -} - -type DerivedPriceFeedTransactorSession struct { - Contract *DerivedPriceFeedTransactor - TransactOpts bind.TransactOpts -} - -type DerivedPriceFeedRaw struct { - Contract *DerivedPriceFeed -} - -type DerivedPriceFeedCallerRaw struct { - Contract *DerivedPriceFeedCaller -} - -type DerivedPriceFeedTransactorRaw struct { - Contract *DerivedPriceFeedTransactor -} - -func NewDerivedPriceFeed(address common.Address, backend bind.ContractBackend) (*DerivedPriceFeed, error) { - abi, err := abi.JSON(strings.NewReader(DerivedPriceFeedABI)) - if err != nil { - return nil, err - } - contract, err := bindDerivedPriceFeed(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &DerivedPriceFeed{address: address, abi: abi, DerivedPriceFeedCaller: DerivedPriceFeedCaller{contract: contract}, DerivedPriceFeedTransactor: DerivedPriceFeedTransactor{contract: contract}, DerivedPriceFeedFilterer: DerivedPriceFeedFilterer{contract: contract}}, nil -} - -func NewDerivedPriceFeedCaller(address common.Address, caller bind.ContractCaller) (*DerivedPriceFeedCaller, error) { - contract, err := bindDerivedPriceFeed(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &DerivedPriceFeedCaller{contract: contract}, nil -} - -func NewDerivedPriceFeedTransactor(address common.Address, transactor bind.ContractTransactor) (*DerivedPriceFeedTransactor, error) { - contract, err := bindDerivedPriceFeed(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &DerivedPriceFeedTransactor{contract: contract}, nil -} - -func NewDerivedPriceFeedFilterer(address common.Address, filterer bind.ContractFilterer) (*DerivedPriceFeedFilterer, error) { - contract, err := bindDerivedPriceFeed(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return 
&DerivedPriceFeedFilterer{contract: contract}, nil -} - -func bindDerivedPriceFeed(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := DerivedPriceFeedMetaData.GetAbi() - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil -} - -func (_DerivedPriceFeed *DerivedPriceFeedRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _DerivedPriceFeed.Contract.DerivedPriceFeedCaller.contract.Call(opts, result, method, params...) -} - -func (_DerivedPriceFeed *DerivedPriceFeedRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DerivedPriceFeed.Contract.DerivedPriceFeedTransactor.contract.Transfer(opts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _DerivedPriceFeed.Contract.DerivedPriceFeedTransactor.contract.Transact(opts, method, params...) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _DerivedPriceFeed.Contract.contract.Call(opts, result, method, params...) -} - -func (_DerivedPriceFeed *DerivedPriceFeedTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _DerivedPriceFeed.Contract.contract.Transfer(opts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _DerivedPriceFeed.Contract.contract.Transact(opts, method, params...) 
-} - -func (_DerivedPriceFeed *DerivedPriceFeedCaller) BASE(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _DerivedPriceFeed.contract.Call(opts, &out, "BASE") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -func (_DerivedPriceFeed *DerivedPriceFeedSession) BASE() (common.Address, error) { - return _DerivedPriceFeed.Contract.BASE(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCallerSession) BASE() (common.Address, error) { - return _DerivedPriceFeed.Contract.BASE(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCaller) DECIMALS(opts *bind.CallOpts) (uint8, error) { - var out []interface{} - err := _DerivedPriceFeed.contract.Call(opts, &out, "DECIMALS") - - if err != nil { - return *new(uint8), err - } - - out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) - - return out0, err - -} - -func (_DerivedPriceFeed *DerivedPriceFeedSession) DECIMALS() (uint8, error) { - return _DerivedPriceFeed.Contract.DECIMALS(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCallerSession) DECIMALS() (uint8, error) { - return _DerivedPriceFeed.Contract.DECIMALS(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCaller) QUOTE(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _DerivedPriceFeed.contract.Call(opts, &out, "QUOTE") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -func (_DerivedPriceFeed *DerivedPriceFeedSession) QUOTE() (common.Address, error) { - return _DerivedPriceFeed.Contract.QUOTE(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCallerSession) QUOTE() (common.Address, error) { - return _DerivedPriceFeed.Contract.QUOTE(&_DerivedPriceFeed.CallOpts) 
-} - -func (_DerivedPriceFeed *DerivedPriceFeedCaller) Decimals(opts *bind.CallOpts) (uint8, error) { - var out []interface{} - err := _DerivedPriceFeed.contract.Call(opts, &out, "decimals") - - if err != nil { - return *new(uint8), err - } - - out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) - - return out0, err - -} - -func (_DerivedPriceFeed *DerivedPriceFeedSession) Decimals() (uint8, error) { - return _DerivedPriceFeed.Contract.Decimals(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCallerSession) Decimals() (uint8, error) { - return _DerivedPriceFeed.Contract.Decimals(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCaller) Description(opts *bind.CallOpts) (string, error) { - var out []interface{} - err := _DerivedPriceFeed.contract.Call(opts, &out, "description") - - if err != nil { - return *new(string), err - } - - out0 := *abi.ConvertType(out[0], new(string)).(*string) - - return out0, err - -} - -func (_DerivedPriceFeed *DerivedPriceFeedSession) Description() (string, error) { - return _DerivedPriceFeed.Contract.Description(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCallerSession) Description() (string, error) { - return _DerivedPriceFeed.Contract.Description(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCaller) GetRoundData(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, *big.Int, error) { - var out []interface{} - err := _DerivedPriceFeed.contract.Call(opts, &out, "getRoundData", arg0) - - if err != nil { - return *new(*big.Int), *new(*big.Int), *new(*big.Int), *new(*big.Int), *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) - out2 := *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) - out3 := *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) - out4 := *abi.ConvertType(out[4], 
new(*big.Int)).(**big.Int) - - return out0, out1, out2, out3, out4, err - -} - -func (_DerivedPriceFeed *DerivedPriceFeedSession) GetRoundData(arg0 *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, *big.Int, error) { - return _DerivedPriceFeed.Contract.GetRoundData(&_DerivedPriceFeed.CallOpts, arg0) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCallerSession) GetRoundData(arg0 *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, *big.Int, error) { - return _DerivedPriceFeed.Contract.GetRoundData(&_DerivedPriceFeed.CallOpts, arg0) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCaller) LatestRoundData(opts *bind.CallOpts) (LatestRoundData, - - error) { - var out []interface{} - err := _DerivedPriceFeed.contract.Call(opts, &out, "latestRoundData") - - outstruct := new(LatestRoundData) - if err != nil { - return *outstruct, err - } - - outstruct.RoundId = *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - outstruct.Answer = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) - outstruct.StartedAt = *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) - outstruct.UpdatedAt = *abi.ConvertType(out[3], new(*big.Int)).(**big.Int) - outstruct.AnsweredInRound = *abi.ConvertType(out[4], new(*big.Int)).(**big.Int) - - return *outstruct, err - -} - -func (_DerivedPriceFeed *DerivedPriceFeedSession) LatestRoundData() (LatestRoundData, - - error) { - return _DerivedPriceFeed.Contract.LatestRoundData(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCallerSession) LatestRoundData() (LatestRoundData, - - error) { - return _DerivedPriceFeed.Contract.LatestRoundData(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCaller) Version(opts *bind.CallOpts) (*big.Int, error) { - var out []interface{} - err := _DerivedPriceFeed.contract.Call(opts, &out, "version") - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -func (_DerivedPriceFeed 
*DerivedPriceFeedSession) Version() (*big.Int, error) { - return _DerivedPriceFeed.Contract.Version(&_DerivedPriceFeed.CallOpts) -} - -func (_DerivedPriceFeed *DerivedPriceFeedCallerSession) Version() (*big.Int, error) { - return _DerivedPriceFeed.Contract.Version(&_DerivedPriceFeed.CallOpts) -} - -type LatestRoundData struct { - RoundId *big.Int - Answer *big.Int - StartedAt *big.Int - UpdatedAt *big.Int - AnsweredInRound *big.Int -} - -func (_DerivedPriceFeed *DerivedPriceFeed) Address() common.Address { - return _DerivedPriceFeed.address -} - -type DerivedPriceFeedInterface interface { - BASE(opts *bind.CallOpts) (common.Address, error) - - DECIMALS(opts *bind.CallOpts) (uint8, error) - - QUOTE(opts *bind.CallOpts) (common.Address, error) - - Decimals(opts *bind.CallOpts) (uint8, error) - - Description(opts *bind.CallOpts) (string, error) - - GetRoundData(opts *bind.CallOpts, arg0 *big.Int) (*big.Int, *big.Int, *big.Int, *big.Int, *big.Int, error) - - LatestRoundData(opts *bind.CallOpts) (LatestRoundData, - - error) - - Version(opts *bind.CallOpts) (*big.Int, error) - - Address() common.Address -} diff --git a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt index 91bd67ca505..da65cc085ff 100644 --- a/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt +++ b/core/gethwrappers/generation/generated-wrapper-dependency-versions-do-not-edit.txt @@ -15,7 +15,6 @@ chain_specific_util_helper: ../../contracts/solc/v0.8.6/ChainSpecificUtilHelper. 
consumer_wrapper: ../../contracts/solc/v0.7/Consumer.abi ../../contracts/solc/v0.7/Consumer.bin 894d1cbd920dccbd36d92918c1037c6ded34f66f417ccb18ec3f33c64ef83ec5 cron_upkeep_factory_wrapper: ../../contracts/solc/v0.8.6/CronUpkeepFactory.abi - dacb0f8cdf54ae9d2781c5e720fc314b32ed5e58eddccff512c75d6067292cd7 cron_upkeep_wrapper: ../../contracts/solc/v0.8.6/CronUpkeep.abi - 362fcfcf30a6ab3acff83095ea4b2b9056dd5e9dcb94bc5411aae58995d22709 -derived_price_feed_wrapper: ../../contracts/solc/v0.8.6/DerivedPriceFeed.abi ../../contracts/solc/v0.8.6/DerivedPriceFeed.bin c8542e6c850c2d0fffb79a7f7213dc927ec64e6ddd54e1224cb2fb4a13aabdd0 dummy_protocol_wrapper: ../../contracts/solc/v0.8.16/DummyProtocol.abi ../../contracts/solc/v0.8.16/DummyProtocol.bin 583a448170b13abf7ed64e406e8177d78c9e55ab44efd141eee60de23a71ee3b flags_wrapper: ../../contracts/solc/v0.6/Flags.abi ../../contracts/solc/v0.6/Flags.bin 2034d1b562ca37a63068851915e3703980276e8d5f7db6db8a3351a49d69fc4a flux_aggregator_wrapper: ../../contracts/solc/v0.6/FluxAggregator.abi ../../contracts/solc/v0.6/FluxAggregator.bin a3b0a6396c4aa3b5ee39b3c4bd45efc89789d4859379a8a92caca3a0496c5794 diff --git a/core/gethwrappers/go_generate.go b/core/gethwrappers/go_generate.go index 2680b899b1e..99fb80a5118 100644 --- a/core/gethwrappers/go_generate.go +++ b/core/gethwrappers/go_generate.go @@ -127,7 +127,6 @@ package gethwrappers // Aggregators //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/AggregatorV2V3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV2V3Interface.bin AggregatorV2V3Interface aggregator_v2v3_interface //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/AggregatorV3Interface.abi ../../contracts/solc/v0.8.6/AggregatorV3Interface.bin AggregatorV3Interface aggregator_v3_interface -//go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/DerivedPriceFeed.abi ../../contracts/solc/v0.8.6/DerivedPriceFeed.bin DerivedPriceFeed 
derived_price_feed_wrapper //go:generate go run ./generation/generate/wrap.go ../../contracts/solc/v0.8.6/MockAggregatorProxy.abi ../../contracts/solc/v0.8.6/MockAggregatorProxy.bin MockAggregatorProxy mock_aggregator_proxy // Log tester diff --git a/core/scripts/chaincli/command/feed/deploy.go b/core/scripts/chaincli/command/feed/deploy.go deleted file mode 100644 index 7397c6154f4..00000000000 --- a/core/scripts/chaincli/command/feed/deploy.go +++ /dev/null @@ -1,21 +0,0 @@ -package feed - -import ( - "github.com/spf13/cobra" - - "github.com/smartcontractkit/chainlink/core/scripts/chaincli/config" - "github.com/smartcontractkit/chainlink/core/scripts/chaincli/handler" -) - -// deployCmd represents the command to run the service. -var deployCmd = &cobra.Command{ - Use: "deploy", - Short: "Deploy price feed", - Long: `This command deploys price feeds.`, - Run: func(cmd *cobra.Command, args []string) { - cfg := config.New() - hdlr := handler.NewFeed(cfg) - - hdlr.DeployDerivedPriceFeed(cmd.Context()) - }, -} diff --git a/core/scripts/chaincli/command/feed/root.go b/core/scripts/chaincli/command/feed/root.go deleted file mode 100644 index 89d93efcb9b..00000000000 --- a/core/scripts/chaincli/command/feed/root.go +++ /dev/null @@ -1,16 +0,0 @@ -package feed - -import ( - "github.com/spf13/cobra" -) - -// RootCmd represents the root price feed sub-command to manage feeds. 
-var RootCmd = &cobra.Command{ - Use: "feed", - Short: "Manage price feeds", - Long: `This command represents a CLI interface to manage Chainlink Price Feeds.`, -} - -func init() { - RootCmd.AddCommand(deployCmd) -} diff --git a/core/scripts/chaincli/command/root.go b/core/scripts/chaincli/command/root.go index 8f5e10e74f3..06e6e61a47d 100644 --- a/core/scripts/chaincli/command/root.go +++ b/core/scripts/chaincli/command/root.go @@ -7,7 +7,6 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - "github.com/smartcontractkit/chainlink/core/scripts/chaincli/command/feed" "github.com/smartcontractkit/chainlink/core/scripts/chaincli/command/keeper" ) @@ -34,7 +33,6 @@ func init() { _ = viper.BindPFlag("config", RootCmd.PersistentFlags().Lookup("config")) RootCmd.AddCommand(keeper.RootCmd) - RootCmd.AddCommand(feed.RootCmd) RootCmd.AddCommand(BootstrapNodeCmd) RootCmd.AddCommand(RevertReasonCmd) } diff --git a/core/scripts/chaincli/handler/feed.go b/core/scripts/chaincli/handler/feed.go deleted file mode 100644 index b8481d4a3fc..00000000000 --- a/core/scripts/chaincli/handler/feed.go +++ /dev/null @@ -1,55 +0,0 @@ -package handler - -import ( - "context" - "log" - - "github.com/ethereum/go-ethereum/common" - - "github.com/smartcontractkit/chainlink/core/scripts/chaincli/config" - feed "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/generated/derived_price_feed_wrapper" -) - -// Feed is the price feeds commands handler -type Feed struct { - *baseHandler - - baseAddress common.Address - quoteAddress common.Address - decimals uint8 -} - -// NewFeed is the constructor of Feed -func NewFeed(cfg *config.Config) *Feed { - return &Feed{ - baseHandler: NewBaseHandler(cfg), - baseAddress: common.HexToAddress(cfg.FeedBaseAddr), - quoteAddress: common.HexToAddress(cfg.FeedQuoteAddr), - decimals: cfg.FeedDecimals, - } -} - -// DeployDerivedPriceFeed deploys and approves the derived price feed. 
-func (h *Feed) DeployDerivedPriceFeed(ctx context.Context) { - // Deploy derived price feed - log.Println("Deploying derived price feed...") - feedAddr, deployFeedTx, _, err := feed.DeployDerivedPriceFeed(h.buildTxOpts(ctx), h.client, - h.baseAddress, - h.quoteAddress, - h.decimals, - ) - if err != nil { - log.Fatal("DeployDerivedPriceFeed failed: ", err) - } - log.Println("Waiting for derived price feed contract deployment confirmation...", deployFeedTx.Hash().Hex()) - h.waitDeployment(ctx, deployFeedTx) - log.Println(feedAddr.Hex(), ": Derived price feed successfully deployed - ", deployFeedTx.Hash().Hex()) - - // Approve derived price feed - approveRegistryTx, err := h.linkToken.Approve(h.buildTxOpts(ctx), feedAddr, h.approveAmount) - if err != nil { - log.Fatal(feedAddr.Hex(), ": Approve failed - ", err) - } - h.waitTx(ctx, approveRegistryTx) - log.Println(feedAddr.Hex(), ": Derived price feed successfully approved - ", approveRegistryTx.Hash().Hex()) -} From 3f492fe7affb2575daf974ce44d0b2b983eee535 Mon Sep 17 00:00:00 2001 From: Makram Date: Mon, 16 Oct 2023 15:44:35 +0300 Subject: [PATCH 6/9] chore: prefix internal functions w/ underscore (#10943) * chore: prefix internal functions w/ underscore Where applicable and where it doesn't break existing contracts and backwards compatibility. 
* fix: solhint --- .../src/v0.8/vrf/BatchBlockhashStore.sol | 5 +- .../src/v0.8/vrf/BatchVRFCoordinatorV2.sol | 12 +- contracts/src/v0.8/vrf/KeepersVRFConsumer.sol | 6 +- contracts/src/v0.8/vrf/VRF.sol | 149 ++++++++---------- contracts/src/v0.8/vrf/VRFCoordinatorV2.sol | 38 ++--- contracts/src/v0.8/vrf/VRFOwner.sol | 21 ++- contracts/src/v0.8/vrf/VRFV2Wrapper.sol | 28 ++-- .../vrf/dev/BatchVRFCoordinatorV2Plus.sol | 12 +- .../src/v0.8/vrf/dev/SubscriptionAPI.sol | 10 +- .../src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol | 52 +++--- .../src/v0.8/vrf/dev/VRFV2PlusWrapper.sol | 39 ++--- .../testhelpers/ExposedVRFCoordinatorV2_5.sol | 6 +- .../VRFCoordinatorV2PlusUpgradedVersion.sol | 52 +++--- .../VRFCoordinatorV2Plus_V2Example.sol | 5 +- .../testhelpers/VRFV2PlusConsumerExample.sol | 7 +- .../v0.8/vrf/testhelpers/VRFTestHelper.sol | 28 ++-- 16 files changed, 205 insertions(+), 265 deletions(-) diff --git a/contracts/src/v0.8/vrf/BatchBlockhashStore.sol b/contracts/src/v0.8/vrf/BatchBlockhashStore.sol index 54054344fe7..e55616924cd 100644 --- a/contracts/src/v0.8/vrf/BatchBlockhashStore.sol +++ b/contracts/src/v0.8/vrf/BatchBlockhashStore.sol @@ -29,7 +29,7 @@ contract BatchBlockhashStore { for (uint256 i = 0; i < blockNumbers.length; i++) { // skip the block if it's not storeable, the caller will have to check // after the transaction is mined to see if the blockhash was truly stored. - if (!storeableBlock(blockNumbers[i])) { + if (!_storeableBlock(blockNumbers[i])) { continue; } BHS.store(blockNumbers[i]); @@ -73,8 +73,7 @@ contract BatchBlockhashStore { * using the blockhash() instruction. 
* @param blockNumber the block number to check if it's storeable with blockhash() */ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function storeableBlock(uint256 blockNumber) private view returns (bool) { + function _storeableBlock(uint256 blockNumber) private view returns (bool) { // handle edge case on simulated chains which possibly have < 256 blocks total. return ChainSpecificUtil._getBlockNumber() <= 256 ? true : blockNumber >= (ChainSpecificUtil._getBlockNumber() - 256); diff --git a/contracts/src/v0.8/vrf/BatchVRFCoordinatorV2.sol b/contracts/src/v0.8/vrf/BatchVRFCoordinatorV2.sol index 1072289e88e..b35df41d1e3 100644 --- a/contracts/src/v0.8/vrf/BatchVRFCoordinatorV2.sol +++ b/contracts/src/v0.8/vrf/BatchVRFCoordinatorV2.sol @@ -32,10 +32,10 @@ contract BatchVRFCoordinatorV2 { try COORDINATOR.fulfillRandomWords(proofs[i], rcs[i]) returns (uint96 /* payment */) { continue; } catch Error(string memory reason) { - uint256 requestId = getRequestIdFromProof(proofs[i]); + uint256 requestId = _getRequestIdFromProof(proofs[i]); emit ErrorReturned(requestId, reason); } catch (bytes memory lowLevelData) { - uint256 requestId = getRequestIdFromProof(proofs[i]); + uint256 requestId = _getRequestIdFromProof(proofs[i]); emit RawErrorReturned(requestId, lowLevelData); } } @@ -45,8 +45,7 @@ contract BatchVRFCoordinatorV2 { * @notice Returns the proving key hash associated with this public key. * @param publicKey the key to return the hash of. */ - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function hashOfKey(uint256[2] memory publicKey) internal pure returns (bytes32) { + function _hashOfKey(uint256[2] memory publicKey) internal pure returns (bytes32) { return keccak256(abi.encode(publicKey)); } @@ -54,9 +53,8 @@ contract BatchVRFCoordinatorV2 { * @notice Returns the request ID of the request associated with the given proof. 
* @param proof the VRF proof provided by the VRF oracle. */ - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function getRequestIdFromProof(VRFTypes.Proof memory proof) internal pure returns (uint256) { - bytes32 keyHash = hashOfKey(proof.pk); + function _getRequestIdFromProof(VRFTypes.Proof memory proof) internal pure returns (uint256) { + bytes32 keyHash = _hashOfKey(proof.pk); return uint256(keccak256(abi.encode(keyHash, proof.seed))); } } diff --git a/contracts/src/v0.8/vrf/KeepersVRFConsumer.sol b/contracts/src/v0.8/vrf/KeepersVRFConsumer.sol index 438696c7f4d..a18c6e03798 100644 --- a/contracts/src/v0.8/vrf/KeepersVRFConsumer.sol +++ b/contracts/src/v0.8/vrf/KeepersVRFConsumer.sol @@ -6,7 +6,6 @@ import {VRFConsumerBaseV2} from "./VRFConsumerBaseV2.sol"; import {VRFCoordinatorV2Interface} from "./interfaces/VRFCoordinatorV2Interface.sol"; // solhint-disable chainlink-solidity/prefix-immutable-variables-with-i -// solhint-disable chainlink-solidity/prefix-internal-functions-with-underscore /** * @title KeepersVRFConsumer @@ -76,7 +75,7 @@ contract KeepersVRFConsumer is KeeperCompatibleInterface, VRFConsumerBaseV2 { if ((block.timestamp - s_lastTimeStamp) > UPKEEP_INTERVAL) { s_lastTimeStamp = block.timestamp; - requestRandomWords(); + _requestRandomWords(); } } @@ -85,6 +84,7 @@ contract KeepersVRFConsumer is KeeperCompatibleInterface, VRFConsumerBaseV2 { * @param requestId the VRF V2 request ID, provided at request time. * @param randomWords the randomness provided by Chainlink VRF. */ + // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore function fulfillRandomWords(uint256 requestId, uint256[] memory randomWords) internal override { // Check that the request exists. If not, revert. 
RequestRecord memory record = s_requests[requestId]; @@ -99,7 +99,7 @@ contract KeepersVRFConsumer is KeeperCompatibleInterface, VRFConsumerBaseV2 { /** * @notice Requests random words from Chainlink VRF. */ - function requestRandomWords() internal { + function _requestRandomWords() internal { uint256 requestId = COORDINATOR.requestRandomWords( KEY_HASH, SUBSCRIPTION_ID, diff --git a/contracts/src/v0.8/vrf/VRF.sol b/contracts/src/v0.8/vrf/VRF.sol index 7ec5f2d5a60..a19fc39ec3e 100644 --- a/contracts/src/v0.8/vrf/VRF.sol +++ b/contracts/src/v0.8/vrf/VRF.sol @@ -17,7 +17,7 @@ pragma solidity ^0.8.0; * **************************************************************************** * @dev USAGE - * @dev The main entry point is randomValueFromVRFProof. See its docstring. + * @dev The main entry point is _randomValueFromVRFProof. See its docstring. * **************************************************************************** * @dev PURPOSE @@ -57,18 +57,18 @@ pragma solidity ^0.8.0; * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 * @dev For curve-point multiplication, it's much cheaper to abuse ECRECOVER - * @dev - hashToCurve recursively hashes until it finds a curve x-ordinate. On + * @dev - _hashToCurve recursively hashes until it finds a curve x-ordinate. On * @dev the EVM, this is slightly more efficient than the recommendation in * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.1.1 * @dev step 5, to concatenate with a nonce then hash, and rehash with the * @dev nonce updated until a valid x-ordinate is found. - * @dev - hashToCurve does not include a cipher version string or the byte 0x1 + * @dev - _hashToCurve does not include a cipher version string or the byte 0x1 * @dev in the hash message, as recommended in step 5.B of the draft * @dev standard. They are unnecessary here because no variation in the * @dev cipher suite is allowed. 
- * @dev - Similarly, the hash input in scalarFromCurvePoints does not include a + * @dev - Similarly, the hash input in _scalarFromCurvePoints does not include a * @dev commitment to the cipher suite, either, which differs from step 2 of * @dev https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.3 * @dev . Also, the hash input is the concatenation of the uncompressed @@ -88,7 +88,7 @@ pragma solidity ^0.8.0; * @dev Full uniqueness: For any seed and valid VRF public key, there is * @dev exactly one VRF output which can be proved to come from that seed, in - * @dev the sense that the proof will pass verifyVRFProof. + * @dev the sense that the proof will pass _verifyVRFProof. * @dev Full collision resistance: It's cryptographically infeasible to find * @dev two seeds with same VRF output from a fixed, valid VRF key @@ -110,13 +110,13 @@ pragma solidity ^0.8.0; * @dev OTHER SECURITY CONSIDERATIONS * * @dev The seed input to the VRF could in principle force an arbitrary amount - * @dev of work in hashToCurve, by requiring extra rounds of hashing and + * @dev of work in _hashToCurve, by requiring extra rounds of hashing and * @dev checking whether that's yielded the x ordinate of a secp256k1 point. * @dev However, under the Random Oracle Model the probability of choosing a - * @dev point which forces n extra rounds in hashToCurve is 2⁻ⁿ. The base cost - * @dev for calling hashToCurve is about 25,000 gas, and each round of checking + * @dev point which forces n extra rounds in _hashToCurve is 2⁻ⁿ. The base cost + * @dev for calling _hashToCurve is about 25,000 gas, and each round of checking * @dev for a valid x ordinate costs about 15,555 gas, so to find a seed for - * @dev which hashToCurve would cost more than 2,017,000 gas, one would have to + * @dev which _hashToCurve would cost more than 2,017,000 gas, one would have to * @dev try, in expectation, about 2¹²⁸ seeds, which is infeasible for any * @dev foreseeable computational resources. 
(25,000 + 128 * 15,555 < 2,017,000.) @@ -125,10 +125,10 @@ pragma solidity ^0.8.0; * @dev operation of this contract by choosing an adverse seed. * @dev (See TestMeasureHashToCurveGasCost for verification of the gas cost for - * @dev hashToCurve.) + * @dev _hashToCurve.) - * @dev It may be possible to make a secure constant-time hashToCurve function. - * @dev See notes in hashToCurve docstring. + * @dev It may be possible to make a secure constant-time _hashToCurve function. + * @dev See notes in _hashToCurve docstring. */ contract VRF { // See https://www.secg.org/sec2-v2.pdf, section 2.4.1, for these constants. @@ -142,8 +142,7 @@ contract VRF { // (base^exponent) % FIELD_SIZE // Cribbed from https://medium.com/@rbkhmrcr/precompiles-solidity-e5d29bd428c4 - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function bigModExp(uint256 base, uint256 exponent) internal view returns (uint256 exponentiation) { + function _bigModExp(uint256 base, uint256 exponent) internal view returns (uint256 exponentiation) { uint256 callResult; uint256[6] memory bigModExpContractInputs; bigModExpContractInputs[0] = WORD_LENGTH_BYTES; // Length of base @@ -175,34 +174,30 @@ contract VRF { uint256 private constant SQRT_POWER = (FIELD_SIZE + 1) >> 2; // Computes a s.t. a^2 = x in the field. Assumes a exists - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function squareRoot(uint256 x) internal view returns (uint256) { - return bigModExp(x, SQRT_POWER); + function _squareRoot(uint256 x) internal view returns (uint256) { + return _bigModExp(x, SQRT_POWER); } // The value of y^2 given that (x,y) is on secp256k1. - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function ySquared(uint256 x) internal pure returns (uint256) { + function _ySquared(uint256 x) internal pure returns (uint256) { // Curve is y^2=x^3+7. 
See section 2.4.1 of https://www.secg.org/sec2-v2.pdf uint256 xCubed = mulmod(x, mulmod(x, x, FIELD_SIZE), FIELD_SIZE); return addmod(xCubed, 7, FIELD_SIZE); } // True iff p is on secp256k1 - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function isOnCurve(uint256[2] memory p) internal pure returns (bool) { + function _isOnCurve(uint256[2] memory p) internal pure returns (bool) { // Section 2.3.6. in https://www.secg.org/sec1-v2.pdf // requires each ordinate to be in [0, ..., FIELD_SIZE-1] // solhint-disable-next-line custom-errors require(p[0] < FIELD_SIZE, "invalid x-ordinate"); // solhint-disable-next-line custom-errors require(p[1] < FIELD_SIZE, "invalid y-ordinate"); - return ySquared(p[0]) == mulmod(p[1], p[1], FIELD_SIZE); + return _ySquared(p[0]) == mulmod(p[1], p[1], FIELD_SIZE); } // Hash x uniformly into {0, ..., FIELD_SIZE-1}. - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function fieldHash(bytes memory b) internal pure returns (uint256 x_) { + function _fieldHash(bytes memory b) internal pure returns (uint256 x_) { x_ = uint256(keccak256(b)); // Rejecting if x >= FIELD_SIZE corresponds to step 2.1 in section 2.3.4 of // http://www.secg.org/sec1-v2.pdf , which is part of the definition of @@ -218,11 +213,10 @@ contract VRF { // step 5.C, which references arbitrary_string_to_point, defined in // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.5 as // returning the point with given x ordinate, and even y ordinate. 
- // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function newCandidateSecp256k1Point(bytes memory b) internal view returns (uint256[2] memory p) { + function _newCandidateSecp256k1Point(bytes memory b) internal view returns (uint256[2] memory p) { unchecked { - p[0] = fieldHash(b); - p[1] = squareRoot(ySquared(p[0])); + p[0] = _fieldHash(b); + p[1] = _squareRoot(_ySquared(p[0])); if (p[1] % 2 == 1) { // Note that 0 <= p[1] < FIELD_SIZE // so this cannot wrap, we use unchecked to save gas. @@ -231,7 +225,7 @@ contract VRF { } } - // Domain-separation tag for initial hash in hashToCurve. Corresponds to + // Domain-separation tag for initial hash in _hashToCurve. Corresponds to // vrf.go/hashToCurveHashPrefix uint256 internal constant HASH_TO_CURVE_HASH_PREFIX = 1; @@ -249,11 +243,10 @@ contract VRF { // // This would greatly simplify the analysis in "OTHER SECURITY CONSIDERATIONS" // https://www.pivotaltracker.com/story/show/171120900 - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function hashToCurve(uint256[2] memory pk, uint256 input) internal view returns (uint256[2] memory rv) { - rv = newCandidateSecp256k1Point(abi.encodePacked(HASH_TO_CURVE_HASH_PREFIX, pk, input)); - while (!isOnCurve(rv)) { - rv = newCandidateSecp256k1Point(abi.encodePacked(rv[0])); + function _hashToCurve(uint256[2] memory pk, uint256 input) internal view returns (uint256[2] memory rv) { + rv = _newCandidateSecp256k1Point(abi.encodePacked(HASH_TO_CURVE_HASH_PREFIX, pk, input)); + while (!_isOnCurve(rv)) { + rv = _newCandidateSecp256k1Point(abi.encodePacked(rv[0])); } } @@ -267,8 +260,7 @@ contract VRF { * @param product: secp256k1 expected to be multiplier * multiplicand * @return verifies true iff product==scalar*multiplicand, with cryptographically high probability */ - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function ecmulVerify( + function 
_ecmulVerify( uint256[2] memory multiplicand, uint256 scalar, uint256[2] memory product @@ -289,8 +281,7 @@ contract VRF { } // Returns x1/z1-x2/z2=(x1z2-x2z1)/(z1z2) in projective coordinates on P¹(𝔽ₙ) - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function projectiveSub( + function _projectiveSub( uint256 x1, uint256 z1, uint256 x2, @@ -306,8 +297,7 @@ contract VRF { } // Returns x1/z1*x2/z2=(x1x2)/(z1z2), in projective coordinates on P¹(𝔽ₙ) - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function projectiveMul( + function _projectiveMul( uint256 x1, uint256 z1, uint256 x2, @@ -322,7 +312,7 @@ contract VRF { @dev Using projective coordinates avoids costly divisions @dev To use this with p and q in affine coordinates, call - @dev projectiveECAdd(px, py, qx, qy). This will return + @dev _projectiveECAdd(px, py, qx, qy). This will return @dev the addition of (px, py, 1) and (qx, qy, 1), in the @dev secp256k1 group. @@ -332,7 +322,7 @@ contract VRF { @dev This function assumes [px,py,1],[qx,qy,1] are valid projective coordinates of secp256k1 points. That is safe in this contract, - because this method is only used by linearCombination, which checks + because this method is only used by _linearCombination, which checks points are on the curve via ecrecover. ************************************************************************** @param px The first affine coordinate of the first summand @@ -348,8 +338,7 @@ contract VRF { @return sy @return sz */ - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function projectiveECAdd( + function _projectiveECAdd( uint256 px, uint256 py, uint256 qx, @@ -360,11 +349,11 @@ contract VRF { // "Guide to Elliptic Curve Cryptography" by Hankerson, Menezes and Vanstone // We take the equations there for (sx,sy), and homogenize them to // projective coordinates. 
That way, no inverses are required, here, and we - // only need the one inverse in affineECAdd. + // only need the one inverse in _affineECAdd. // We only need the "point addition" equations from Hankerson et al. Can // skip the "point doubling" equations because p1 == p2 is cryptographically - // impossible, and required not to be the case in linearCombination. + // impossible, and required not to be the case in _linearCombination. // Add extra "projective coordinate" to the two points (uint256 z1, uint256 z2) = (1, 1); @@ -376,15 +365,15 @@ contract VRF { uint256 dx; // Accumulates denominator from sx calculation // sx=((qy-py)/(qx-px))^2-px-qx - (sx, dx) = projectiveMul(lx, lz, lx, lz); // ((qy-py)/(qx-px))^2 - (sx, dx) = projectiveSub(sx, dx, px, z1); // ((qy-py)/(qx-px))^2-px - (sx, dx) = projectiveSub(sx, dx, qx, z2); // ((qy-py)/(qx-px))^2-px-qx + (sx, dx) = _projectiveMul(lx, lz, lx, lz); // ((qy-py)/(qx-px))^2 + (sx, dx) = _projectiveSub(sx, dx, px, z1); // ((qy-py)/(qx-px))^2-px + (sx, dx) = _projectiveSub(sx, dx, qx, z2); // ((qy-py)/(qx-px))^2-px-qx uint256 dy; // Accumulates denominator from sy calculation // sy=((qy-py)/(qx-px))(px-sx)-py - (sy, dy) = projectiveSub(px, z1, sx, dx); // px-sx - (sy, dy) = projectiveMul(sy, dy, lx, lz); // ((qy-py)/(qx-px))(px-sx) - (sy, dy) = projectiveSub(sy, dy, py, z1); // ((qy-py)/(qx-px))(px-sx)-py + (sy, dy) = _projectiveSub(px, z1, sx, dx); // px-sx + (sy, dy) = _projectiveMul(sy, dy, lx, lz); // ((qy-py)/(qx-px))(px-sx) + (sy, dy) = _projectiveSub(sy, dy, py, z1); // ((qy-py)/(qx-px))(px-sx)-py if (dx != dy) { // Cross-multiply to put everything over a common denominator @@ -400,13 +389,12 @@ contract VRF { // p1+p2, as affine points on secp256k1. // - // invZ must be the inverse of the z returned by projectiveECAdd(p1, p2). + // invZ must be the inverse of the z returned by _projectiveECAdd(p1, p2). // It is computed off-chain to save gas. 
// - // p1 and p2 must be distinct, because projectiveECAdd doesn't handle + // p1 and p2 must be distinct, because _projectiveECAdd doesn't handle // point doubling. - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function affineECAdd( + function _affineECAdd( uint256[2] memory p1, uint256[2] memory p2, uint256 invZ @@ -414,7 +402,7 @@ contract VRF { uint256 x; uint256 y; uint256 z; - (x, y, z) = projectiveECAdd(p1[0], p1[1], p2[0], p2[1]); + (x, y, z) = _projectiveECAdd(p1[0], p1[1], p2[0], p2[1]); // solhint-disable-next-line custom-errors require(mulmod(z, invZ, FIELD_SIZE) == 1, "invZ must be inverse of z"); // Clear the z ordinate of the projective representation by dividing through @@ -424,8 +412,7 @@ contract VRF { // True iff address(c*p+s*g) == lcWitness, where g is generator. (With // cryptographically high probability.) - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function verifyLinearCombinationWithGenerator( + function _verifyLinearCombinationWithGenerator( uint256 c, uint256[2] memory p, uint256 s, @@ -457,9 +444,8 @@ contract VRF { // (cryptographically impossible) case that a prover accidentally derives // a proof with equal c*p1 and s*p2, they should retry with a different // proof nonce.) Assumes that all points are on secp256k1 - // (which is checked in verifyVRFProof below.) - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function linearCombination( + // (which is checked in _verifyVRFProof below.) 
+ function _linearCombination( uint256 c, uint256[2] memory p1, uint256[2] memory cp1Witness, @@ -473,18 +459,18 @@ contract VRF { // solhint-disable-next-line custom-errors require((cp1Witness[0] % FIELD_SIZE) != (sp2Witness[0] % FIELD_SIZE), "points in sum must be distinct"); // solhint-disable-next-line custom-errors - require(ecmulVerify(p1, c, cp1Witness), "First mul check failed"); + require(_ecmulVerify(p1, c, cp1Witness), "First mul check failed"); // solhint-disable-next-line custom-errors - require(ecmulVerify(p2, s, sp2Witness), "Second mul check failed"); - return affineECAdd(cp1Witness, sp2Witness, zInv); + require(_ecmulVerify(p2, s, sp2Witness), "Second mul check failed"); + return _affineECAdd(cp1Witness, sp2Witness, zInv); } } - // Domain-separation tag for the hash taken in scalarFromCurvePoints. + // Domain-separation tag for the hash taken in _scalarFromCurvePoints. // Corresponds to scalarFromCurveHashPrefix in vrf.go uint256 internal constant SCALAR_FROM_CURVE_POINTS_HASH_PREFIX = 2; - // Pseudo-random number from inputs. Matches vrf.go/scalarFromCurvePoints, and + // Pseudo-random number from inputs. Matches vrf.go/_scalarFromCurvePoints, and // https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-vrf-05#section-5.4.3 // The draft calls (in step 7, via the definition of string_to_int, in // https://datatracker.ietf.org/doc/html/rfc8017#section-4.2 ) for taking the @@ -495,8 +481,7 @@ contract VRF { // using the compressed representation of the points, if we collated the y // parities into a single bytes32. // https://www.pivotaltracker.com/story/show/171120588 - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function scalarFromCurvePoints( + function _scalarFromCurvePoints( uint256[2] memory hash, uint256[2] memory pk, uint256[2] memory gamma, @@ -508,15 +493,14 @@ contract VRF { // True if (gamma, c, s) is a correctly constructed randomness proof from pk // and seed. 
zInv must be the inverse of the third ordinate from - // projectiveECAdd applied to cGammaWitness and sHashWitness. Corresponds to + // _projectiveECAdd applied to cGammaWitness and sHashWitness. Corresponds to // section 5.3 of the IETF draft. // // TODO(alx): Since I'm only using pk in the ecrecover call, I could only pass // the x ordinate, and the parity of the y ordinate in the top bit of uWitness // (which I could make a uint256 without using any extra space.) Would save // about 2000 gas. https://www.pivotaltracker.com/story/show/170828567 - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function verifyVRFProof( + function _verifyVRFProof( uint256[2] memory pk, uint256[2] memory gamma, uint256 c, @@ -529,26 +513,26 @@ contract VRF { ) internal view { unchecked { // solhint-disable-next-line custom-errors - require(isOnCurve(pk), "public key is not on curve"); + require(_isOnCurve(pk), "public key is not on curve"); // solhint-disable-next-line custom-errors - require(isOnCurve(gamma), "gamma is not on curve"); + require(_isOnCurve(gamma), "gamma is not on curve"); // solhint-disable-next-line custom-errors - require(isOnCurve(cGammaWitness), "cGammaWitness is not on curve"); + require(_isOnCurve(cGammaWitness), "cGammaWitness is not on curve"); // solhint-disable-next-line custom-errors - require(isOnCurve(sHashWitness), "sHashWitness is not on curve"); + require(_isOnCurve(sHashWitness), "sHashWitness is not on curve"); // Step 5. of IETF draft section 5.3 (pk corresponds to 5.3's Y, and here // we use the address of u instead of u itself. Also, here we add the // terms instead of taking the difference, and in the proof construction in // vrf.GenerateProof, we correspondingly take the difference instead of // taking the sum as they do in step 7 of section 5.1.) 
// solhint-disable-next-line custom-errors - require(verifyLinearCombinationWithGenerator(c, pk, s, uWitness), "addr(c*pk+s*g)!=_uWitness"); + require(_verifyLinearCombinationWithGenerator(c, pk, s, uWitness), "addr(c*pk+s*g)!=_uWitness"); // Step 4. of IETF draft section 5.3 (pk corresponds to Y, seed to alpha_string) - uint256[2] memory hash = hashToCurve(pk, seed); + uint256[2] memory hash = _hashToCurve(pk, seed); // Step 6. of IETF draft section 5.3, but see note for step 5 about +/- terms - uint256[2] memory v = linearCombination(c, gamma, cGammaWitness, s, hash, sHashWitness, zInv); + uint256[2] memory v = _linearCombination(c, gamma, cGammaWitness, s, hash, sHashWitness, zInv); // Steps 7. and 8. of IETF draft section 5.3 - uint256 derivedC = scalarFromCurvePoints(hash, pk, gamma, uWitness, v); + uint256 derivedC = _scalarFromCurvePoints(hash, pk, gamma, uWitness, v); // solhint-disable-next-line custom-errors require(c == derivedC, "invalid proof"); } @@ -580,9 +564,8 @@ contract VRF { * @return output i.e., the random output implied by the proof * *************************************************************************** */ - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function randomValueFromVRFProof(Proof memory proof, uint256 seed) internal view returns (uint256 output) { - verifyVRFProof( + function _randomValueFromVRFProof(Proof memory proof, uint256 seed) internal view returns (uint256 output) { + _verifyVRFProof( proof.pk, proof.gamma, proof.c, diff --git a/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol b/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol index 994e3af7dcb..5150d263a8b 100644 --- a/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol +++ b/contracts/src/v0.8/vrf/VRFCoordinatorV2.sol @@ -315,7 +315,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo if (s_subscriptionConfigs[subId].owner == address(0)) { revert InvalidSubscription(); } - cancelSubscriptionHelper(subId, 
s_subscriptionConfigs[subId].owner); + _cancelSubscriptionHelper(subId, s_subscriptionConfigs[subId].owner); } /** @@ -387,7 +387,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo // The consequence for users is that they can send requests // for invalid keyHashes which will simply not be fulfilled. uint64 nonce = currentNonce + 1; - (uint256 requestId, uint256 preSeed) = computeRequestId(keyHash, msg.sender, subId, nonce); + (uint256 requestId, uint256 preSeed) = _computeRequestId(keyHash, msg.sender, subId, nonce); s_requestCommitments[requestId] = keccak256( abi.encode(requestId, ChainSpecificUtil._getBlockNumber(), subId, callbackGasLimit, numWords, msg.sender) @@ -416,8 +416,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo return s_requestCommitments[requestId]; } - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function computeRequestId( + function _computeRequestId( bytes32 keyHash, address sender, uint64 subId, @@ -431,8 +430,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo * @dev calls target address with exactly gasAmount gas and data as calldata * or reverts if at least gasAmount gas is not available. 
*/ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { assembly { let g := gas() // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow @@ -461,8 +459,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo return success; } - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function getRandomnessFromProof( + function _getRandomnessFromProof( Proof memory proof, RequestCommitment memory rc ) private view returns (bytes32 keyHash, uint256 requestId, uint256 randomness) { @@ -493,7 +490,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo // The seed actually used by the VRF machinery, mixing in the blockhash uint256 actualSeed = uint256(keccak256(abi.encodePacked(proof.seed, blockHash))); - randomness = VRF.randomValueFromVRFProof(proof, actualSeed); // Reverts on failure + randomness = VRF._randomValueFromVRFProof(proof, actualSeed); // Reverts on failure } /* @@ -527,7 +524,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo */ function fulfillRandomWords(Proof memory proof, RequestCommitment memory rc) external nonReentrant returns (uint96) { uint256 startGas = gasleft(); - (bytes32 keyHash, uint256 requestId, uint256 randomness) = getRandomnessFromProof(proof, rc); + (bytes32 keyHash, uint256 requestId, uint256 randomness) = _getRandomnessFromProof(proof, rc); uint256[] memory randomWords = new uint256[](rc.numWords); for (uint256 i = 0; i < rc.numWords; i++) { @@ -541,10 +538,10 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo // Important to not let them exhaust the gas budget and avoid oracle payment. 
// Do not allow any non-view/non-pure coordinator functions to be called // during the consumers callback code via reentrancyLock. - // Note that callWithExactGas will revert if we do not have sufficient gas + // Note that _callWithExactGas will revert if we do not have sufficient gas // to give the callee their requested amount. s_config.reentrancyLock = true; - bool success = callWithExactGas(rc.callbackGasLimit, rc.sender, resp); + bool success = _callWithExactGas(rc.callbackGasLimit, rc.sender, resp); s_config.reentrancyLock = false; // Increment the req count for fee tier selection. @@ -557,7 +554,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo // We also add the flat link fee to the payment amount. // Its specified in millionths of link, if s_config.fulfillmentFlatFeeLinkPPM = 1 // 1 link / 1e6 = 1e18 juels / 1e6 = 1e12 juels. - uint96 payment = calculatePaymentAmount( + uint96 payment = _calculatePaymentAmount( startGas, s_config.gasAfterPaymentCalculation, getFeeTier(reqCount), @@ -574,15 +571,14 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo } // Get the amount of gas used for fulfillment - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculatePaymentAmount( + function _calculatePaymentAmount( uint256 startGas, uint256 gasAfterPaymentCalculation, uint32 fulfillmentFlatFeeLinkPPM, uint256 weiPerUnitGas ) internal view returns (uint96) { int256 weiPerUnitLink; - weiPerUnitLink = getFeedData(); + weiPerUnitLink = _getFeedData(); if (weiPerUnitLink <= 0) { revert InvalidLinkWeiPrice(weiPerUnitLink); } @@ -598,8 +594,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo return uint96(paymentNoFee + fee); } - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function getFeedData() private view returns (int256) { + function _getFeedData() private view returns 
(int256) { uint32 stalenessSeconds = s_config.stalenessSeconds; bool staleFallback = stalenessSeconds > 0; uint256 timestamp; @@ -770,11 +765,10 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo if (pendingRequestExists(subId)) { revert PendingRequestExists(); } - cancelSubscriptionHelper(subId, to); + _cancelSubscriptionHelper(subId, to); } - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function cancelSubscriptionHelper(uint64 subId, address to) private nonReentrant { + function _cancelSubscriptionHelper(uint64 subId, address to) private nonReentrant { SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; Subscription memory sub = s_subscriptions[subId]; uint96 balance = sub.balance; @@ -801,7 +795,7 @@ contract VRFCoordinatorV2 is VRF, ConfirmedOwner, TypeAndVersionInterface, VRFCo SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; for (uint256 i = 0; i < subConfig.consumers.length; i++) { for (uint256 j = 0; j < s_provingKeyHashes.length; j++) { - (uint256 reqId, ) = computeRequestId( + (uint256 reqId, ) = _computeRequestId( s_provingKeyHashes[j], subConfig.consumers[i], subId, diff --git a/contracts/src/v0.8/vrf/VRFOwner.sol b/contracts/src/v0.8/vrf/VRFOwner.sol index 055308cac42..3b35eae8a47 100644 --- a/contracts/src/v0.8/vrf/VRFOwner.sol +++ b/contracts/src/v0.8/vrf/VRFOwner.sol @@ -194,8 +194,7 @@ contract VRFOwner is ConfirmedOwner, AuthorizedReceiver { * @param fallbackWeiPerUnitLink fallback eth/link price in the case of a stale feed * @param feeConfig fee tier configuration */ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function setConfigPrivate( + function _setConfig( uint16 minimumRequestConfirmations, uint32 maxGasLimit, uint32 stalenessSeconds, @@ -236,8 +235,7 @@ contract VRFOwner is ConfirmedOwner, AuthorizedReceiver { * @dev when too many local variables are in the same scope. 
* @return Config struct containing all relevant configs from the VRF coordinator. */ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function getConfigs() private view returns (Config memory) { + function _getConfigs() private view returns (Config memory) { ( uint16 minimumRequestConfirmations, uint32 maxGasLimit, @@ -286,15 +284,15 @@ contract VRFOwner is ConfirmedOwner, AuthorizedReceiver { VRFTypes.Proof memory proof, VRFTypes.RequestCommitment memory rc ) external validateAuthorizedSender { - uint256 requestId = requestIdFromProof(proof.pk, proof.seed); + uint256 requestId = _requestIdFromProof(proof.pk, proof.seed); // Get current configs to restore them to original values after - // calling setConfigPrivate. - Config memory cfg = getConfigs(); + // calling _setConfig. + Config memory cfg = _getConfigs(); - // call setConfigPrivate with the appropriate params in order to fulfill + // call _setConfig with the appropriate params in order to fulfill // an accidentally-underfunded request. - setConfigPrivate( + _setConfig( cfg.minimumRequestConfirmations, cfg.maxGasLimit, 1, // stalenessSeconds @@ -316,7 +314,7 @@ contract VRFOwner is ConfirmedOwner, AuthorizedReceiver { s_vrfCoordinator.fulfillRandomWords(proof, rc); // reset configuration back to old values. - setConfigPrivate( + _setConfig( cfg.minimumRequestConfirmations, cfg.maxGasLimit, cfg.stalenessSeconds, @@ -342,8 +340,7 @@ contract VRFOwner is ConfirmedOwner, AuthorizedReceiver { * @param proofSeed the proof seed * @dev Refer to VRFCoordinatorV2.getRandomnessFromProof for original implementation. 
*/ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function requestIdFromProof(uint256[2] memory publicKey, uint256 proofSeed) private view returns (uint256) { + function _requestIdFromProof(uint256[2] memory publicKey, uint256 proofSeed) private view returns (uint256) { bytes32 keyHash = s_vrfCoordinator.hashOfKey(publicKey); uint256 requestId = uint256(keccak256(abi.encode(keyHash, proofSeed))); return requestId; diff --git a/contracts/src/v0.8/vrf/VRFV2Wrapper.sol b/contracts/src/v0.8/vrf/VRFV2Wrapper.sol index a041c62c3fe..805c8d76cb6 100644 --- a/contracts/src/v0.8/vrf/VRFV2Wrapper.sol +++ b/contracts/src/v0.8/vrf/VRFV2Wrapper.sol @@ -221,8 +221,8 @@ contract VRFV2Wrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsumerBas function calculateRequestPrice( uint32 _callbackGasLimit ) external view override onlyConfiguredNotDisabled returns (uint256) { - int256 weiPerUnitLink = getFeedData(); - return calculateRequestPriceInternal(_callbackGasLimit, tx.gasprice, weiPerUnitLink); + int256 weiPerUnitLink = _getFeedData(); + return _calculateRequestPrice(_callbackGasLimit, tx.gasprice, weiPerUnitLink); } /** @@ -238,12 +238,11 @@ contract VRFV2Wrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsumerBas uint32 _callbackGasLimit, uint256 _requestGasPriceWei ) external view override onlyConfiguredNotDisabled returns (uint256) { - int256 weiPerUnitLink = getFeedData(); - return calculateRequestPriceInternal(_callbackGasLimit, _requestGasPriceWei, weiPerUnitLink); + int256 weiPerUnitLink = _getFeedData(); + return _calculateRequestPrice(_callbackGasLimit, _requestGasPriceWei, weiPerUnitLink); } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculateRequestPriceInternal( + function _calculateRequestPrice( uint256 _gas, uint256 _requestGasPrice, int256 _weiPerUnitLink @@ -286,9 +285,9 @@ contract VRFV2Wrapper is ConfirmedOwner, TypeAndVersionInterface, 
VRFConsumerBas _data, (uint32, uint16, uint32) ); - uint32 eip150Overhead = getEIP150Overhead(callbackGasLimit); - int256 weiPerUnitLink = getFeedData(); - uint256 price = calculateRequestPriceInternal(callbackGasLimit, tx.gasprice, weiPerUnitLink); + uint32 eip150Overhead = _getEIP150Overhead(callbackGasLimit); + int256 weiPerUnitLink = _getFeedData(); + uint256 price = _calculateRequestPrice(callbackGasLimit, tx.gasprice, weiPerUnitLink); // solhint-disable-next-line custom-errors require(_amount >= price, "fee too low"); // solhint-disable-next-line custom-errors @@ -347,14 +346,13 @@ contract VRFV2Wrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsumerBas VRFV2WrapperConsumerBase c; bytes memory resp = abi.encodeWithSelector(c.rawFulfillRandomWords.selector, _requestId, _randomWords); - bool success = callWithExactGas(callback.callbackGasLimit, callback.callbackAddress, resp); + bool success = _callWithExactGas(callback.callbackGasLimit, callback.callbackAddress, resp); if (!success) { emit WrapperFulfillmentFailed(_requestId, callback.callbackAddress); } } - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function getFeedData() private view returns (int256) { + function _getFeedData() private view returns (int256) { bool staleFallback = s_stalenessSeconds > 0; uint256 timestamp; int256 weiPerUnitLink; @@ -371,8 +369,7 @@ contract VRFV2Wrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsumerBas /** * @dev Calculates extra amount of gas required for running an assembly call() post-EIP150. 
*/ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function getEIP150Overhead(uint32 gas) private pure returns (uint32) { + function _getEIP150Overhead(uint32 gas) private pure returns (uint32) { return gas / 63 + 1; } @@ -380,8 +377,7 @@ contract VRFV2Wrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsumerBas * @dev calls target address with exactly gasAmount gas and data as calldata * or reverts if at least gasAmount gas is not available. */ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { assembly { let g := gas() // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow diff --git a/contracts/src/v0.8/vrf/dev/BatchVRFCoordinatorV2Plus.sol b/contracts/src/v0.8/vrf/dev/BatchVRFCoordinatorV2Plus.sol index 34b5ff6f189..06c44d4dcd1 100644 --- a/contracts/src/v0.8/vrf/dev/BatchVRFCoordinatorV2Plus.sol +++ b/contracts/src/v0.8/vrf/dev/BatchVRFCoordinatorV2Plus.sol @@ -32,10 +32,10 @@ contract BatchVRFCoordinatorV2Plus { try COORDINATOR.fulfillRandomWords(proofs[i], rcs[i]) returns (uint96 /* payment */) { continue; } catch Error(string memory reason) { - uint256 requestId = getRequestIdFromProof(proofs[i]); + uint256 requestId = _getRequestIdFromProof(proofs[i]); emit ErrorReturned(requestId, reason); } catch (bytes memory lowLevelData) { - uint256 requestId = getRequestIdFromProof(proofs[i]); + uint256 requestId = _getRequestIdFromProof(proofs[i]); emit RawErrorReturned(requestId, lowLevelData); } } @@ -45,8 +45,7 @@ contract BatchVRFCoordinatorV2Plus { * @notice Returns the proving key hash associated with this public key. * @param publicKey the key to return the hash of. 
*/ - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function hashOfKey(uint256[2] memory publicKey) internal pure returns (bytes32) { + function _hashOfKey(uint256[2] memory publicKey) internal pure returns (bytes32) { return keccak256(abi.encode(publicKey)); } @@ -54,9 +53,8 @@ contract BatchVRFCoordinatorV2Plus { * @notice Returns the request ID of the request associated with the given proof. * @param proof the VRF proof provided by the VRF oracle. */ - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function getRequestIdFromProof(VRFTypes.Proof memory proof) internal pure returns (uint256) { - bytes32 keyHash = hashOfKey(proof.pk); + function _getRequestIdFromProof(VRFTypes.Proof memory proof) internal pure returns (uint256) { + bytes32 keyHash = _hashOfKey(proof.pk); return uint256(keccak256(abi.encode(keyHash, proof.seed))); } } diff --git a/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol b/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol index 478ff4cce4a..e4708bb1fcf 100644 --- a/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol +++ b/contracts/src/v0.8/vrf/dev/SubscriptionAPI.sol @@ -151,7 +151,7 @@ abstract contract SubscriptionAPI is ConfirmedOwner, IERC677Receiver, IVRFSubscr if (s_subscriptionConfigs[subId].owner == address(0)) { revert InvalidSubscription(); } - cancelSubscriptionHelper(subId, s_subscriptionConfigs[subId].owner); + _cancelSubscriptionHelper(subId, s_subscriptionConfigs[subId].owner); } /** @@ -392,8 +392,7 @@ abstract contract SubscriptionAPI is ConfirmedOwner, IERC677Receiver, IVRFSubscr emit SubscriptionConsumerAdded(subId, consumer); } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function deleteSubscription(uint256 subId) internal returns (uint96 balance, uint96 nativeBalance) { + function _deleteSubscription(uint256 subId) internal returns (uint96 balance, uint96 nativeBalance) { SubscriptionConfig 
memory subConfig = s_subscriptionConfigs[subId]; Subscription memory sub = s_subscriptions[subId]; balance = sub.balance; @@ -411,9 +410,8 @@ abstract contract SubscriptionAPI is ConfirmedOwner, IERC677Receiver, IVRFSubscr return (balance, nativeBalance); } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function cancelSubscriptionHelper(uint256 subId, address to) internal { - (uint96 balance, uint96 nativeBalance) = deleteSubscription(subId); + function _cancelSubscriptionHelper(uint256 subId, address to) internal { + (uint96 balance, uint96 nativeBalance) = _deleteSubscription(subId); // Only withdraw LINK if the token is active and there is a balance. if (address(LINK) != address(0) && balance != 0) { diff --git a/contracts/src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol b/contracts/src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol index 31a555f8ac7..e0e46fe67b7 100644 --- a/contracts/src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol +++ b/contracts/src/v0.8/vrf/dev/VRFCoordinatorV2_5.sol @@ -269,7 +269,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { // The consequence for users is that they can send requests // for invalid keyHashes which will simply not be fulfilled. 
uint64 nonce = currentNonce + 1; - (uint256 requestId, uint256 preSeed) = computeRequestId(req.keyHash, msg.sender, req.subId, nonce); + (uint256 requestId, uint256 preSeed) = _computeRequestId(req.keyHash, msg.sender, req.subId, nonce); VRFV2PlusClient.ExtraArgsV1 memory extraArgs = _fromBytes(req.extraArgs); bytes memory extraArgsBytes = VRFV2PlusClient._argsToBytes(extraArgs); @@ -300,8 +300,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { return requestId; } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function computeRequestId( + function _computeRequestId( bytes32 keyHash, address sender, uint256 subId, @@ -315,8 +314,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { * @dev calls target address with exactly gasAmount gas and data as calldata * or reverts if at least gasAmount gas is not available. */ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { assembly { let g := gas() // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow @@ -351,8 +349,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { uint256 randomness; } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function getRandomnessFromProof( + function _getRandomnessFromProof( Proof memory proof, RequestCommitment memory rc ) internal view returns (Output memory) { @@ -384,7 +381,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { // The seed actually used by the VRF machinery, mixing in the blockhash uint256 actualSeed = uint256(keccak256(abi.encodePacked(proof.seed, blockHash))); - uint256 randomness = 
VRF.randomValueFromVRFProof(proof, actualSeed); // Reverts on failure + uint256 randomness = VRF._randomValueFromVRFProof(proof, actualSeed); // Reverts on failure return Output(keyHash, requestId, randomness); } @@ -397,7 +394,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { */ function fulfillRandomWords(Proof memory proof, RequestCommitment memory rc) external nonReentrant returns (uint96) { uint256 startGas = gasleft(); - Output memory output = getRandomnessFromProof(proof, rc); + Output memory output = _getRandomnessFromProof(proof, rc); uint256[] memory randomWords = new uint256[](rc.numWords); for (uint256 i = 0; i < rc.numWords; i++) { @@ -411,10 +408,10 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { // Important to not let them exhaust the gas budget and avoid oracle payment. // Do not allow any non-view/non-pure coordinator functions to be called // during the consumers callback code via reentrancyLock. - // Note that callWithExactGas will revert if we do not have sufficient gas + // Note that _callWithExactGas will revert if we do not have sufficient gas // to give the callee their requested amount. s_config.reentrancyLock = true; - bool success = callWithExactGas(rc.callbackGasLimit, rc.sender, resp); + bool success = _callWithExactGas(rc.callbackGasLimit, rc.sender, resp); s_config.reentrancyLock = false; // Increment the req count for the subscription. @@ -427,7 +424,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { // We want to charge users exactly for how much gas they use in their callback. // The gasAfterPaymentCalculation is meant to cover these additional operations where we // decrement the subscription balance and increment the oracles withdrawable balance. 
- uint96 payment = calculatePaymentAmount( + uint96 payment = _calculatePaymentAmount( startGas, s_config.gasAfterPaymentCalculation, tx.gasprice, @@ -455,8 +452,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { } } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculatePaymentAmount( + function _calculatePaymentAmount( uint256 startGas, uint256 gasAfterPaymentCalculation, uint256 weiPerUnitGas, @@ -464,7 +460,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { ) internal view returns (uint96) { if (nativePayment) { return - calculatePaymentAmountNative( + _calculatePaymentAmountNative( startGas, gasAfterPaymentCalculation, s_feeConfig.fulfillmentFlatFeeNativePPM, @@ -472,7 +468,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { ); } return - calculatePaymentAmountLink( + _calculatePaymentAmountLink( startGas, gasAfterPaymentCalculation, s_feeConfig.fulfillmentFlatFeeLinkPPM, @@ -480,8 +476,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { ); } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculatePaymentAmountNative( + function _calculatePaymentAmountNative( uint256 startGas, uint256 gasAfterPaymentCalculation, uint32 fulfillmentFlatFeePPM, @@ -498,15 +493,14 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { } // Get the amount of gas used for fulfillment - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculatePaymentAmountLink( + function _calculatePaymentAmountLink( uint256 startGas, uint256 gasAfterPaymentCalculation, uint32 fulfillmentFlatFeeLinkPPM, uint256 weiPerUnitGas ) internal view returns (uint96) { int256 weiPerUnitLink; - weiPerUnitLink = getFeedData(); + weiPerUnitLink = _getFeedData(); if (weiPerUnitLink <= 0) { revert 
InvalidLinkWeiPrice(weiPerUnitLink); } @@ -522,8 +516,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { return uint96(paymentNoFee + fee); } - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function getFeedData() private view returns (int256) { + function _getFeedData() private view returns (int256) { uint32 stalenessSeconds = s_config.stalenessSeconds; bool staleFallback = stalenessSeconds > 0; uint256 timestamp; @@ -543,7 +536,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; for (uint256 i = 0; i < subConfig.consumers.length; i++) { for (uint256 j = 0; j < s_provingKeyHashes.length; j++) { - (uint256 reqId, ) = computeRequestId( + (uint256 reqId, ) = _computeRequestId( s_provingKeyHashes[j], subConfig.consumers[i], subId, @@ -591,7 +584,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { if (pendingRequestExists(subId)) { revert PendingRequestExists(); } - cancelSubscriptionHelper(subId, to); + _cancelSubscriptionHelper(subId, to); } /*************************************************************************** @@ -627,8 +620,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { uint96 nativeBalance; } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function isTargetRegistered(address target) internal view returns (bool) { + function _isTargetRegistered(address target) internal view returns (bool) { for (uint256 i = 0; i < s_migrationTargets.length; i++) { if (s_migrationTargets[i] == target) { return true; @@ -638,7 +630,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { } function registerMigratableCoordinator(address target) external onlyOwner { - if (isTargetRegistered(target)) { + if (_isTargetRegistered(target)) { revert 
CoordinatorAlreadyRegistered(target); } s_migrationTargets.push(target); @@ -660,7 +652,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { } function migrate(uint256 subId, address newCoordinator) external nonReentrant { - if (!isTargetRegistered(newCoordinator)) { + if (!_isTargetRegistered(newCoordinator)) { revert CoordinatorNotRegistered(newCoordinator); } (uint96 balance, uint96 nativeBalance, , address owner, address[] memory consumers) = getSubscription(subId); @@ -678,7 +670,7 @@ contract VRFCoordinatorV2_5 is VRF, SubscriptionAPI, IVRFCoordinatorV2Plus { nativeBalance: nativeBalance }); bytes memory encodedData = abi.encode(migrationData); - deleteSubscription(subId); + _deleteSubscription(subId); IVRFCoordinatorV2PlusMigration(newCoordinator).onMigration{value: nativeBalance}(encodedData); // Only transfer LINK if the token is active and there is a balance. diff --git a/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol b/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol index 12557f73d2c..f6d5fec68d5 100644 --- a/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol +++ b/contracts/src/v0.8/vrf/dev/VRFV2PlusWrapper.sol @@ -276,14 +276,14 @@ contract VRFV2PlusWrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsume function calculateRequestPrice( uint32 _callbackGasLimit ) external view override onlyConfiguredNotDisabled returns (uint256) { - int256 weiPerUnitLink = getFeedData(); - return calculateRequestPriceInternal(_callbackGasLimit, tx.gasprice, weiPerUnitLink); + int256 weiPerUnitLink = _getFeedData(); + return _calculateRequestPrice(_callbackGasLimit, tx.gasprice, weiPerUnitLink); } function calculateRequestPriceNative( uint32 _callbackGasLimit ) external view override onlyConfiguredNotDisabled returns (uint256) { - return calculateRequestPriceNativeInternal(_callbackGasLimit, tx.gasprice); + return _calculateRequestPriceNative(_callbackGasLimit, tx.gasprice); } /** @@ -299,19 +299,18 @@ contract VRFV2PlusWrapper is 
ConfirmedOwner, TypeAndVersionInterface, VRFConsume uint32 _callbackGasLimit, uint256 _requestGasPriceWei ) external view override onlyConfiguredNotDisabled returns (uint256) { - int256 weiPerUnitLink = getFeedData(); - return calculateRequestPriceInternal(_callbackGasLimit, _requestGasPriceWei, weiPerUnitLink); + int256 weiPerUnitLink = _getFeedData(); + return _calculateRequestPrice(_callbackGasLimit, _requestGasPriceWei, weiPerUnitLink); } function estimateRequestPriceNative( uint32 _callbackGasLimit, uint256 _requestGasPriceWei ) external view override onlyConfiguredNotDisabled returns (uint256) { - return calculateRequestPriceNativeInternal(_callbackGasLimit, _requestGasPriceWei); + return _calculateRequestPriceNative(_callbackGasLimit, _requestGasPriceWei); } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculateRequestPriceNativeInternal(uint256 _gas, uint256 _requestGasPrice) internal view returns (uint256) { + function _calculateRequestPriceNative(uint256 _gas, uint256 _requestGasPrice) internal view returns (uint256) { // costWei is the base fee denominated in wei (native) // costWei takes into account the L1 posting costs of the VRF fulfillment // transaction, if we are on an L2. 
@@ -329,8 +328,7 @@ contract VRFV2PlusWrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsume return feeWithFlatFee; } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculateRequestPriceInternal( + function _calculateRequestPrice( uint256 _gas, uint256 _requestGasPrice, int256 _weiPerUnitLink @@ -374,9 +372,9 @@ contract VRFV2PlusWrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsume (uint32, uint16, uint32, bytes) ); checkPaymentMode(extraArgs, true); - uint32 eip150Overhead = getEIP150Overhead(callbackGasLimit); - int256 weiPerUnitLink = getFeedData(); - uint256 price = calculateRequestPriceInternal(callbackGasLimit, tx.gasprice, weiPerUnitLink); + uint32 eip150Overhead = _getEIP150Overhead(callbackGasLimit); + int256 weiPerUnitLink = _getFeedData(); + uint256 price = _calculateRequestPrice(callbackGasLimit, tx.gasprice, weiPerUnitLink); // solhint-disable-next-line custom-errors require(_amount >= price, "fee too low"); // solhint-disable-next-line custom-errors @@ -430,8 +428,8 @@ contract VRFV2PlusWrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsume ) external payable override returns (uint256 requestId) { checkPaymentMode(extraArgs, false); - uint32 eip150Overhead = getEIP150Overhead(_callbackGasLimit); - uint256 price = calculateRequestPriceNativeInternal(_callbackGasLimit, tx.gasprice); + uint32 eip150Overhead = _getEIP150Overhead(_callbackGasLimit); + uint256 price = _calculateRequestPriceNative(_callbackGasLimit, tx.gasprice); // solhint-disable-next-line custom-errors require(msg.value >= price, "fee too low"); // solhint-disable-next-line custom-errors @@ -505,14 +503,13 @@ contract VRFV2PlusWrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsume VRFV2PlusWrapperConsumerBase c; bytes memory resp = abi.encodeWithSelector(c.rawFulfillRandomWords.selector, _requestId, _randomWords); - bool success = callWithExactGas(callback.callbackGasLimit, callback.callbackAddress, 
resp); + bool success = _callWithExactGas(callback.callbackGasLimit, callback.callbackAddress, resp); if (!success) { emit WrapperFulfillmentFailed(_requestId, callback.callbackAddress); } } - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function getFeedData() private view returns (int256) { + function _getFeedData() private view returns (int256) { bool staleFallback = s_stalenessSeconds > 0; uint256 timestamp; int256 weiPerUnitLink; @@ -529,8 +526,7 @@ contract VRFV2PlusWrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsume /** * @dev Calculates extra amount of gas required for running an assembly call() post-EIP150. */ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function getEIP150Overhead(uint32 gas) private pure returns (uint32) { + function _getEIP150Overhead(uint32 gas) private pure returns (uint32) { return gas / 63 + 1; } @@ -538,8 +534,7 @@ contract VRFV2PlusWrapper is ConfirmedOwner, TypeAndVersionInterface, VRFConsume * @dev calls target address with exactly gasAmount gas and data as calldata * or reverts if at least gasAmount gas is not available. 
*/ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { assembly { let g := gas() // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol b/contracts/src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol index f9c34b2b611..02cb15e38a4 100644 --- a/contracts/src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol +++ b/contracts/src/v0.8/vrf/dev/testhelpers/ExposedVRFCoordinatorV2_5.sol @@ -16,18 +16,18 @@ contract ExposedVRFCoordinatorV2_5 is VRFCoordinatorV2_5 { uint256 subId, uint64 nonce ) external pure returns (uint256, uint256) { - return computeRequestId(keyHash, sender, subId, nonce); + return _computeRequestId(keyHash, sender, subId, nonce); } function isTargetRegisteredExternal(address target) external view returns (bool) { - return isTargetRegistered(target); + return _isTargetRegistered(target); } function getRandomnessFromProofExternal( Proof calldata proof, RequestCommitment calldata rc ) external view returns (Output memory) { - return getRandomnessFromProof(proof, rc); + return _getRandomnessFromProof(proof, rc); } function getActiveSubscriptionIdsLength() external view returns (uint256) { diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol index b6c76e1c713..4837411955c 100644 --- a/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2PlusUpgradedVersion.sol @@ -260,7 +260,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is // The consequence for users is that they can send requests 
// for invalid keyHashes which will simply not be fulfilled. uint64 nonce = currentNonce + 1; - (uint256 requestId, uint256 preSeed) = computeRequestId(req.keyHash, msg.sender, req.subId, nonce); + (uint256 requestId, uint256 preSeed) = _computeRequestId(req.keyHash, msg.sender, req.subId, nonce); VRFV2PlusClient.ExtraArgsV1 memory extraArgs = _fromBytes(req.extraArgs); bytes memory extraArgsBytes = VRFV2PlusClient._argsToBytes(extraArgs); @@ -291,8 +291,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is return requestId; } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function computeRequestId( + function _computeRequestId( bytes32 keyHash, address sender, uint256 subId, @@ -306,8 +305,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is * @dev calls target address with exactly gasAmount gas and data as calldata * or reverts if at least gasAmount gas is not available. */ - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { + function _callWithExactGas(uint256 gasAmount, address target, bytes memory data) private returns (bool success) { assembly { let g := gas() // Compute g -= GAS_FOR_CALL_EXACT_CHECK and check for underflow @@ -342,8 +340,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is uint256 randomness; } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function getRandomnessFromProof( + function _getRandomnessFromProof( Proof memory proof, RequestCommitment memory rc ) internal view returns (Output memory) { @@ -375,7 +372,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is // The seed actually used by the VRF machinery, mixing in the blockhash uint256 actualSeed = uint256(keccak256(abi.encodePacked(proof.seed, blockHash))); - uint256 randomness = VRF.randomValueFromVRFProof(proof, actualSeed); // Reverts on 
failure + uint256 randomness = VRF._randomValueFromVRFProof(proof, actualSeed); // Reverts on failure return Output(keyHash, requestId, randomness); } @@ -388,7 +385,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is */ function fulfillRandomWords(Proof memory proof, RequestCommitment memory rc) external nonReentrant returns (uint96) { uint256 startGas = gasleft(); - Output memory output = getRandomnessFromProof(proof, rc); + Output memory output = _getRandomnessFromProof(proof, rc); uint256[] memory randomWords = new uint256[](rc.numWords); for (uint256 i = 0; i < rc.numWords; i++) { @@ -402,10 +399,10 @@ contract VRFCoordinatorV2PlusUpgradedVersion is // Important to not let them exhaust the gas budget and avoid oracle payment. // Do not allow any non-view/non-pure coordinator functions to be called // during the consumers callback code via reentrancyLock. - // Note that callWithExactGas will revert if we do not have sufficient gas + // Note that _callWithExactGas will revert if we do not have sufficient gas // to give the callee their requested amount. s_config.reentrancyLock = true; - bool success = callWithExactGas(rc.callbackGasLimit, rc.sender, resp); + bool success = _callWithExactGas(rc.callbackGasLimit, rc.sender, resp); s_config.reentrancyLock = false; // Increment the req count for the subscription. @@ -418,7 +415,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is // We want to charge users exactly for how much gas they use in their callback. // The gasAfterPaymentCalculation is meant to cover these additional operations where we // decrement the subscription balance and increment the oracles withdrawable balance. 
- uint96 payment = calculatePaymentAmount( + uint96 payment = _calculatePaymentAmount( startGas, s_config.gasAfterPaymentCalculation, tx.gasprice, @@ -446,8 +443,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is } } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculatePaymentAmount( + function _calculatePaymentAmount( uint256 startGas, uint256 gasAfterPaymentCalculation, uint256 weiPerUnitGas, @@ -455,7 +451,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is ) internal view returns (uint96) { if (nativePayment) { return - calculatePaymentAmountNative( + _calculatePaymentAmountNative( startGas, gasAfterPaymentCalculation, s_feeConfig.fulfillmentFlatFeeNativePPM, @@ -463,7 +459,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is ); } return - calculatePaymentAmountLink( + _calculatePaymentAmountLink( startGas, gasAfterPaymentCalculation, s_feeConfig.fulfillmentFlatFeeLinkPPM, @@ -471,8 +467,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is ); } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculatePaymentAmountNative( + function _calculatePaymentAmountNative( uint256 startGas, uint256 gasAfterPaymentCalculation, uint32 fulfillmentFlatFeePPM, @@ -489,15 +484,14 @@ contract VRFCoordinatorV2PlusUpgradedVersion is } // Get the amount of gas used for fulfillment - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function calculatePaymentAmountLink( + function _calculatePaymentAmountLink( uint256 startGas, uint256 gasAfterPaymentCalculation, uint32 fulfillmentFlatFeeLinkPPM, uint256 weiPerUnitGas ) internal view returns (uint96) { int256 weiPerUnitLink; - weiPerUnitLink = getFeedData(); + weiPerUnitLink = _getFeedData(); if (weiPerUnitLink <= 0) { revert InvalidLinkWeiPrice(weiPerUnitLink); } @@ -513,8 +507,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is return uint96(paymentNoFee + fee); } - // 
solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function getFeedData() private view returns (int256) { + function _getFeedData() private view returns (int256) { uint32 stalenessSeconds = s_config.stalenessSeconds; bool staleFallback = stalenessSeconds > 0; uint256 timestamp; @@ -540,7 +533,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is SubscriptionConfig memory subConfig = s_subscriptionConfigs[subId]; for (uint256 i = 0; i < subConfig.consumers.length; i++) { for (uint256 j = 0; j < s_provingKeyHashes.length; j++) { - (uint256 reqId, ) = computeRequestId( + (uint256 reqId, ) = _computeRequestId( s_provingKeyHashes[j], subConfig.consumers[i], subId, @@ -588,7 +581,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is if (pendingRequestExists(subId)) { revert PendingRequestExists(); } - cancelSubscriptionHelper(subId, to); + _cancelSubscriptionHelper(subId, to); } /*************************************************************************** @@ -621,8 +614,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is uint96 nativeBalance; } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function isTargetRegistered(address target) internal view returns (bool) { + function _isTargetRegistered(address target) internal view returns (bool) { for (uint256 i = 0; i < s_migrationTargets.length; i++) { if (s_migrationTargets[i] == target) { return true; @@ -632,7 +624,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is } function registerMigratableCoordinator(address target) external onlyOwner { - if (isTargetRegistered(target)) { + if (_isTargetRegistered(target)) { revert CoordinatorAlreadyRegistered(target); } s_migrationTargets.push(target); @@ -640,7 +632,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is } function migrate(uint256 subId, address newCoordinator) external nonReentrant { - if (!isTargetRegistered(newCoordinator)) { + if (!_isTargetRegistered(newCoordinator)) { revert 
CoordinatorNotRegistered(newCoordinator); } (uint96 balance, uint96 nativeBalance, , address owner, address[] memory consumers) = getSubscription(subId); @@ -658,7 +650,7 @@ contract VRFCoordinatorV2PlusUpgradedVersion is nativeBalance: nativeBalance }); bytes memory encodedData = abi.encode(migrationData); - deleteSubscription(subId); + _deleteSubscription(subId); IVRFCoordinatorV2PlusMigration(newCoordinator).onMigration{value: nativeBalance}(encodedData); // Only transfer LINK if the token is active and there is a balance. diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol index af49abbf6b5..0204be807f5 100644 --- a/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFCoordinatorV2Plus_V2Example.sol @@ -135,11 +135,10 @@ contract VRFCoordinatorV2Plus_V2Example is IVRFCoordinatorV2PlusMigration { function requestRandomWords(VRFV2PlusClient.RandomWordsRequest calldata req) external returns (uint256 requestId) { Subscription memory sub = s_subscriptions[req.subId]; sub.reqCount = sub.reqCount + 1; - return handleRequest(msg.sender); + return _handleRequest(msg.sender); } - // solhint-disable-next-line chainlink-solidity/prefix-private-functions-with-underscore - function handleRequest(address requester) private returns (uint256) { + function _handleRequest(address requester) private returns (uint256) { s_requestId = s_requestId + 1; uint256 requestId = s_requestId; s_requestConsumerMapping[s_requestId] = requester; diff --git a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol index 6898e101f82..2ef4e5c021f 100644 --- a/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol +++ b/contracts/src/v0.8/vrf/dev/testhelpers/VRFV2PlusConsumerExample.sol @@ -34,8 +34,7 @@ contract 
VRFV2PlusConsumerExample is ConfirmedOwner, VRFConsumerBaseV2Plus { return resp.randomWords[idx]; } - // solhint-disable-next-line chainlink-solidity/prefix-internal-functions-with-underscore - function subscribe() internal returns (uint256) { + function _subscribe() internal returns (uint256) { if (s_subId == 0) { s_subId = s_vrfCoordinatorApiV1.createSubscription(); s_vrfCoordinatorApiV1.addConsumer(s_subId, address(this)); @@ -44,12 +43,12 @@ contract VRFV2PlusConsumerExample is ConfirmedOwner, VRFConsumerBaseV2Plus { } function createSubscriptionAndFundNative() external payable { - subscribe(); + _subscribe(); s_vrfCoordinatorApiV1.fundSubscriptionWithNative{value: msg.value}(s_subId); } function createSubscriptionAndFund(uint96 amount) external { - subscribe(); + _subscribe(); // Approve the link transfer. s_linkToken.transferAndCall(address(s_vrfCoordinator), amount, abi.encode(s_subId)); } diff --git a/contracts/src/v0.8/vrf/testhelpers/VRFTestHelper.sol b/contracts/src/v0.8/vrf/testhelpers/VRFTestHelper.sol index e3f9ee04824..bcead3f0c99 100644 --- a/contracts/src/v0.8/vrf/testhelpers/VRFTestHelper.sol +++ b/contracts/src/v0.8/vrf/testhelpers/VRFTestHelper.sol @@ -9,27 +9,27 @@ import {VRF} from "../VRF.sol"; */ contract VRFTestHelper is VRF { function bigModExp_(uint256 base, uint256 exponent) public view returns (uint256) { - return super.bigModExp(base, exponent); + return super._bigModExp(base, exponent); } function squareRoot_(uint256 x) public view returns (uint256) { - return super.squareRoot(x); + return super._squareRoot(x); } function ySquared_(uint256 x) public pure returns (uint256) { - return super.ySquared(x); + return super._ySquared(x); } function fieldHash_(bytes memory b) public pure returns (uint256) { - return super.fieldHash(b); + return super._fieldHash(b); } function hashToCurve_(uint256[2] memory pk, uint256 x) public view returns (uint256[2] memory) { - return super.hashToCurve(pk, x); + return super._hashToCurve(pk, x); } function 
ecmulVerify_(uint256[2] memory x, uint256 scalar, uint256[2] memory q) public pure returns (bool) { - return super.ecmulVerify(x, scalar, q); + return super._ecmulVerify(x, scalar, q); } function projectiveECAdd_( @@ -38,7 +38,7 @@ contract VRFTestHelper is VRF { uint256 qx, uint256 qy ) public pure returns (uint256, uint256, uint256) { - return super.projectiveECAdd(px, py, qx, qy); + return super._projectiveECAdd(px, py, qx, qy); } function affineECAdd_( @@ -46,7 +46,7 @@ contract VRFTestHelper is VRF { uint256[2] memory p2, uint256 invZ ) public pure returns (uint256[2] memory) { - return super.affineECAdd(p1, p2, invZ); + return super._affineECAdd(p1, p2, invZ); } function verifyLinearCombinationWithGenerator_( @@ -55,7 +55,7 @@ contract VRFTestHelper is VRF { uint256 s, address lcWitness ) public pure returns (bool) { - return super.verifyLinearCombinationWithGenerator(c, p, s, lcWitness); + return super._verifyLinearCombinationWithGenerator(c, p, s, lcWitness); } function linearCombination_( @@ -67,7 +67,7 @@ contract VRFTestHelper is VRF { uint256[2] memory sp2Witness, uint256 zInv ) public pure returns (uint256[2] memory) { - return super.linearCombination(c, p1, cp1Witness, s, p2, sp2Witness, zInv); + return super._linearCombination(c, p1, cp1Witness, s, p2, sp2Witness, zInv); } function scalarFromCurvePoints_( @@ -77,11 +77,11 @@ contract VRFTestHelper is VRF { address uWitness, uint256[2] memory v ) public pure returns (uint256) { - return super.scalarFromCurvePoints(hash, pk, gamma, uWitness, v); + return super._scalarFromCurvePoints(hash, pk, gamma, uWitness, v); } function isOnCurve_(uint256[2] memory p) public pure returns (bool) { - return super.isOnCurve(p); + return super._isOnCurve(p); } function verifyVRFProof_( @@ -95,10 +95,10 @@ contract VRFTestHelper is VRF { uint256[2] memory sHashWitness, uint256 zInv ) public view { - super.verifyVRFProof(pk, gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv); + super._verifyVRFProof(pk, 
gamma, c, s, seed, uWitness, cGammaWitness, sHashWitness, zInv); } function randomValueFromVRFProof_(Proof memory proof, uint256 seed) public view returns (uint256 output) { - return super.randomValueFromVRFProof(proof, seed); + return super._randomValueFromVRFProof(proof, seed); } } From 12271691635317473e5f3bebb74516383ae57191 Mon Sep 17 00:00:00 2001 From: Ilja Pavlovs Date: Mon, 16 Oct 2023 16:13:41 +0300 Subject: [PATCH 7/9] Chore/vrf 673 fix "On Demand VRFV2 Plus Load Test" workflow (#10947) * VRF-673: fix "On Demand VRFV2 Plus Load Test" workflow * VRF-673: removing artifacts * VRF-673: update * VRF-673: cleanup --- .../on-demand-vrfv2plus-load-test.yml | 23 +++++++++++++++---- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/.github/workflows/on-demand-vrfv2plus-load-test.yml b/.github/workflows/on-demand-vrfv2plus-load-test.yml index 28c47b453de..1faf1b84908 100644 --- a/.github/workflows/on-demand-vrfv2plus-load-test.yml +++ b/.github/workflows/on-demand-vrfv2plus-load-test.yml @@ -65,6 +65,8 @@ jobs: id-token: write contents: read env: + LOKI_URL: ${{ secrets.LOKI_URL }} + LOKI_TOKEN: ${{ secrets.LOKI_TOKEN }} SELECTED_NETWORKS: ${{ inputs.network }} VRFV2PLUS_TEST_DURATION: ${{ inputs.testDuration }} VRFV2PLUS_RATE_LIMIT_UNIT_DURATION: 1m @@ -104,8 +106,19 @@ jobs: uses: actions/checkout@v3 with: fetch-depth: 0 - - name: Run E2E soak tests - run: | - cd integration-tests/load/vrfv2plus - go test -v -timeout 8h -run TestVRFV2PlusLoad/vrfv2plus_soak_test - + - name: Run Tests + uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@eccde1970eca69f079d3efb3409938a72ade8497 # v2.2.13 + with: + test_command_to_run: cd ./integration-tests && go test -v -count=1 -timeout 6h -run TestVRFV2PlusLoad/vrfv2plus_soak_test ./load/vrfv2plus + test_download_vendor_packages_command: cd ./integration-tests && go mod download + cl_repo: ${{ inputs.chainlinkImage }} + cl_image_tag: ${{ inputs.chainlinkVersion }} + aws_registries: 
${{ secrets.QA_AWS_ACCOUNT_NUMBER }} + artifacts_name: vrf-test-logs + artifacts_location: ./integration-tests/load/logs/ + token: ${{ secrets.GITHUB_TOKEN }} + go_mod_path: ./integration-tests/go.mod + should_cleanup: false + QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} + QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} + QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} From fd1369c48862cdf4b6f5e85bb6d3f3f36aae836f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:27:52 +0000 Subject: [PATCH 8/9] Bump umani/changed-files from 4.0.1 to 4.1.0 (#10954) Bumps [umani/changed-files](https://github.com/umani/changed-files) from 4.0.1 to 4.1.0. - [Release notes](https://github.com/umani/changed-files/releases) - [Commits](https://github.com/umani/changed-files/compare/0239328a3a6268aad16af7c3e4efc78e32d6c0f0...d7f842d11479940a6036e3aacc6d35523e6ba978) --- updated-dependencies: - dependency-name: umani/changed-files dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Jordan Krage --- .github/workflows/changelog.yml | 2 +- .github/workflows/readme.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 7378a227ecb..eec5663a142 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Check for changed files id: changedfiles - uses: umani/changed-files@0239328a3a6268aad16af7c3e4efc78e32d6c0f0 # Version 4.0.1 + uses: umani/changed-files@d7f842d11479940a6036e3aacc6d35523e6ba978 # Version 4.1.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} pattern: '^docs/CHANGELOG.*$' diff --git a/.github/workflows/readme.yml b/.github/workflows/readme.yml index d3788674d6d..7f77c096df4 100644 --- a/.github/workflows/readme.yml +++ b/.github/workflows/readme.yml @@ -16,7 +16,7 @@ jobs: steps: - name: Check for changed files id: changedfiles - uses: umani/changed-files@0239328a3a6268aad16af7c3e4efc78e32d6c0f0 # Version 4.0.1 + uses: umani/changed-files@d7f842d11479940a6036e3aacc6d35523e6ba978 # Version 4.1.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} pattern: '^(?!.*node_modules).*README\.md$' From 4519370b2119d4a867a6f1ec9db0f15ea9a1c892 Mon Sep 17 00:00:00 2001 From: Mateusz Sekara Date: Mon, 16 Oct 2023 15:52:32 +0200 Subject: [PATCH 9/9] Support for finality tags in LogPoller (#10762) * Support for finality in the LogPoller * Post review fixes * Post rebase fixes --- core/chains/evm/chain.go | 11 +- .../evm/client/simulated_backend_client.go | 48 +- .../evm/forwarders/forwarder_manager_test.go | 4 +- core/chains/evm/logpoller/disabled.go | 4 - core/chains/evm/logpoller/helper_test.go | 7 +- core/chains/evm/logpoller/log_poller.go | 207 +-- .../evm/logpoller/log_poller_internal_test.go | 83 +- core/chains/evm/logpoller/log_poller_test.go | 1145 ++++++++++++----- 
core/chains/evm/logpoller/mocks/log_poller.go | 33 - core/chains/evm/logpoller/models.go | 7 +- core/chains/evm/logpoller/observability.go | 10 +- .../evm/logpoller/observability_test.go | 91 +- core/chains/evm/logpoller/orm.go | 70 +- core/chains/evm/logpoller/orm_test.go | 204 +-- core/chains/evm/logpoller/query.go | 33 +- core/chains/evm/txmgr/txmgr_test.go | 2 +- .../evm21/logprovider/integration_test.go | 2 +- core/services/pg/q.go | 1 + core/services/relay/evm/config_poller_test.go | 2 +- .../relay/evm/functions/config_poller_test.go | 2 +- .../relay/evm/mercury/helpers_test.go | 2 +- core/store/migrate/migrate_test.go | 142 ++ .../0201_add_finalized_block_number.sql | 11 + ...lt_values_for_last_finalized_block.sql.sql | 33 + 24 files changed, 1440 insertions(+), 714 deletions(-) create mode 100644 core/store/migrate/migrations/0201_add_finalized_block_number.sql create mode 100644 core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql.sql diff --git a/core/chains/evm/chain.go b/core/chains/evm/chain.go index 58c793cc646..2a8f8a2e840 100644 --- a/core/chains/evm/chain.go +++ b/core/chains/evm/chain.go @@ -248,7 +248,16 @@ func newChain(ctx context.Context, cfg *evmconfig.ChainScoped, nodes []*toml.Nod if opts.GenLogPoller != nil { logPoller = opts.GenLogPoller(chainID) } else { - logPoller = logpoller.NewLogPoller(logpoller.NewObservedORM(chainID, db, l, cfg.Database()), client, l, cfg.EVM().LogPollInterval(), int64(cfg.EVM().FinalityDepth()), int64(cfg.EVM().LogBackfillBatchSize()), int64(cfg.EVM().RPCDefaultBatchSize()), int64(cfg.EVM().LogKeepBlocksDepth())) + logPoller = logpoller.NewLogPoller( + logpoller.NewObservedORM(chainID, db, l, cfg.Database()), + client, + l, + cfg.EVM().LogPollInterval(), + cfg.EVM().FinalityTagEnabled(), + int64(cfg.EVM().FinalityDepth()), + int64(cfg.EVM().LogBackfillBatchSize()), + int64(cfg.EVM().RPCDefaultBatchSize()), + int64(cfg.EVM().LogKeepBlocksDepth())) } } diff --git 
a/core/chains/evm/client/simulated_backend_client.go b/core/chains/evm/client/simulated_backend_client.go index dd79c549bfe..abab2046620 100644 --- a/core/chains/evm/client/simulated_backend_client.go +++ b/core/chains/evm/client/simulated_backend_client.go @@ -545,7 +545,7 @@ func (c *SimulatedBackendClient) BatchCallContext(ctx context.Context, b []rpc.B if len(elem.Args) != 2 { return fmt.Errorf("SimulatedBackendClient expected 2 args, got %d for eth_getBlockByNumber", len(elem.Args)) } - blockNum, is := elem.Args[0].(string) + blockNumOrTag, is := elem.Args[0].(string) if !is { return fmt.Errorf("SimulatedBackendClient expected first arg to be a string for eth_getBlockByNumber, got: %T", elem.Args[0]) } @@ -553,31 +553,24 @@ func (c *SimulatedBackendClient) BatchCallContext(ctx context.Context, b []rpc.B if !is { return fmt.Errorf("SimulatedBackendClient expected second arg to be a boolean for eth_getBlockByNumber, got: %T", elem.Args[1]) } - n, ok := new(big.Int).SetString(blockNum, 0) - if !ok { - return fmt.Errorf("error while converting block number string: %s to big.Int ", blockNum) - } - header, err := c.b.HeaderByNumber(ctx, n) + header, err := c.fetchHeader(ctx, blockNumOrTag) if err != nil { return err } - switch v := elem.Result.(type) { + switch res := elem.Result.(type) { case *evmtypes.Head: - b[i].Result = &evmtypes.Head{ - Number: header.Number.Int64(), - Hash: header.Hash(), - Timestamp: time.Unix(int64(header.Time), 0).UTC(), - } + res.Number = header.Number.Int64() + res.Hash = header.Hash() + res.ParentHash = header.ParentHash + res.Timestamp = time.Unix(int64(header.Time), 0).UTC() case *evmtypes.Block: - b[i].Result = &evmtypes.Block{ - Number: header.Number.Int64(), - Hash: header.Hash(), - Timestamp: time.Unix(int64(header.Time), 0), - } + res.Number = header.Number.Int64() + res.Hash = header.Hash() + res.ParentHash = header.ParentHash + res.Timestamp = time.Unix(int64(header.Time), 0).UTC() default: - return 
fmt.Errorf("SimulatedBackendClient Unexpected Type %T", v) + return fmt.Errorf("SimulatedBackendClient Unexpected Type %T", elem.Result) } - b[i].Error = err case "eth_call": if len(elem.Args) != 2 { @@ -718,3 +711,20 @@ func toCallMsg(params map[string]interface{}) ethereum.CallMsg { func (c *SimulatedBackendClient) IsL2() bool { return false } + +func (c *SimulatedBackendClient) fetchHeader(ctx context.Context, blockNumOrTag string) (*types.Header, error) { + switch blockNumOrTag { + case rpc.SafeBlockNumber.String(): + return c.b.Blockchain().CurrentSafeBlock(), nil + case rpc.LatestBlockNumber.String(): + return c.b.Blockchain().CurrentHeader(), nil + case rpc.FinalizedBlockNumber.String(): + return c.b.Blockchain().CurrentFinalBlock(), nil + default: + blockNum, ok := new(big.Int).SetString(blockNumOrTag, 0) + if !ok { + return nil, fmt.Errorf("error while converting block number string: %s to big.Int ", blockNumOrTag) + } + return c.b.HeaderByNumber(ctx, blockNum) + } +} diff --git a/core/chains/evm/forwarders/forwarder_manager_test.go b/core/chains/evm/forwarders/forwarder_manager_test.go index bcbe43ef9b6..0117c2f2c07 100644 --- a/core/chains/evm/forwarders/forwarder_manager_test.go +++ b/core/chains/evm/forwarders/forwarder_manager_test.go @@ -58,7 +58,7 @@ func TestFwdMgr_MaybeForwardTransaction(t *testing.T) { t.Log(authorized) evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), evmcfg.Database()) fwdMgr.ORM = forwarders.NewORM(db, logger.TestLogger(t), cfg.Database()) @@ -111,7 +111,7 @@ func 
TestFwdMgr_AccountUnauthorizedToForward_SkipsForwarding(t *testing.T) { ec.Commit() evmClient := client.NewSimulatedBackendClient(t, ec, testutils.FixtureChainID) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), evmClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) fwdMgr := forwarders.NewFwdMgr(db, evmClient, lp, lggr, evmcfg.EVM(), evmcfg.Database()) fwdMgr.ORM = forwarders.NewORM(db, logger.TestLogger(t), cfg.Database()) diff --git a/core/chains/evm/logpoller/disabled.go b/core/chains/evm/logpoller/disabled.go index 06f0b9200a3..4bcf1c50863 100644 --- a/core/chains/evm/logpoller/disabled.go +++ b/core/chains/evm/logpoller/disabled.go @@ -104,7 +104,3 @@ func (d disabled) IndexedLogsCreatedAfter(eventSig common.Hash, address common.A func (d disabled) LatestBlockByEventSigsAddrsWithConfs(fromBlock int64, eventSigs []common.Hash, addresses []common.Address, confs Confirmations, qopts ...pg.QOpt) (int64, error) { return 0, ErrDisabled } - -func (d disabled) LogsUntilBlockHashDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { - return nil, ErrDisabled -} diff --git a/core/chains/evm/logpoller/helper_test.go b/core/chains/evm/logpoller/helper_test.go index 447a4673588..86208f2c5b4 100644 --- a/core/chains/evm/logpoller/helper_test.go +++ b/core/chains/evm/logpoller/helper_test.go @@ -45,7 +45,7 @@ type TestHarness struct { EthDB ethdb.Database } -func SetupTH(t testing.TB, finalityDepth, backfillBatchSize, rpcBatchSize int64) TestHarness { +func SetupTH(t testing.TB, useFinalityTag bool, finalityDepth, backfillBatchSize, rpcBatchSize int64) TestHarness { lggr := logger.TestLogger(t) chainID := 
testutils.NewRandomEVMChainID() chainID2 := testutils.NewRandomEVMChainID() @@ -63,7 +63,10 @@ func SetupTH(t testing.TB, finalityDepth, backfillBatchSize, rpcBatchSize int64) // Poll period doesn't matter, we intend to call poll and save logs directly in the test. // Set it to some insanely high value to not interfere with any tests. esc := client.NewSimulatedBackendClient(t, ec, chainID) - lp := logpoller.NewLogPoller(o, esc, lggr, 1*time.Hour, finalityDepth, backfillBatchSize, rpcBatchSize, 1000) + // Mark genesis block as finalized to avoid any nulls in the tests + head := esc.Backend().Blockchain().CurrentHeader() + esc.Backend().Blockchain().SetFinalized(head) + lp := logpoller.NewLogPoller(o, esc, lggr, 1*time.Hour, useFinalityTag, finalityDepth, backfillBatchSize, rpcBatchSize, 1000) emitterAddress1, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) require.NoError(t, err) emitterAddress2, _, emitter2, err := log_emitter.DeployLogEmitter(owner, ec) diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go index 9ce296e1a0b..95039f59cb0 100644 --- a/core/chains/evm/logpoller/log_poller.go +++ b/core/chains/evm/logpoller/log_poller.go @@ -58,11 +58,15 @@ type LogPoller interface { IndexedLogsWithSigsExcluding(address common.Address, eventSigA, eventSigB common.Hash, topicIndex int, fromBlock, toBlock int64, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) LogsDataWordRange(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) LogsDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - LogsUntilBlockHashDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) } type Confirmations int +const ( + 
Finalized = Confirmations(-1) + Unconfirmed = Confirmations(0) +) + type LogPollerTest interface { LogPoller PollAndSaveLogs(ctx context.Context, currentBlockNumber int64) @@ -88,15 +92,16 @@ var ( type logPoller struct { utils.StartStopOnce - ec Client - orm ORM - lggr logger.Logger - pollPeriod time.Duration // poll period set by block production rate - finalityDepth int64 // finality depth is taken to mean that block (head - finality) is finalized - keepBlocksDepth int64 // the number of blocks behind the head for which we keep the blocks. Must be greater than finality depth + 1. - backfillBatchSize int64 // batch size to use when backfilling finalized logs - rpcBatchSize int64 // batch size to use for fallback RPC calls made in GetBlocks - backupPollerNextBlock int64 + ec Client + orm ORM + lggr logger.Logger + pollPeriod time.Duration // poll period set by block production rate + useFinalityTag bool // indicates whether logPoller should use chain's finality or pick a fixed depth for finality + finalityDepth int64 // finality depth is taken to mean that block (head - finality) is finalized. 
If `useFinalityTag` is set to true, this value is ignored, because finalityDepth is fetched from chain + keepFinalizedBlocksDepth int64 // the number of blocks behind the last finalized block we keep in database + backfillBatchSize int64 // batch size to use when backfilling finalized logs + rpcBatchSize int64 // batch size to use for fallback RPC calls made in GetBlocks + backupPollerNextBlock int64 filterMu sync.RWMutex filters map[string]Filter @@ -122,21 +127,22 @@ type logPoller struct { // How fast that can be done depends largely on network speed and DB, but even for the fastest // support chain, polygon, which has 2s block times, we need RPCs roughly with <= 500ms latency func NewLogPoller(orm ORM, ec Client, lggr logger.Logger, pollPeriod time.Duration, - finalityDepth int64, backfillBatchSize int64, rpcBatchSize int64, keepBlocksDepth int64) *logPoller { + useFinalityTag bool, finalityDepth int64, backfillBatchSize int64, rpcBatchSize int64, keepFinalizedBlocksDepth int64) *logPoller { return &logPoller{ - ec: ec, - orm: orm, - lggr: lggr.Named("LogPoller"), - replayStart: make(chan int64), - replayComplete: make(chan error), - pollPeriod: pollPeriod, - finalityDepth: finalityDepth, - backfillBatchSize: backfillBatchSize, - rpcBatchSize: rpcBatchSize, - keepBlocksDepth: keepBlocksDepth, - filters: make(map[string]Filter), - filterDirty: true, // Always build Filter on first call to cache an empty filter if nothing registered yet. + ec: ec, + orm: orm, + lggr: lggr.Named("LogPoller"), + replayStart: make(chan int64), + replayComplete: make(chan error), + pollPeriod: pollPeriod, + finalityDepth: finalityDepth, + useFinalityTag: useFinalityTag, + backfillBatchSize: backfillBatchSize, + rpcBatchSize: rpcBatchSize, + keepFinalizedBlocksDepth: keepFinalizedBlocksDepth, + filters: make(map[string]Filter), + filterDirty: true, // Always build Filter on first call to cache an empty filter if nothing registered yet. 
} } @@ -371,11 +377,6 @@ func (lp *logPoller) ReplayAsync(fromBlock int64) { } func (lp *logPoller) Start(parentCtx context.Context) error { - if lp.keepBlocksDepth < (lp.finalityDepth + 1) { - // We add 1 since for reorg detection on the first unfinalized block - // we need to keep 1 finalized block. - return errors.Errorf("keepBlocksDepth %d must be greater than finality %d + 1", lp.keepBlocksDepth, lp.finalityDepth) - } return lp.StartOnce("LogPoller", func() error { ctx, cancel := context.WithCancel(parentCtx) lp.ctx = ctx @@ -497,21 +498,20 @@ func (lp *logPoller) run() { } // Otherwise this is the first poll _ever_ on a new chain. // Only safe thing to do is to start at the first finalized block. - latest, err := lp.ec.HeadByNumber(lp.ctx, nil) + latestBlock, latestFinalizedBlockNumber, err := lp.latestBlocks(lp.ctx) if err != nil { lp.lggr.Warnw("Unable to get latest for first poll", "err", err) continue } - latestNum := latest.Number // Do not support polling chains which don't even have finality depth worth of blocks. // Could conceivably support this but not worth the effort. - // Need finality depth + 1, no block 0. - if latestNum <= lp.finalityDepth { - lp.lggr.Warnw("Insufficient number of blocks on chain, waiting for finality depth", "err", err, "latest", latestNum, "finality", lp.finalityDepth) + // Need last finalized block number to be higher than 0 + if latestFinalizedBlockNumber <= 0 { + lp.lggr.Warnw("Insufficient number of blocks on chain, waiting for finality depth", "err", err, "latest", latestBlock.Number) continue } // Starting at the first finalized block. We do not backfill the first finalized block. 
- start = latestNum - lp.finalityDepth + start = latestFinalizedBlockNumber } else { start = lastProcessed.BlockNumber + 1 } @@ -558,22 +558,19 @@ func (lp *logPoller) BackupPollAndSaveLogs(ctx context.Context, backupPollerBloc } return } - - // If this is our first run, start max(finalityDepth+1, backupPollerBlockDelay) blocks behind the last processed + // If this is our first run, start from block min(lastProcessed.FinalizedBlockNumber-1, lastProcessed.BlockNumber-backupPollerBlockDelay) + backupStartBlock := mathutil.Min(lastProcessed.FinalizedBlockNumber-1, lastProcessed.BlockNumber-backupPollerBlockDelay) // (or at block 0 if whole blockchain is too short) - lp.backupPollerNextBlock = lastProcessed.BlockNumber - mathutil.Max(lp.finalityDepth+1, backupPollerBlockDelay) - if lp.backupPollerNextBlock < 0 { - lp.backupPollerNextBlock = 0 - } + lp.backupPollerNextBlock = mathutil.Max(backupStartBlock, 0) } - latestBlock, err := lp.ec.HeadByNumber(ctx, nil) + _, latestFinalizedBlockNumber, err := lp.latestBlocks(ctx) if err != nil { lp.lggr.Warnw("Backup logpoller failed to get latest block", "err", err) return } - lastSafeBackfillBlock := latestBlock.Number - lp.finalityDepth - 1 + lastSafeBackfillBlock := latestFinalizedBlockNumber - 1 if lastSafeBackfillBlock >= lp.backupPollerNextBlock { lp.lggr.Infow("Backup poller backfilling logs", "start", lp.backupPollerNextBlock, "end", lastSafeBackfillBlock) if err = lp.backfill(ctx, lp.backupPollerNextBlock, lastSafeBackfillBlock); err != nil { @@ -735,7 +732,7 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren // There can be another reorg while we're finding the LCA. // That is ok, since we'll detect it on the next iteration. // Since we go currentBlock by currentBlock for unfinalized logs, the mismatch starts at currentBlockNumber - 1. 
- blockAfterLCA, err2 := lp.findBlockAfterLCA(ctx, currentBlock) + blockAfterLCA, err2 := lp.findBlockAfterLCA(ctx, currentBlock, expectedParent.FinalizedBlockNumber) if err2 != nil { lp.lggr.Warnw("Unable to find LCA after reorg, retrying", "err", err2) return nil, errors.New("Unable to find LCA after reorg, retrying") @@ -780,7 +777,9 @@ func (lp *logPoller) getCurrentBlockMaybeHandleReorg(ctx context.Context, curren // conditions this would be equal to lastProcessed.BlockNumber + 1. func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int64) { lp.lggr.Debugw("Polling for logs", "currentBlockNumber", currentBlockNumber) - latestBlock, err := lp.ec.HeadByNumber(ctx, nil) + // Intentionally not using logPoller.finalityDepth directly but the latestFinalizedBlockNumber returned from lp.latestBlocks() + // latestBlocks knows how to pick a proper latestFinalizedBlockNumber based on the logPoller's configuration + latestBlock, latestFinalizedBlockNumber, err := lp.latestBlocks(ctx) if err != nil { lp.lggr.Warnw("Unable to get latestBlockNumber block", "err", err, "currentBlockNumber", currentBlockNumber) return @@ -813,7 +812,7 @@ func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int // E.g. 1<-2<-3(currentBlockNumber)<-4<-5<-6<-7(latestBlockNumber), finality is 2. So 3,4 can be batched. // Although 5 is finalized, we still need to save it to the db for reorg detection if 6 is a reorg. // start = currentBlockNumber = 3, end = latestBlockNumber - finality - 1 = 7-2-1 = 4 (inclusive range). 
- lastSafeBackfillBlock := latestBlockNumber - lp.finalityDepth - 1 + lastSafeBackfillBlock := latestFinalizedBlockNumber - 1 if lastSafeBackfillBlock >= currentBlockNumber { lp.lggr.Infow("Backfilling logs", "start", currentBlockNumber, "end", lastSafeBackfillBlock) if err = lp.backfill(ctx, currentBlockNumber, lastSafeBackfillBlock); err != nil { @@ -847,7 +846,7 @@ func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int } lp.lggr.Debugw("Unfinalized log query", "logs", len(logs), "currentBlockNumber", currentBlockNumber, "blockHash", currentBlock.Hash, "timestamp", currentBlock.Timestamp.Unix()) err = lp.orm.Q().WithOpts(pg.WithParentCtx(ctx)).Transaction(func(tx pg.Queryer) error { - if err2 := lp.orm.InsertBlock(h, currentBlockNumber, currentBlock.Timestamp, pg.WithQueryer(tx)); err2 != nil { + if err2 := lp.orm.InsertBlock(h, currentBlockNumber, currentBlock.Timestamp, latestFinalizedBlockNumber, pg.WithQueryer(tx)); err2 != nil { return err2 } if len(logs) == 0 { @@ -881,9 +880,37 @@ func (lp *logPoller) PollAndSaveLogs(ctx context.Context, currentBlockNumber int } } +// Returns information about latestBlock, latestFinalizedBlockNumber +// If finality tag is not enabled, latestFinalizedBlockNumber is calculated as latestBlockNumber - lp.finalityDepth (configured param) +// Otherwise, we return last finalized block number returned from chain +func (lp *logPoller) latestBlocks(ctx context.Context) (*evmtypes.Head, int64, error) { + // If finality is not enabled, we can only fetch the latest block + if !lp.useFinalityTag { + // Example: + // finalityDepth = 2 + // Blocks: 1->2->3->4->5(latestBlock) + // latestFinalizedBlockNumber would be 3 + latestBlock, err := lp.ec.HeadByNumber(ctx, nil) + if err != nil { + return nil, 0, err + } + // If chain has fewer blocks than finalityDepth, return 0 + return latestBlock, mathutil.Max(latestBlock.Number-lp.finalityDepth, 0), nil + } + + // If finality is enabled, we need to get the latest and 
finalized blocks. + blocks, err := lp.batchFetchBlocks(ctx, []string{rpc.LatestBlockNumber.String(), rpc.FinalizedBlockNumber.String()}, 2) + if err != nil { + return nil, 0, err + } + latest := blocks[0] + finalized := blocks[1] + return latest, finalized.Number, nil +} + // Find the first place where our chain and their chain have the same block, // that block number is the LCA. Return the block after that, where we want to resume polling. -func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.Head) (*evmtypes.Head, error) { +func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.Head, latestFinalizedBlockNumber int64) (*evmtypes.Head, error) { // Current is where the mismatch starts. // Check its parent to see if its the same as ours saved. parent, err := lp.ec.HeadByHash(ctx, current.ParentHash) @@ -891,12 +918,11 @@ func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.He return nil, err } blockAfterLCA := *current - reorgStart := parent.Number - // We expect reorgs up to the block after (current - finalityDepth), - // since the block at (current - finalityDepth) is finalized. + // We expect reorgs up to the block after latestFinalizedBlock // We loop via parent instead of current so current always holds the LCA+1. // If the parent block number becomes < the first finalized block our reorg is too deep. - for parent.Number >= (reorgStart - lp.finalityDepth) { + // This can happen only if finalityTag is not enabled and fixed finalityDepth is provided via config. 
+ for parent.Number >= latestFinalizedBlockNumber { ourParentBlockHash, err := lp.orm.SelectBlockByNumber(parent.Number, pg.WithParentCtx(ctx)) if err != nil { return nil, err @@ -912,28 +938,25 @@ func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.He return nil, err } } - lp.lggr.Criticalw("Reorg greater than finality depth detected", "max reorg depth", lp.finalityDepth-1) + lp.lggr.Criticalw("Reorg greater than finality depth detected", "finalityTag", lp.useFinalityTag, "current", current.Number, "latestFinalized", latestFinalizedBlockNumber) rerr := errors.New("Reorg greater than finality depth") lp.SvcErrBuffer.Append(rerr) return nil, rerr } -// pruneOldBlocks removes blocks that are > lp.ancientBlockDepth behind the head. +// pruneOldBlocks removes blocks that are > lp.ancientBlockDepth behind the latest finalized block. func (lp *logPoller) pruneOldBlocks(ctx context.Context) error { - latest, err := lp.ec.HeadByNumber(ctx, nil) + _, latestFinalizedBlock, err := lp.latestBlocks(ctx) if err != nil { return err } - if latest == nil { - return errors.Errorf("received nil block from RPC") - } - if latest.Number <= lp.keepBlocksDepth { + if latestFinalizedBlock <= lp.keepFinalizedBlocksDepth { // No-op, keep all blocks return nil } - // 1-2-3-4-5(latest), keepBlocksDepth=3 + // 1-2-3-4-5(finalized)-6-7(latest), keepFinalizedBlocksDepth=3 // Remove <= 2 - return lp.orm.DeleteBlocksBefore(latest.Number-lp.keepBlocksDepth, pg.WithParentCtx(ctx)) + return lp.orm.DeleteBlocksBefore(latestFinalizedBlock-lp.keepFinalizedBlocksDepth, pg.WithParentCtx(ctx)) } // Logs returns logs matching topics and address (exactly) in the given block range, @@ -984,12 +1007,6 @@ func (lp *logPoller) IndexedLogsTopicGreaterThan(eventSig common.Hash, address c return lp.orm.SelectIndexedLogsTopicGreaterThan(address, eventSig, topicIndex, topicValueMin, confs, qopts...) } -// LogsUntilBlockHashDataWordGreaterThan note index is 0 based. 
-// If the blockhash is not found (i.e. a stale fork) it will error. -func (lp *logPoller) LogsUntilBlockHashDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { - return lp.orm.SelectLogsUntilBlockHashDataWordGreaterThan(address, eventSig, wordIndex, wordValueMin, untilBlockHash, qopts...) -} - func (lp *logPoller) IndexedLogsTopicRange(eventSig common.Hash, address common.Address, topicIndex int, topicValueMin common.Hash, topicValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { return lp.orm.SelectIndexedLogsTopicRange(address, eventSig, topicIndex, topicValueMin, topicValueMax, confs, qopts...) } @@ -1043,7 +1060,7 @@ func (lp *logPoller) GetBlocksRange(ctx context.Context, numbers []uint64, qopts qopts = append(qopts, pg.WithParentCtx(ctx)) minRequestedBlock := int64(mathutil.Min(numbers[0], numbers[1:]...)) maxRequestedBlock := int64(mathutil.Max(numbers[0], numbers[1:]...)) - lpBlocks, err := lp.orm.GetBlocksRange(minRequestedBlock, maxRequestedBlock, qopts...) + lpBlocks, err := lp.orm.GetBlocksRange(int64(minRequestedBlock), int64(maxRequestedBlock), qopts...) if err != nil { lp.lggr.Warnw("Error while retrieving blocks from log pollers blocks table. 
Falling back to RPC...", "requestedBlocks", numbers, "err", err) } else { @@ -1086,17 +1103,10 @@ func (lp *logPoller) fillRemainingBlocksFromRPC( blocksRequested map[uint64]struct{}, blocksFound map[uint64]LogPollerBlock, ) (map[uint64]LogPollerBlock, error) { - var reqs []rpc.BatchElem - var remainingBlocks []uint64 + var remainingBlocks []string for num := range blocksRequested { if _, ok := blocksFound[num]; !ok { - req := rpc.BatchElem{ - Method: "eth_getBlockByNumber", - Args: []interface{}{hexutil.EncodeBig(big.NewInt(0).SetUint64(num)), false}, - Result: &evmtypes.Head{}, - } - reqs = append(reqs, req) - remainingBlocks = append(remainingBlocks, num) + remainingBlocks = append(remainingBlocks, hexutil.EncodeBig(new(big.Int).SetUint64(num))) } } @@ -1105,8 +1115,37 @@ func (lp *logPoller) fillRemainingBlocksFromRPC( "remainingBlocks", remainingBlocks) } - for i := 0; i < len(reqs); i += int(lp.rpcBatchSize) { - j := i + int(lp.rpcBatchSize) + evmBlocks, err := lp.batchFetchBlocks(ctx, remainingBlocks, lp.rpcBatchSize) + if err != nil { + return nil, err + } + + logPollerBlocks := make(map[uint64]LogPollerBlock) + for _, head := range evmBlocks { + logPollerBlocks[uint64(head.Number)] = LogPollerBlock{ + EvmChainId: head.EVMChainID, + BlockHash: head.Hash, + BlockNumber: head.Number, + BlockTimestamp: head.Timestamp, + CreatedAt: head.Timestamp, + } + } + return logPollerBlocks, nil +} + +func (lp *logPoller) batchFetchBlocks(ctx context.Context, blocksRequested []string, batchSize int64) ([]*evmtypes.Head, error) { + reqs := make([]rpc.BatchElem, 0, len(blocksRequested)) + for _, num := range blocksRequested { + req := rpc.BatchElem{ + Method: "eth_getBlockByNumber", + Args: []interface{}{num, false}, + Result: &evmtypes.Head{}, + } + reqs = append(reqs, req) + } + + for i := 0; i < len(reqs); i += int(batchSize) { + j := i + int(batchSize) if j > len(reqs) { j = len(reqs) } @@ -1117,7 +1156,7 @@ func (lp *logPoller) fillRemainingBlocksFromRPC( } } - var 
blocksFoundFromRPC = make(map[uint64]LogPollerBlock) + var blocks = make([]*evmtypes.Head, 0, len(reqs)) for _, r := range reqs { if r.Error != nil { return nil, r.Error @@ -1136,16 +1175,10 @@ func (lp *logPoller) fillRemainingBlocksFromRPC( if block.Number < 0 { return nil, errors.Errorf("expected block number to be >= to 0, got %d", block.Number) } - blocksFoundFromRPC[uint64(block.Number)] = LogPollerBlock{ - EvmChainId: block.EVMChainID, - BlockHash: block.Hash, - BlockNumber: block.Number, - BlockTimestamp: block.Timestamp, - CreatedAt: block.Timestamp, - } + blocks = append(blocks, block) } - return blocksFoundFromRPC, nil + return blocks, nil } // IndexedLogsWithSigsExcluding returns the set difference(A-B) of logs with signature sigA and sigB, matching is done on the topics index diff --git a/core/chains/evm/logpoller/log_poller_internal_test.go b/core/chains/evm/logpoller/log_poller_internal_test.go index 271d8c2a582..b9474158a6b 100644 --- a/core/chains/evm/logpoller/log_poller_internal_test.go +++ b/core/chains/evm/logpoller/log_poller_internal_test.go @@ -2,7 +2,9 @@ package logpoller import ( "context" + "fmt" "math/big" + "reflect" "strings" "sync" "testing" @@ -11,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -24,6 +27,7 @@ import ( "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/pg" + "github.com/smartcontractkit/chainlink/v2/core/utils" ) var ( @@ -57,7 +61,7 @@ func TestLogPoller_RegisterFilter(t *testing.T) { orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) // Set up a test chain with a log emitting contract deployed. 
- lp := NewLogPoller(orm, nil, lggr, time.Hour, 1, 1, 2, 1000) + lp := NewLogPoller(orm, nil, lggr, time.Hour, false, 1, 1, 2, 1000) // We expect a zero Filter if nothing registered yet. f := lp.Filter(nil, nil, nil) @@ -211,7 +215,7 @@ func TestLogPoller_BackupPollerStartup(t *testing.T) { ctx := testutils.Context(t) - lp := NewLogPoller(orm, ec, lggr, 1*time.Hour, 2, 3, 2, 1000) + lp := NewLogPoller(orm, ec, lggr, 1*time.Hour, false, 2, 3, 2, 1000) lp.BackupPollAndSaveLogs(ctx, 100) assert.Equal(t, int64(0), lp.backupPollerNextBlock) assert.Equal(t, 1, observedLogs.FilterMessageSnippet("ran before first successful log poller run").Len()) @@ -252,7 +256,7 @@ func TestLogPoller_Replay(t *testing.T) { ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) ec.On("FilterLogs", mock.Anything, mock.Anything).Return([]types.Log{log1}, nil).Once() ec.On("ConfiguredChainID").Return(chainID, nil) - lp := NewLogPoller(orm, ec, lggr, time.Hour, 3, 3, 3, 20) + lp := NewLogPoller(orm, ec, lggr, time.Hour, false, 3, 3, 3, 20) // process 1 log in block 3 lp.PollAndSaveLogs(tctx, 4) @@ -456,9 +460,80 @@ func TestLogPoller_Replay(t *testing.T) { }) } +func Test_latestBlockAndFinalityDepth(t *testing.T) { + tctx := testutils.Context(t) + lggr, _ := logger.TestLoggerObserved(t, zapcore.ErrorLevel) + chainID := testutils.FixtureChainID + db := pgtest.NewSqlxDB(t) + orm := NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) + + t.Run("pick latest block from chain and use finality from config with finality disabled", func(t *testing.T) { + head := evmtypes.Head{Number: 4} + finalityDepth := int64(3) + ec := evmclimocks.NewClient(t) + ec.On("HeadByNumber", mock.Anything, mock.Anything).Return(&head, nil) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, false, finalityDepth, 3, 3, 20) + latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(tctx) + require.NoError(t, err) + require.Equal(t, latestBlock.Number, head.Number) + require.Equal(t, finalityDepth, 
latestBlock.Number-lastFinalizedBlockNumber) + }) + + t.Run("finality tags in use", func(t *testing.T) { + t.Run("client returns data properly", func(t *testing.T) { + expectedLatestBlockNumber := int64(20) + expectedLastFinalizedBlockNumber := int64(12) + ec := evmclimocks.NewClient(t) + ec.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + reflect.DeepEqual(b[0].Args, []interface{}{"latest", false}) && + reflect.DeepEqual(b[1].Args, []interface{}{"finalized", false}) + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // Latest block details + *(elems[0].Result.(*evmtypes.Head)) = evmtypes.Head{Number: expectedLatestBlockNumber, Hash: utils.RandomBytes32()} + // Finalized block details + *(elems[1].Result.(*evmtypes.Head)) = evmtypes.Head{Number: expectedLastFinalizedBlockNumber, Hash: utils.RandomBytes32()} + }) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, true, 3, 3, 3, 20) + + latestBlock, lastFinalizedBlockNumber, err := lp.latestBlocks(tctx) + require.NoError(t, err) + require.Equal(t, expectedLatestBlockNumber, latestBlock.Number) + require.Equal(t, expectedLastFinalizedBlockNumber, lastFinalizedBlockNumber) + }) + + t.Run("client returns error for at least one of the calls", func(t *testing.T) { + ec := evmclimocks.NewClient(t) + ec.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // Latest block details + *(elems[0].Result.(*evmtypes.Head)) = evmtypes.Head{Number: 10} + // Finalized block details + elems[1].Error = fmt.Errorf("some error") + }) + + lp := NewLogPoller(orm, ec, lggr, time.Hour, true, 3, 3, 3, 20) + _, _, err := lp.latestBlocks(tctx) + require.Error(t, err) + }) + + t.Run("BatchCall returns an error", func(t *testing.T) { + ec := evmclimocks.NewClient(t) + ec.On("BatchCallContext", mock.Anything, mock.Anything).Return(fmt.Errorf("some error")) + + lp 
:= NewLogPoller(orm, ec, lggr, time.Hour, true, 3, 3, 3, 20) + _, _, err := lp.latestBlocks(tctx) + require.Error(t, err) + }) + }) +} + func benchmarkFilter(b *testing.B, nFilters, nAddresses, nEvents int) { lggr := logger.TestLogger(b) - lp := NewLogPoller(nil, nil, lggr, 1*time.Hour, 2, 3, 2, 1000) + lp := NewLogPoller(nil, nil, lggr, 1*time.Hour, false, 2, 3, 2, 1000) for i := 0; i < nFilters; i++ { var addresses []common.Address var events []common.Hash diff --git a/core/chains/evm/logpoller/log_poller_test.go b/core/chains/evm/logpoller/log_poller_test.go index f6be57aa6e7..f6fc333ff73 100644 --- a/core/chains/evm/logpoller/log_poller_test.go +++ b/core/chains/evm/logpoller/log_poller_test.go @@ -76,7 +76,7 @@ func populateDatabase(t testing.TB, o *logpoller.DbORM, chainID *big.Int) (commo } require.NoError(t, o.InsertLogs(logs)) - require.NoError(t, o.InsertBlock(utils.RandomAddress().Hash(), int64((j+1)*1000-1), startDate.Add(time.Duration(j*1000)*time.Hour))) + require.NoError(t, o.InsertBlock(utils.RandomAddress().Hash(), int64((j+1)*1000-1), startDate.Add(time.Duration(j*1000)*time.Hour), 0)) } return event1, address1, address2 @@ -120,7 +120,7 @@ func TestPopulateLoadedDB(t *testing.T) { }() // Confirm all the logs. - require.NoError(t, o.InsertBlock(common.HexToHash("0x10"), 1000000, time.Now())) + require.NoError(t, o.InsertBlock(common.HexToHash("0x10"), 1000000, time.Now(), 0)) func() { defer logRuntime(t, time.Now()) lgs, err1 := o.SelectLogsDataWordRange(address1, event1, 0, logpoller.EvmWord(500000), logpoller.EvmWord(500020), 0) @@ -145,7 +145,7 @@ func TestPopulateLoadedDB(t *testing.T) { } func TestLogPoller_Integration(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) th.Client.Commit() // Block 2. 
Ensure we have finality number of blocks err := th.LogPoller.RegisterFilter(logpoller.Filter{"Integration test", []common.Hash{EmitterABI.Events["Log1"].ID}, []common.Address{th.EmitterAddress1}, 0}) @@ -221,149 +221,344 @@ func TestLogPoller_Integration(t *testing.T) { // for the same block hash. We should be able to handle this without missing any logs, as // long as the logs returned for finalized blocks are consistent. func Test_BackupLogPoller(t *testing.T) { - th := SetupTH(t, 2, 3, 2) - // later, we will need at least 32 blocks filled with logs for cache invalidation - for i := int64(0); i < 32; i++ { - // to invalidate geth's internal read-cache, a matching log must be found in the bloom Filter - // for each of the 32 blocks - tx, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(i + 7)}) - require.NoError(t, err) - require.NotNil(t, tx) - th.Client.Commit() + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 2, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, } - ctx := testutils.Context(t) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2) + // later, we will need at least 32 blocks filled with logs for cache invalidation + for i := int64(0); i < 32; i++ { + // to invalidate geth's internal read-cache, a matching log must be found in the bloom Filter + // for each of the 32 blocks + tx, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(i + 7)}) + require.NoError(t, err) + require.NotNil(t, tx) + th.Client.Commit() + } - filter1 := logpoller.Filter{"filter1", []common.Hash{ - EmitterABI.Events["Log1"].ID, - EmitterABI.Events["Log2"].ID}, - []common.Address{th.EmitterAddress1}, - 0} - err := th.LogPoller.RegisterFilter(filter1) - require.NoError(t, err) + ctx := testutils.Context(t) - filters, err := 
th.ORM.LoadFilters(pg.WithParentCtx(testutils.Context(t))) - require.NoError(t, err) - require.Equal(t, 1, len(filters)) - require.Equal(t, filter1, filters["filter1"]) + filter1 := logpoller.Filter{"filter1", []common.Hash{ + EmitterABI.Events["Log1"].ID, + EmitterABI.Events["Log2"].ID}, + []common.Address{th.EmitterAddress1}, + 0} + err := th.LogPoller.RegisterFilter(filter1) + require.NoError(t, err) - err = th.LogPoller.RegisterFilter( - logpoller.Filter{"filter2", - []common.Hash{EmitterABI.Events["Log1"].ID}, - []common.Address{th.EmitterAddress2}, 0}) - require.NoError(t, err) + filters, err := th.ORM.LoadFilters(pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + require.Equal(t, 1, len(filters)) + require.Equal(t, filter1, filters["filter1"]) - defer func() { - assert.NoError(t, th.LogPoller.UnregisterFilter("filter1")) - }() - defer func() { - assert.NoError(t, th.LogPoller.UnregisterFilter("filter2")) - }() + err = th.LogPoller.RegisterFilter( + logpoller.Filter{"filter2", + []common.Hash{EmitterABI.Events["Log1"].ID}, + []common.Address{th.EmitterAddress2}, 0}) + require.NoError(t, err) - // generate some tx's with logs - tx1, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) - require.NoError(t, err) - require.NotNil(t, tx1) + defer func() { + assert.NoError(t, th.LogPoller.UnregisterFilter("filter1")) + }() + defer func() { + assert.NoError(t, th.LogPoller.UnregisterFilter("filter2")) + }() - tx2, err := th.Emitter1.EmitLog2(th.Owner, []*big.Int{big.NewInt(2)}) - require.NoError(t, err) - require.NotNil(t, tx2) + // generate some tx's with logs + tx1, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + require.NotNil(t, tx1) - tx3, err := th.Emitter2.EmitLog1(th.Owner, []*big.Int{big.NewInt(3)}) - require.NoError(t, err) - require.NotNil(t, tx3) + tx2, err := th.Emitter1.EmitLog2(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + require.NotNil(t, tx2) + + tx3, err 
:= th.Emitter2.EmitLog1(th.Owner, []*big.Int{big.NewInt(3)}) + require.NoError(t, err) + require.NotNil(t, tx3) + + th.Client.Commit() // commit block 34 with 3 tx's included + + h := th.Client.Blockchain().CurrentHeader() // get latest header + require.Equal(t, uint64(34), h.Number.Uint64()) + + // save these 3 receipts for later + receipts := rawdb.ReadReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), uint64(time.Now().Unix()), params.AllEthashProtocolChanges) + require.NotZero(t, receipts.Len()) + + // Simulate a situation where the rpc server has a block, but no logs available for it yet + // this can't happen with geth itself, but can with other clients. + rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), types.Receipts{}) // wipes out all logs for block 34 + + body := rawdb.ReadBody(th.EthDB, h.Hash(), h.Number.Uint64()) + require.Equal(t, 3, len(body.Transactions)) + txs := body.Transactions // save transactions for later + body.Transactions = types.Transactions{} // number of tx's must match # of logs for GetLogs() to succeed + rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + + currentBlock := th.PollAndSaveLogs(ctx, 1) + assert.Equal(t, int64(35), currentBlock) + + // simulate logs becoming available + rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), receipts) + require.True(t, rawdb.HasReceipts(th.EthDB, h.Hash(), h.Number.Uint64())) + body.Transactions = txs + rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + + // flush out cached block 34 by reading logs from first 32 blocks + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(2)), + ToBlock: big.NewInt(int64(33)), + Addresses: []common.Address{th.EmitterAddress1}, + Topics: [][]common.Hash{{EmitterABI.Events["Log1"].ID}}, + } + fLogs, err := th.Client.FilterLogs(ctx, query) + require.NoError(t, err) + require.Equal(t, 32, len(fLogs)) - th.Client.Commit() // commit block 34 with 3 tx's included + // logs shouldn't show up yet + logs, err := 
th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 0, len(logs)) + + th.Client.Commit() + th.Client.Commit() + markBlockAsFinalized(t, th, 34) - h := th.Client.Blockchain().CurrentHeader() // get latest header - require.Equal(t, uint64(34), h.Number.Uint64()) + // Run ordinary poller + backup poller at least once + currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + th.LogPoller.PollAndSaveLogs(ctx, currentBlock+1) + th.LogPoller.BackupPollAndSaveLogs(ctx, 100) + currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) - // save these 3 receipts for later - receipts := rawdb.ReadReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), uint64(time.Now().Unix()), params.AllEthashProtocolChanges) - require.NotZero(t, receipts.Len()) + require.Equal(t, int64(37), currentBlock+1) - // Simulate a situation where the rpc server has a block, but no logs available for it yet - // this can't happen with geth itself, but can with other clients. 
- rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), types.Receipts{}) // wipes out all logs for block 34 + // logs still shouldn't show up, because we don't want to backfill the last finalized log + // to help with reorg detection + logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 0, len(logs)) + th.Client.Commit() + markBlockAsFinalized(t, th, 35) - body := rawdb.ReadBody(th.EthDB, h.Hash(), h.Number.Uint64()) - require.Equal(t, 3, len(body.Transactions)) - txs := body.Transactions // save transactions for later - body.Transactions = types.Transactions{} // number of tx's must match # of logs for GetLogs() to succeed - rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + // Run ordinary poller + backup poller at least once more + th.LogPoller.PollAndSaveLogs(ctx, currentBlock+1) + th.LogPoller.BackupPollAndSaveLogs(ctx, 100) + currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + require.Equal(t, int64(38), currentBlock+1) + + // all 3 logs in block 34 should show up now, thanks to backup logger + logs, err = th.LogPoller.Logs(30, 37, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 5, len(logs)) + logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log2"].ID, th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 1, len(logs)) + logs, err = th.LogPoller.Logs(32, 36, EmitterABI.Events["Log1"].ID, th.EmitterAddress2, + pg.WithParentCtx(testutils.Context(t))) + require.NoError(t, err) + assert.Equal(t, 1, len(logs)) + }) + } +} + +func TestLogPoller_BackupPollAndSaveLogsWithPollerNotWorking(t *testing.T) { + emittedLogs := 30 + // Intentionally use very low backupLogPollerDelay to verify if finality is used properly + backupLogPollerDelay := int64(0) + 
ctx := testutils.Context(t) + th := SetupTH(t, true, 0, 3, 2) + + header, err := th.Client.HeaderByNumber(ctx, nil) + require.NoError(t, err) + + // Emit some logs in blocks + for i := 0; i < emittedLogs; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // First PollAndSave, no filters are registered + // 0 (finalized) -> 1 -> 2 -> ... currentBlock := th.PollAndSaveLogs(ctx, 1) - assert.Equal(t, int64(35), currentBlock) + // currentBlock should be blockChain start + number of emitted logs + 1 + assert.Equal(t, int64(emittedLogs)+header.Number.Int64()+1, currentBlock) - // simulate logs becoming available - rawdb.WriteReceipts(th.EthDB, h.Hash(), h.Number.Uint64(), receipts) - require.True(t, rawdb.HasReceipts(th.EthDB, h.Hash(), h.Number.Uint64())) - body.Transactions = txs - rawdb.WriteBody(th.EthDB, h.Hash(), h.Number.Uint64(), body) + // LogPoller not working, but chain in the meantime has progressed + // 0 -> 1 -> 2 -> ... -> currentBlock - 10 (finalized) -> .. -> currentBlock + markBlockAsFinalized(t, th, currentBlock-10) - // flush out cached block 34 by reading logs from first 32 blocks - query := ethereum.FilterQuery{ - FromBlock: big.NewInt(int64(2)), - ToBlock: big.NewInt(int64(33)), + err = th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, Addresses: []common.Address{th.EmitterAddress1}, - Topics: [][]common.Hash{{EmitterABI.Events["Log1"].ID}}, - } - fLogs, err := th.Client.FilterLogs(ctx, query) + }) require.NoError(t, err) - require.Equal(t, 32, len(fLogs)) - // logs shouldn't show up yet - logs, err := th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + // LogPoller should backfill starting from the last finalized block stored in db (genesis block) + // till the latest finalized block reported by chain. 
+ th.LogPoller.BackupPollAndSaveLogs(ctx, backupLogPollerDelay) + require.NoError(t, err) + + logs, err := th.LogPoller.Logs( + 0, + currentBlock, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) require.NoError(t, err) - assert.Equal(t, 0, len(logs)) + require.Len(t, logs, emittedLogs-10) + // Progressing even more, move blockchain forward by 1 block and mark it as finalized th.Client.Commit() + markBlockAsFinalized(t, th, currentBlock) + th.LogPoller.BackupPollAndSaveLogs(ctx, backupLogPollerDelay) + + // All emitted logs should be backfilled + logs, err = th.LogPoller.Logs( + 0, + currentBlock+1, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) +} + +func TestLogPoller_BackupPollAndSaveLogsWithDeepBlockDelay(t *testing.T) { + emittedLogs := 30 + ctx := testutils.Context(t) + th := SetupTH(t, true, 0, 3, 2) + + // Emit some logs in blocks + for i := 0; i < emittedLogs; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + // Emit one more empty block th.Client.Commit() - // Run ordinary poller + backup poller at least once - currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) - th.LogPoller.PollAndSaveLogs(ctx, currentBlock+1) - th.LogPoller.BackupPollAndSaveLogs(ctx, 100) - currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + header, err := th.Client.HeaderByNumber(ctx, nil) + require.NoError(t, err) + // Mark everything as finalized + markBlockAsFinalized(t, th, header.Number.Int64()) - require.Equal(t, int64(37), currentBlock+1) + // First PollAndSave, no filters are registered, but finalization is the same as the latest block + // 1 -> 2 -> ... 
+ th.PollAndSaveLogs(ctx, 1) - // logs still shouldn't show up, because we don't want to backfill the last finalized log - // to help with reorg detection - logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + // Register filter + err = th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) require.NoError(t, err) - assert.Equal(t, 0, len(logs)) - th.Client.Commit() + // Should fallback to the backupPollerBlockDelay when finalization was very high in a previous PollAndSave + th.LogPoller.BackupPollAndSaveLogs(ctx, int64(emittedLogs)) + require.NoError(t, err) - // Run ordinary poller + backup poller at least once more - th.LogPoller.PollAndSaveLogs(ctx, currentBlock+1) - th.LogPoller.BackupPollAndSaveLogs(ctx, 100) - currentBlock, _ = th.LogPoller.LatestBlock(pg.WithParentCtx(testutils.Context(t))) + // All emitted logs should be backfilled + logs, err := th.LogPoller.Logs( + 0, + header.Number.Int64()+1, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) +} + +func TestLogPoller_BackupPollAndSaveLogsSkippingLogsThatAreTooOld(t *testing.T) { + logsBatch := 10 + // Intentionally use very low backupLogPollerDelay to verify if finality is used properly + ctx := testutils.Context(t) + th := SetupTH(t, true, 0, 3, 2) - require.Equal(t, int64(38), currentBlock+1) + //header, err := th.Client.HeaderByNumber(ctx, nil) + //require.NoError(t, err) - // all 3 logs in block 34 should show up now, thanks to backup logger - logs, err = th.LogPoller.Logs(30, 37, EmitterABI.Events["Log1"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + // Emit some logs in blocks + for i := 1; i <= logsBatch; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, 
[]*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // First PollAndSave, no filters are registered, but finalization is the same as the latest block + // 1 -> 2 -> ... -> firstBatchBlock + firstBatchBlock := th.PollAndSaveLogs(ctx, 1) + // Mark current tip of the chain as finalized (after emitting 10 logs) + markBlockAsFinalized(t, th, firstBatchBlock) + + // Emit 2nd batch of block + for i := 1; i <= logsBatch; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(100 + i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // 1 -> 2 -> ... -> firstBatchBlock (finalized) -> .. -> firstBatchBlock + emitted logs + secondBatchBlock := th.PollAndSaveLogs(ctx, firstBatchBlock) + // Mark current tip of the block as finalized (after emitting 20 logs) + markBlockAsFinalized(t, th, secondBatchBlock) + + // Register filter + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) require.NoError(t, err) - assert.Equal(t, 5, len(logs)) - logs, err = th.LogPoller.Logs(34, 34, EmitterABI.Events["Log2"].ID, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t))) + + // Should pick logs starting from one block behind the latest finalized block + th.LogPoller.BackupPollAndSaveLogs(ctx, 0) require.NoError(t, err) - assert.Equal(t, 1, len(logs)) - logs, err = th.LogPoller.Logs(32, 36, EmitterABI.Events["Log1"].ID, th.EmitterAddress2, - pg.WithParentCtx(testutils.Context(t))) + + // Only the 2nd batch + 1 log from a previous batch should be backfilled, because we perform backfill starting + // from one block behind the latest finalized block + logs, err := th.LogPoller.Logs( + 0, + secondBatchBlock, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) require.NoError(t, err) - assert.Equal(t, 1, len(logs)) + require.Len(t, logs, 
logsBatch+1) + require.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000009`), logs[0].Data) } func TestLogPoller_BlockTimestamps(t *testing.T) { t.Parallel() ctx := testutils.Context(t) - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) addresses := []common.Address{th.EmitterAddress1, th.EmitterAddress2} topics := []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID} @@ -468,7 +663,7 @@ func TestLogPoller_SynchronizedWithGeth(t *testing.T) { }, 10e6) _, _, emitter1, err := log_emitter.DeployLogEmitter(owner, ec) require.NoError(t, err) - lp := logpoller.NewLogPoller(orm, client.NewSimulatedBackendClient(t, ec, chainID), lggr, 15*time.Second, int64(finalityDepth), 3, 2, 1000) + lp := logpoller.NewLogPoller(orm, client.NewSimulatedBackendClient(t, ec, chainID), lggr, 15*time.Second, false, int64(finalityDepth), 3, 2, 1000) for i := 0; i < finalityDepth; i++ { // Have enough blocks that we could reorg the full finalityDepth-1. ec.Commit() } @@ -535,223 +730,335 @@ func TestLogPoller_SynchronizedWithGeth(t *testing.T) { func TestLogPoller_PollAndSaveLogs(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) - // Set up a log poller listening for log emitter logs. 
- err := th.LogPoller.RegisterFilter(logpoller.Filter{ - "Test Emitter 1 & 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, - []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0, - }) - require.NoError(t, err) + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 3, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, + } - b, err := th.Client.BlockByNumber(testutils.Context(t), nil) - require.NoError(t, err) - require.Equal(t, uint64(1), b.NumberU64()) - require.Equal(t, uint64(10), b.Time()) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2) - // Test scenario: single block in chain, no logs. - // Chain genesis <- 1 - // DB: empty - newStart := th.PollAndSaveLogs(testutils.Context(t), 1) - assert.Equal(t, int64(2), newStart) + // Set up a log poller listening for log emitter logs. + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + "Test Emitter 1 & 2", []common.Hash{EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, + []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0, + }) + require.NoError(t, err) - // We expect to have saved block 1. - lpb, err := th.ORM.SelectBlockByNumber(1) - require.NoError(t, err) - assert.Equal(t, lpb.BlockHash, b.Hash()) - assert.Equal(t, lpb.BlockNumber, int64(b.NumberU64())) - assert.Equal(t, int64(1), int64(b.NumberU64())) - assert.Equal(t, uint64(10), b.Time()) + b, err := th.Client.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + require.Equal(t, uint64(1), b.NumberU64()) + require.Equal(t, uint64(10), b.Time()) - // No logs. - lgs, err := th.ORM.SelectLogsByBlockRange(1, 1) - require.NoError(t, err) - assert.Equal(t, 0, len(lgs)) - th.assertHaveCanonical(t, 1, 1) + // Test scenario: single block in chain, no logs. 
+ // Chain genesis <- 1 + // DB: empty + newStart := th.PollAndSaveLogs(testutils.Context(t), 1) + assert.Equal(t, int64(2), newStart) - // Polling again should be a noop, since we are at the latest. - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(2), newStart) - latest, err := th.ORM.SelectLatestBlock() - require.NoError(t, err) - assert.Equal(t, int64(1), latest.BlockNumber) - th.assertHaveCanonical(t, 1, 1) + // We expect to have saved block 1. + lpb, err := th.ORM.SelectBlockByNumber(1) + require.NoError(t, err) + assert.Equal(t, lpb.BlockHash, b.Hash()) + assert.Equal(t, lpb.BlockNumber, int64(b.NumberU64())) + assert.Equal(t, int64(1), int64(b.NumberU64())) + assert.Equal(t, uint64(10), b.Time()) - // Test scenario: one log 2 block chain. - // Chain gen <- 1 <- 2 (L1) - // DB: 1 - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) - require.NoError(t, err) - th.Client.Commit() + // No logs. + lgs, err := th.ORM.SelectLogsByBlockRange(1, 1) + require.NoError(t, err) + assert.Equal(t, 0, len(lgs)) + th.assertHaveCanonical(t, 1, 1) - // Polling should get us the L1 log. - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(3), newStart) - latest, err = th.ORM.SelectLatestBlock() - require.NoError(t, err) - assert.Equal(t, int64(2), latest.BlockNumber) - lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) - require.NoError(t, err) - require.Equal(t, 1, len(lgs)) - assert.Equal(t, th.EmitterAddress1, lgs[0].Address) - assert.Equal(t, latest.BlockHash, lgs[0].BlockHash) - assert.Equal(t, latest.BlockTimestamp, lgs[0].BlockTimestamp) - assert.Equal(t, hexutil.Encode(lgs[0].Topics[0]), EmitterABI.Events["Log1"].ID.String()) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), - lgs[0].Data) - - // Test scenario: single block reorg with log. 
- // Chain gen <- 1 <- 2 (L1_1) - // \ 2'(L1_2) <- 3 - // DB: 1, 2 - // - Detect a reorg, - // - Update the block 2's hash - // - Save L1' - // - L1_1 deleted - reorgedOutBlock, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(2)) - require.NoError(t, err) - lca, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(1)) - require.NoError(t, err) - require.NoError(t, th.Client.Fork(testutils.Context(t), lca.Hash())) - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)}) - require.NoError(t, err) - // Create 2' - th.Client.Commit() - // Create 3 (we need a new block for us to do any polling and detect the reorg). - th.Client.Commit() + // Polling again should be a noop, since we are at the latest. + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(2), newStart) + latest, err := th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(1), latest.BlockNumber) + th.assertHaveCanonical(t, 1, 1) - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(4), newStart) - latest, err = th.ORM.SelectLatestBlock() - require.NoError(t, err) - assert.Equal(t, int64(3), latest.BlockNumber) - lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) - require.NoError(t, err) - require.Equal(t, 1, len(lgs)) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) - th.assertHaveCanonical(t, 1, 3) - - // Test scenario: reorg back to previous tip. 
- // Chain gen <- 1 <- 2 (L1_1) <- 3' (L1_3) <- 4 - // \ 2'(L1_2) <- 3 - require.NoError(t, th.Client.Fork(testutils.Context(t), reorgedOutBlock.Hash())) - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(3)}) - require.NoError(t, err) - // Create 3' - th.Client.Commit() - // Create 4 - th.Client.Commit() - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(5), newStart) - latest, err = th.ORM.SelectLatestBlock() - require.NoError(t, err) - assert.Equal(t, int64(4), latest.BlockNumber) - lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) - require.NoError(t, err) - // We expect ONLY L1_1 and L1_3 since L1_2 is reorg'd out. - assert.Equal(t, 2, len(lgs)) - assert.Equal(t, int64(2), lgs[0].BlockNumber) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) - assert.Equal(t, int64(3), lgs[1].BlockNumber) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000003`), lgs[1].Data) - th.assertHaveCanonical(t, 1, 1) - th.assertHaveCanonical(t, 3, 4) - th.assertDontHave(t, 2, 2) // 2 gets backfilled - - // Test scenario: multiple logs per block for many blocks (also after reorg). - // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) - // \ 2'(L1_2) <- 3 - // DB: 1, 2', 3' - // - Should save 4, 5, 6 blocks - // - Should obtain logs L1_3, L2_5, L1_6 - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(4)}) - require.NoError(t, err) - _, err = th.Emitter2.EmitLog1(th.Owner, []*big.Int{big.NewInt(5)}) - require.NoError(t, err) - // Create 4 - th.Client.Commit() - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(6)}) - require.NoError(t, err) - // Create 5 - th.Client.Commit() + // Test scenario: one log 2 block chain. 
+ // Chain gen <- 1 <- 2 (L1) + // DB: 1 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + th.Client.Commit() - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(7), newStart) - lgs, err = th.ORM.SelectLogsByBlockRange(4, 6) - require.NoError(t, err) - require.Equal(t, 3, len(lgs)) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000004`), lgs[0].Data) - assert.Equal(t, th.EmitterAddress1, lgs[0].Address) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000005`), lgs[1].Data) - assert.Equal(t, th.EmitterAddress2, lgs[1].Address) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000006`), lgs[2].Data) - assert.Equal(t, th.EmitterAddress1, lgs[2].Address) - th.assertHaveCanonical(t, 1, 1) - th.assertDontHave(t, 2, 2) // 2 gets backfilled - th.assertHaveCanonical(t, 3, 6) - - // Test scenario: node down for exactly finality + 2 blocks - // Note we only backfill up to finalized - 1 blocks, because we need to save the - // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10 (L1_10) - // \ 2'(L1_2) <- 3 - // DB: 1, 2, 3, 4, 5, 6 - // - We expect block 7 to backfilled (treated as finalized) - // - Then block 8-10 to be handled block by block (treated as unfinalized). - for i := 7; i < 11; i++ { - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) - require.NoError(t, err) - th.Client.Commit() + // Polling should get us the L1 log. 
+ newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(3), newStart) + latest, err = th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(2), latest.BlockNumber) + lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, th.EmitterAddress1, lgs[0].Address) + assert.Equal(t, latest.BlockHash, lgs[0].BlockHash) + assert.Equal(t, latest.BlockTimestamp, lgs[0].BlockTimestamp) + assert.Equal(t, hexutil.Encode(lgs[0].Topics[0]), EmitterABI.Events["Log1"].ID.String()) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), + lgs[0].Data) + + // Test scenario: single block reorg with log. + // Chain gen <- 1 <- 2 (L1_1) + // \ 2'(L1_2) <- 3 + // DB: 1, 2 + // - Detect a reorg, + // - Update the block 2's hash + // - Save L1' + // - L1_1 deleted + reorgedOutBlock, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(2)) + require.NoError(t, err) + lca, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(1)) + require.NoError(t, err) + require.NoError(t, th.Client.Fork(testutils.Context(t), lca.Hash())) + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + // Create 2' + th.Client.Commit() + // Create 3 (we need a new block for us to do any polling and detect the reorg). + th.Client.Commit() + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(4), newStart) + latest, err = th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(3), latest.BlockNumber) + lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + require.Equal(t, 1, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) + th.assertHaveCanonical(t, 1, 3) + + // Test scenario: reorg back to previous tip. 
+ // Chain gen <- 1 <- 2 (L1_1) <- 3' (L1_3) <- 4 + // \ 2'(L1_2) <- 3 + require.NoError(t, th.Client.Fork(testutils.Context(t), reorgedOutBlock.Hash())) + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(3)}) + require.NoError(t, err) + // Create 3' + th.Client.Commit() + // Create 4 + th.Client.Commit() + // Mark block 1 as finalized + markBlockAsFinalized(t, th, 1) + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(5), newStart) + latest, err = th.ORM.SelectLatestBlock() + require.NoError(t, err) + assert.Equal(t, int64(4), latest.BlockNumber) + lgs, err = th.ORM.SelectLogsByBlockRange(1, 3) + require.NoError(t, err) + // We expect ONLY L1_1 and L1_3 since L1_2 is reorg'd out. + assert.Equal(t, 2, len(lgs)) + assert.Equal(t, int64(2), lgs[0].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) + assert.Equal(t, int64(3), lgs[1].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000003`), lgs[1].Data) + th.assertHaveCanonical(t, 1, 1) + th.assertHaveCanonical(t, 3, 4) + th.assertDontHave(t, 2, 2) // 2 gets backfilled + + // Test scenario: multiple logs per block for many blocks (also after reorg). 
+ // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) + // \ 2'(L1_2) <- 3 + // DB: 1, 2', 3' + // - Should save 4, 5, 6 blocks + // - Should obtain logs L1_3, L2_5, L1_6 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(4)}) + require.NoError(t, err) + _, err = th.Emitter2.EmitLog1(th.Owner, []*big.Int{big.NewInt(5)}) + require.NoError(t, err) + // Create 4 + th.Client.Commit() + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(6)}) + require.NoError(t, err) + // Create 5 + th.Client.Commit() + // Mark block 2 as finalized + markBlockAsFinalized(t, th, 3) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(7), newStart) + lgs, err = th.ORM.SelectLogsByBlockRange(4, 6) + require.NoError(t, err) + require.Equal(t, 3, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000004`), lgs[0].Data) + assert.Equal(t, th.EmitterAddress1, lgs[0].Address) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000005`), lgs[1].Data) + assert.Equal(t, th.EmitterAddress2, lgs[1].Address) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000006`), lgs[2].Data) + assert.Equal(t, th.EmitterAddress1, lgs[2].Address) + th.assertHaveCanonical(t, 1, 1) + th.assertDontHave(t, 2, 2) // 2 gets backfilled + th.assertHaveCanonical(t, 3, 6) + + // Test scenario: node down for exactly finality + 2 blocks + // Note we only backfill up to finalized - 1 blocks, because we need to save the + // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10 (L1_10) + // \ 2'(L1_2) <- 3 + // DB: 1, 2, 3, 4, 5, 6 + // - We expect block 7 to backfilled (treated as finalized) + // - Then block 8-10 to be handled block by block (treated as unfinalized). 
+ for i := 7; i < 11; i++ { + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + // Mark block 7 as finalized + markBlockAsFinalized(t, th, 7) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(11), newStart) + lgs, err = th.ORM.SelectLogsByBlockRange(7, 9) + require.NoError(t, err) + require.Equal(t, 3, len(lgs)) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000007`), lgs[0].Data) + assert.Equal(t, int64(7), lgs[0].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000008`), lgs[1].Data) + assert.Equal(t, int64(8), lgs[1].BlockNumber) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000009`), lgs[2].Data) + assert.Equal(t, int64(9), lgs[2].BlockNumber) + th.assertDontHave(t, 7, 7) // Do not expect to save backfilled blocks. + th.assertHaveCanonical(t, 8, 10) + + // Test scenario large backfill (multiple batches) + // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10..16 + // \ 2'(L1_2) <- 3 + // DB: 1, 2, 3, 4, 5, 6, (backfilled 7), 8, 9, 10 + // - 11, 12, 13 backfilled in batch 1 + // - 14 backfilled in batch 2 + // - 15, 16, 17 to be treated as unfinalized + for i := 11; i < 18; i++ { + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + // Mark block 14 as finalized + markBlockAsFinalized(t, th, 14) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(18), newStart) + lgs, err = th.ORM.SelectLogsByBlockRange(11, 17) + require.NoError(t, err) + assert.Equal(t, 7, len(lgs)) + th.assertHaveCanonical(t, 15, 16) + th.assertDontHave(t, 11, 14) // Do not expect to save backfilled blocks. 
+ + // Verify that a custom block timestamp will get written to db correctly also + b, err = th.Client.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + require.Equal(t, uint64(17), b.NumberU64()) + require.Equal(t, uint64(170), b.Time()) + require.NoError(t, th.Client.AdjustTime(1*time.Hour)) + th.Client.Commit() + + b, err = th.Client.BlockByNumber(testutils.Context(t), nil) + require.NoError(t, err) + require.Equal(t, uint64(180+time.Hour.Seconds()), b.Time()) + }) } - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(11), newStart) - lgs, err = th.ORM.SelectLogsByBlockRange(7, 9) - require.NoError(t, err) - require.Equal(t, 3, len(lgs)) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000007`), lgs[0].Data) - assert.Equal(t, int64(7), lgs[0].BlockNumber) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000008`), lgs[1].Data) - assert.Equal(t, int64(8), lgs[1].BlockNumber) - assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000009`), lgs[2].Data) - assert.Equal(t, int64(9), lgs[2].BlockNumber) - th.assertDontHave(t, 7, 7) // Do not expect to save backfilled blocks. 
- th.assertHaveCanonical(t, 8, 10) - - // Test scenario large backfill (multiple batches) - // Chain gen <- 1 <- 2 (L1_1) <- 3' L1_3 <- 4 <- 5 (L1_4, L2_5) <- 6 (L1_6) <- 7 (L1_7) <- 8 (L1_8) <- 9 (L1_9) <- 10..16 - // \ 2'(L1_2) <- 3 - // DB: 1, 2, 3, 4, 5, 6, (backfilled 7), 8, 9, 10 - // - 11, 12, 13 backfilled in batch 1 - // - 14 backfilled in batch 2 - // - 15, 16, 17 to be treated as unfinalized - for i := 11; i < 18; i++ { - _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) - require.NoError(t, err) - th.Client.Commit() +} + +func TestLogPoller_PollAndSaveLogsDeepReorg(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 3, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, } - newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) - assert.Equal(t, int64(18), newStart) - lgs, err = th.ORM.SelectLogsByBlockRange(11, 17) - require.NoError(t, err) - assert.Equal(t, 7, len(lgs)) - th.assertHaveCanonical(t, 15, 16) - th.assertDontHave(t, 11, 14) // Do not expect to save backfilled blocks. - // Verify that a custom block timestamp will get written to db correctly also - b, err = th.Client.BlockByNumber(testutils.Context(t), nil) - require.NoError(t, err) - require.Equal(t, uint64(17), b.NumberU64()) - require.Equal(t, uint64(170), b.Time()) - require.NoError(t, th.Client.AdjustTime(1*time.Hour)) - th.Client.Commit() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2) - b, err = th.Client.BlockByNumber(testutils.Context(t), nil) - require.NoError(t, err) - require.Equal(t, uint64(180+time.Hour.Seconds()), b.Time()) + // Set up a log poller listening for log emitter logs. 
+ err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) + require.NoError(t, err) + + // Test scenario: one log 2 block chain. + // Chain gen <- 1 <- 2 (L1_1) + // DB: 1 + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(1)}) + require.NoError(t, err) + th.Client.Commit() + markBlockAsFinalized(t, th, 1) + + // Polling should get us the L1 log. + newStart := th.PollAndSaveLogs(testutils.Context(t), 1) + assert.Equal(t, int64(3), newStart) + // Check that L1_1 has a proper data payload + lgs, err := th.ORM.SelectLogsByBlockRange(2, 2) + require.NoError(t, err) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000001`), lgs[0].Data) + + // Single block reorg and log poller not working for a while, mine blocks and progress with finalization + // Chain gen <- 1 <- 2 (L1_1) + // \ 2'(L1_2) <- 3 <- 4 <- 5 <- 6 (finalized on chain) <- 7 <- 8 <- 9 <- 10 + lca, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(1)) + require.NoError(t, err) + require.NoError(t, th.Client.Fork(testutils.Context(t), lca.Hash())) + // Create 2' + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(2)}) + require.NoError(t, err) + th.Client.Commit() + // Create 3-10 + for i := 3; i < 10; i++ { + _, err = th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + markBlockAsFinalized(t, th, 6) + + newStart = th.PollAndSaveLogs(testutils.Context(t), newStart) + assert.Equal(t, int64(10), newStart) + + // Expect L1_2 to be properly updated + lgs, err = th.ORM.SelectLogsByBlockRange(2, 2) + require.NoError(t, err) + assert.Equal(t, hexutil.MustDecode(`0x0000000000000000000000000000000000000000000000000000000000000002`), lgs[0].Data) + th.assertHaveCanonical(t, 1, 1) + th.assertDontHave(t, 2, 5) // These blocks are 
backfilled + th.assertHaveCanonical(t, 6, 10) + }) + } } func TestLogPoller_LoadFilters(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) filter1 := logpoller.Filter{"first Filter", []common.Hash{ EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0} @@ -802,7 +1109,7 @@ func TestLogPoller_LoadFilters(t *testing.T) { func TestLogPoller_GetBlocks_Range(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) err := th.LogPoller.RegisterFilter(logpoller.Filter{"GetBlocks Test", []common.Hash{ EmitterABI.Events["Log1"].ID, EmitterABI.Events["Log2"].ID}, []common.Address{th.EmitterAddress1, th.EmitterAddress2}, 0}, @@ -912,7 +1219,7 @@ func TestLogPoller_GetBlocks_Range(t *testing.T) { func TestGetReplayFromBlock(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) // Commit a few blocks for i := 0; i < 10; i++ { th.Client.Commit() @@ -973,7 +1280,7 @@ func TestLogPoller_DBErrorHandling(t *testing.T) { ec.Commit() ec.Commit() - lp := logpoller.NewLogPoller(o, client.NewSimulatedBackendClient(t, ec, chainID2), lggr, 1*time.Hour, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(o, client.NewSimulatedBackendClient(t, ec, chainID2), lggr, 1*time.Hour, false, 2, 3, 2, 1000) err = lp.Replay(ctx, 5) // block number too high require.ErrorContains(t, err, "Invalid replay block number") @@ -1076,7 +1383,7 @@ func TestTooManyLogResults(t *testing.T) { chainID := testutils.NewRandomEVMChainID() db := pgtest.NewSqlxDB(t) o := logpoller.NewORM(chainID, db, lggr, pgtest.NewQConfig(true)) - lp := logpoller.NewLogPoller(o, ec, lggr, 1*time.Hour, 2, 20, 10, 1000) + lp := logpoller.NewLogPoller(o, ec, lggr, 1*time.Hour, false, 2, 20, 10, 1000) expected := []int64{10, 5, 2, 1} clientErr := client.JsonError{ @@ -1153,62 +1460,206 @@ func TestTooManyLogResults(t *testing.T) { assert.Contains(t, crit[0].Message, 
"Too many log results in a single block") } -func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { - emittedLogs := 60 - finalityDepth := 10 +func Test_PollAndQueryFinalizedBlocks(t *testing.T) { + t.Parallel() ctx := testutils.Context(t) - th := SetupTH(t, int64(finalityDepth), 3, 2) + firstBatchLen := 3 + secondBatchLen := 5 - header, err := th.Client.HeaderByNumber(ctx, nil) - require.NoError(t, err) + th := SetupTH(t, true, 2, 3, 2) - genesisBlockTime := time.UnixMilli(int64(header.Time)) + eventSig := EmitterABI.Events["Log1"].ID + err := th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "GetBlocks Test", + EventSigs: []common.Hash{eventSig}, + Addresses: []common.Address{th.EmitterAddress1}}, + ) + require.NoError(t, err) - // Emit some logs in blocks - for i := 0; i < emittedLogs; i++ { - _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) - require.NoError(t, err) + // Generate block that will be finalized + for i := 0; i < firstBatchLen; i++ { + _, err1 := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) th.Client.Commit() } - // First PollAndSave, no filters are registered - currentBlock := th.PollAndSaveLogs(ctx, 1) - - err = th.LogPoller.RegisterFilter(logpoller.Filter{ - Name: "Test Emitter", - EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, - Addresses: []common.Address{th.EmitterAddress1}, - }) - require.NoError(t, err) + // Mark current head as finalized + h := th.Client.Blockchain().CurrentHeader() + th.Client.Blockchain().SetFinalized(h) - // Emit blocks to cover finality depth, because backup always backfill up to the one block before last finalized - for i := 0; i < finalityDepth+1; i++ { + // Generate next blocks, not marked as finalized + for i := 0; i < secondBatchLen; i++ { + _, err1 := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err1) th.Client.Commit() } - // LogPoller should backfill entire history - 
th.LogPoller.BackupPollAndSaveLogs(ctx, 100) - require.NoError(t, err) + currentBlock := th.PollAndSaveLogs(ctx, 1) + require.Equal(t, int(currentBlock), firstBatchLen+secondBatchLen+2) - // Make sure that all logs are backfilled - logs, err := th.LogPoller.Logs( - 0, - currentBlock, - EmitterABI.Events["Log1"].ID, + finalizedLogs, err := th.LogPoller.LogsDataWordGreaterThan( + eventSig, th.EmitterAddress1, - pg.WithParentCtx(testutils.Context(t)), + 0, + common.Hash{}, + logpoller.Finalized, ) require.NoError(t, err) - require.Len(t, logs, emittedLogs) + require.Len(t, finalizedLogs, firstBatchLen) - // We should get all the logs by the block_timestamp - logs, err = th.LogPoller.LogsCreatedAfter( - EmitterABI.Events["Log1"].ID, + numberOfConfirmations := 1 + logsByConfs, err := th.LogPoller.LogsDataWordGreaterThan( + eventSig, th.EmitterAddress1, - genesisBlockTime, 0, - pg.WithParentCtx(testutils.Context(t)), + common.Hash{}, + logpoller.Confirmations(numberOfConfirmations), ) require.NoError(t, err) - require.Len(t, logs, emittedLogs) + require.Len(t, logsByConfs, firstBatchLen+secondBatchLen-numberOfConfirmations) +} + +func Test_PollAndSavePersistsFinalityInBlocks(t *testing.T) { + ctx := testutils.Context(t) + numberOfBlocks := 10 + + tests := []struct { + name string + useFinalityTag bool + finalityDepth int64 + expectedFinalizedBlock int64 + }{ + { + name: "using fixed finality depth", + useFinalityTag: false, + finalityDepth: 2, + expectedFinalizedBlock: int64(numberOfBlocks - 2), + }, + { + name: "setting last finalized block number to 0 if finality is too deep", + useFinalityTag: false, + finalityDepth: 20, + expectedFinalizedBlock: 0, + }, + { + name: "using finality from chain", + useFinalityTag: true, + finalityDepth: 0, + expectedFinalizedBlock: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.useFinalityTag, tt.finalityDepth, 3, 2) + // Mark first block as finalized + h := 
th.Client.Blockchain().CurrentHeader() + th.Client.Blockchain().SetFinalized(h) + + // Create a couple of blocks + for i := 0; i < numberOfBlocks-1; i++ { + th.Client.Commit() + } + + th.PollAndSaveLogs(ctx, 1) + + latestBlock, err := th.ORM.SelectLatestBlock() + require.NoError(t, err) + require.Equal(t, int64(numberOfBlocks), latestBlock.BlockNumber) + require.Equal(t, tt.expectedFinalizedBlock, latestBlock.FinalizedBlockNumber) + }) + } +} + +func Test_CreatedAfterQueriesWithBackfill(t *testing.T) { + emittedLogs := 60 + ctx := testutils.Context(t) + + tests := []struct { + name string + finalityDepth int64 + finalityTag bool + }{ + { + name: "fixed finality depth without finality tag", + finalityDepth: 10, + finalityTag: false, + }, + { + name: "chain finality in use", + finalityDepth: 0, + finalityTag: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + th := SetupTH(t, tt.finalityTag, tt.finalityDepth, 3, 2) + + header, err := th.Client.HeaderByNumber(ctx, nil) + require.NoError(t, err) + + genesisBlockTime := time.UnixMilli(int64(header.Time)) + + // Emit some logs in blocks + for i := 0; i < emittedLogs; i++ { + _, err := th.Emitter1.EmitLog1(th.Owner, []*big.Int{big.NewInt(int64(i))}) + require.NoError(t, err) + th.Client.Commit() + } + + // First PollAndSave, no filters are registered + currentBlock := th.PollAndSaveLogs(ctx, 1) + + err = th.LogPoller.RegisterFilter(logpoller.Filter{ + Name: "Test Emitter", + EventSigs: []common.Hash{EmitterABI.Events["Log1"].ID}, + Addresses: []common.Address{th.EmitterAddress1}, + }) + require.NoError(t, err) + + // Emit blocks to cover finality depth, because backup always backfill up to the one block before last finalized + for i := 0; i < int(tt.finalityDepth)+1; i++ { + bh := th.Client.Commit() + markBlockAsFinalizedByHash(t, th, bh) + } + + // LogPoller should backfill entire history + th.LogPoller.BackupPollAndSaveLogs(ctx, 100) + require.NoError(t, err) + + // Make sure that all 
logs are backfilled + logs, err := th.LogPoller.Logs( + 0, + currentBlock, + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) + + // We should get all the logs by the block_timestamp + logs, err = th.LogPoller.LogsCreatedAfter( + EmitterABI.Events["Log1"].ID, + th.EmitterAddress1, + genesisBlockTime, + 0, + pg.WithParentCtx(testutils.Context(t)), + ) + require.NoError(t, err) + require.Len(t, logs, emittedLogs) + }) + } +} + +func markBlockAsFinalized(t *testing.T, th TestHarness, blockNumber int64) { + b, err := th.Client.BlockByNumber(testutils.Context(t), big.NewInt(blockNumber)) + require.NoError(t, err) + th.Client.Blockchain().SetFinalized(b.Header()) +} + +func markBlockAsFinalizedByHash(t *testing.T, th TestHarness, blockHash common.Hash) { + b, err := th.Client.BlockByHash(testutils.Context(t), blockHash) + require.NoError(t, err) + th.Client.Blockchain().SetFinalized(b.Header()) } diff --git a/core/chains/evm/logpoller/mocks/log_poller.go b/core/chains/evm/logpoller/mocks/log_poller.go index 59526cd19ea..f4357341646 100644 --- a/core/chains/evm/logpoller/mocks/log_poller.go +++ b/core/chains/evm/logpoller/mocks/log_poller.go @@ -588,39 +588,6 @@ func (_m *LogPoller) LogsDataWordRange(eventSig common.Hash, address common.Addr return r0, r1 } -// LogsUntilBlockHashDataWordGreaterThan provides a mock function with given fields: eventSig, address, wordIndex, wordValueMin, untilBlockHash, qopts -func (_m *LogPoller) LogsUntilBlockHashDataWordGreaterThan(eventSig common.Hash, address common.Address, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]logpoller.Log, error) { - _va := make([]interface{}, len(qopts)) - for _i := range qopts { - _va[_i] = qopts[_i] - } - var _ca []interface{} - _ca = append(_ca, eventSig, address, wordIndex, wordValueMin, untilBlockHash) - _ca = append(_ca, _va...) 
- ret := _m.Called(_ca...) - - var r0 []logpoller.Log - var r1 error - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, ...pg.QOpt) ([]logpoller.Log, error)); ok { - return rf(eventSig, address, wordIndex, wordValueMin, untilBlockHash, qopts...) - } - if rf, ok := ret.Get(0).(func(common.Hash, common.Address, int, common.Hash, common.Hash, ...pg.QOpt) []logpoller.Log); ok { - r0 = rf(eventSig, address, wordIndex, wordValueMin, untilBlockHash, qopts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]logpoller.Log) - } - } - - if rf, ok := ret.Get(1).(func(common.Hash, common.Address, int, common.Hash, common.Hash, ...pg.QOpt) error); ok { - r1 = rf(eventSig, address, wordIndex, wordValueMin, untilBlockHash, qopts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // LogsWithSigs provides a mock function with given fields: start, end, eventSigs, address, qopts func (_m *LogPoller) LogsWithSigs(start int64, end int64, eventSigs []common.Hash, address common.Address, qopts ...pg.QOpt) ([]logpoller.Log, error) { _va := make([]interface{}, len(qopts)) diff --git a/core/chains/evm/logpoller/models.go b/core/chains/evm/logpoller/models.go index 2848239e67f..9c55786777c 100644 --- a/core/chains/evm/logpoller/models.go +++ b/core/chains/evm/logpoller/models.go @@ -16,9 +16,10 @@ type LogPollerBlock struct { EvmChainId *utils.Big BlockHash common.Hash // Note geth uses int64 internally https://github.com/ethereum/go-ethereum/blob/f66f1a16b3c480d3a43ac7e8a09ab3e362e96ae4/eth/filters/api.go#L340 - BlockNumber int64 - BlockTimestamp time.Time - CreatedAt time.Time + BlockNumber int64 + BlockTimestamp time.Time + FinalizedBlockNumber int64 + CreatedAt time.Time } // Log represents an EVM log. 
diff --git a/core/chains/evm/logpoller/observability.go b/core/chains/evm/logpoller/observability.go index 5935c25637a..7f54fa9f09a 100644 --- a/core/chains/evm/logpoller/observability.go +++ b/core/chains/evm/logpoller/observability.go @@ -78,9 +78,9 @@ func (o *ObservedORM) InsertLogs(logs []Log, qopts ...pg.QOpt) error { }) } -func (o *ObservedORM) InsertBlock(h common.Hash, n int64, t time.Time, qopts ...pg.QOpt) error { +func (o *ObservedORM) InsertBlock(hash common.Hash, blockNumber int64, blockTimestamp time.Time, lastFinalizedBlock int64, qopts ...pg.QOpt) error { return withObservedExec(o, "InsertBlock", func() error { - return o.ORM.InsertBlock(h, n, t, qopts...) + return o.ORM.InsertBlock(hash, blockNumber, blockTimestamp, lastFinalizedBlock, qopts...) }) } @@ -222,12 +222,6 @@ func (o *ObservedORM) SelectLogsDataWordGreaterThan(address common.Address, even }) } -func (o *ObservedORM) SelectLogsUntilBlockHashDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { - return withObservedQueryAndResults(o, "SelectLogsUntilBlockHashDataWordGreaterThan", func() ([]Log, error) { - return o.ORM.SelectLogsUntilBlockHashDataWordGreaterThan(address, eventSig, wordIndex, wordValueMin, untilBlockHash, qopts...) - }) -} - func (o *ObservedORM) SelectIndexedLogsTopicGreaterThan(address common.Address, eventSig common.Hash, topicIndex int, topicValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) { return withObservedQueryAndResults(o, "SelectIndexedLogsTopicGreaterThan", func() ([]Log, error) { return o.ORM.SelectIndexedLogsTopicGreaterThan(address, eventSig, topicIndex, topicValueMin, confs, qopts...) 
diff --git a/core/chains/evm/logpoller/observability_test.go b/core/chains/evm/logpoller/observability_test.go index 1f0c0abfeaf..0d3eadf47d7 100644 --- a/core/chains/evm/logpoller/observability_test.go +++ b/core/chains/evm/logpoller/observability_test.go @@ -22,84 +22,81 @@ import ( func TestMultipleMetricsArePublished(t *testing.T) { ctx := testutils.Context(t) - lp := createObservedPollLogger(t, 100) - require.Equal(t, 0, testutil.CollectAndCount(lp.queryDuration)) - - _, _ = lp.SelectIndexedLogs(common.Address{}, common.Hash{}, 1, []common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsByBlockRange(0, 1, common.Address{}, common.Hash{}, 1, []common.Hash{}, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsTopicGreaterThan(common.Address{}, common.Hash{}, 1, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsTopicRange(common.Address{}, common.Hash{}, 1, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsWithSigsExcluding(common.Hash{}, common.Hash{}, 1, common.Address{}, 0, 1, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLogsDataWordRange(common.Address{}, common.Hash{}, 0, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLogsDataWordGreaterThan(common.Address{}, common.Hash{}, 0, common.Hash{}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLogsCreatedAfter(common.Address{}, common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLatestLogEventSigsAddrsWithConfs(0, []common.Address{{}}, []common.Hash{{}}, 1, pg.WithParentCtx(ctx)) - _, _ = lp.SelectIndexedLogsCreatedAfter(common.Address{}, common.Hash{}, 1, []common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) - _, _ = lp.SelectLogsUntilBlockHashDataWordGreaterThan(common.Address{}, common.Hash{}, 0, common.Hash{}, common.Hash{}, pg.WithParentCtx(ctx)) - _ = lp.InsertLogs([]Log{}, 
pg.WithParentCtx(ctx)) - _ = lp.InsertBlock(common.Hash{}, 0, time.Now(), pg.WithParentCtx(ctx)) - - require.Equal(t, 14, testutil.CollectAndCount(lp.queryDuration)) - require.Equal(t, 10, testutil.CollectAndCount(lp.datasetSize)) - resetMetrics(*lp) + orm := createObservedORM(t, 100) + t.Cleanup(func() { resetMetrics(*orm) }) + require.Equal(t, 0, testutil.CollectAndCount(orm.queryDuration)) + + _, _ = orm.SelectIndexedLogs(common.Address{}, common.Hash{}, 1, []common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsByBlockRange(0, 1, common.Address{}, common.Hash{}, 1, []common.Hash{}, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsTopicGreaterThan(common.Address{}, common.Hash{}, 1, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsTopicRange(common.Address{}, common.Hash{}, 1, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsWithSigsExcluding(common.Hash{}, common.Hash{}, 1, common.Address{}, 0, 1, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLogsDataWordRange(common.Address{}, common.Hash{}, 0, common.Hash{}, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLogsDataWordGreaterThan(common.Address{}, common.Hash{}, 0, common.Hash{}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLogsCreatedAfter(common.Address{}, common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) + _, _ = orm.SelectLatestLogEventSigsAddrsWithConfs(0, []common.Address{{}}, []common.Hash{{}}, 1, pg.WithParentCtx(ctx)) + _, _ = orm.SelectIndexedLogsCreatedAfter(common.Address{}, common.Hash{}, 1, []common.Hash{}, time.Now(), 0, pg.WithParentCtx(ctx)) + _ = orm.InsertLogs([]Log{}, pg.WithParentCtx(ctx)) + _ = orm.InsertBlock(common.Hash{}, 1, time.Now(), 0, pg.WithParentCtx(ctx)) + + require.Equal(t, 13, testutil.CollectAndCount(orm.queryDuration)) + require.Equal(t, 10, 
testutil.CollectAndCount(orm.datasetSize)) } func TestShouldPublishDurationInCaseOfError(t *testing.T) { ctx := testutils.Context(t) - lp := createObservedPollLogger(t, 200) - require.Equal(t, 0, testutil.CollectAndCount(lp.queryDuration)) + orm := createObservedORM(t, 200) + t.Cleanup(func() { resetMetrics(*orm) }) + require.Equal(t, 0, testutil.CollectAndCount(orm.queryDuration)) - _, err := lp.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) + _, err := orm.SelectLatestLogByEventSigWithConfs(common.Hash{}, common.Address{}, 0, pg.WithParentCtx(ctx)) require.Error(t, err) - require.Equal(t, 1, testutil.CollectAndCount(lp.queryDuration)) - require.Equal(t, 1, counterFromHistogramByLabels(t, lp.queryDuration, "200", "SelectLatestLogByEventSigWithConfs")) - - resetMetrics(*lp) + require.Equal(t, 1, testutil.CollectAndCount(orm.queryDuration)) + require.Equal(t, 1, counterFromHistogramByLabels(t, orm.queryDuration, "200", "SelectLatestLogByEventSigWithConfs")) } func TestMetricsAreProperlyPopulatedWithLabels(t *testing.T) { - lp := createObservedPollLogger(t, 420) + orm := createObservedORM(t, 420) + t.Cleanup(func() { resetMetrics(*orm) }) expectedCount := 9 expectedSize := 2 for i := 0; i < expectedCount; i++ { - _, err := withObservedQueryAndResults(lp, "query", func() ([]string, error) { return []string{"value1", "value2"}, nil }) + _, err := withObservedQueryAndResults(orm, "query", func() ([]string, error) { return []string{"value1", "value2"}, nil }) require.NoError(t, err) } - require.Equal(t, expectedCount, counterFromHistogramByLabels(t, lp.queryDuration, "420", "query")) - require.Equal(t, expectedSize, counterFromGaugeByLabels(lp.datasetSize, "420", "query")) - - require.Equal(t, 0, counterFromHistogramByLabels(t, lp.queryDuration, "420", "other_query")) - require.Equal(t, 0, counterFromHistogramByLabels(t, lp.queryDuration, "5", "query")) + require.Equal(t, expectedCount, counterFromHistogramByLabels(t, 
orm.queryDuration, "420", "query")) + require.Equal(t, expectedSize, counterFromGaugeByLabels(orm.datasetSize, "420", "query")) - require.Equal(t, 0, counterFromGaugeByLabels(lp.datasetSize, "420", "other_query")) - require.Equal(t, 0, counterFromGaugeByLabels(lp.datasetSize, "5", "query")) + require.Equal(t, 0, counterFromHistogramByLabels(t, orm.queryDuration, "420", "other_query")) + require.Equal(t, 0, counterFromHistogramByLabels(t, orm.queryDuration, "5", "query")) - resetMetrics(*lp) + require.Equal(t, 0, counterFromGaugeByLabels(orm.datasetSize, "420", "other_query")) + require.Equal(t, 0, counterFromGaugeByLabels(orm.datasetSize, "5", "query")) } func TestNotPublishingDatasetSizeInCaseOfError(t *testing.T) { - lp := createObservedPollLogger(t, 420) + orm := createObservedORM(t, 420) - _, err := withObservedQueryAndResults(lp, "errorQuery", func() ([]string, error) { return nil, fmt.Errorf("error") }) + _, err := withObservedQueryAndResults(orm, "errorQuery", func() ([]string, error) { return nil, fmt.Errorf("error") }) require.Error(t, err) - require.Equal(t, 1, counterFromHistogramByLabels(t, lp.queryDuration, "420", "errorQuery")) - require.Equal(t, 0, counterFromGaugeByLabels(lp.datasetSize, "420", "errorQuery")) + require.Equal(t, 1, counterFromHistogramByLabels(t, orm.queryDuration, "420", "errorQuery")) + require.Equal(t, 0, counterFromGaugeByLabels(orm.datasetSize, "420", "errorQuery")) } func TestMetricsAreProperlyPopulatedForWrites(t *testing.T) { - lp := createObservedPollLogger(t, 420) - require.NoError(t, withObservedExec(lp, "execQuery", func() error { return nil })) - require.Error(t, withObservedExec(lp, "execQuery", func() error { return fmt.Errorf("error") })) + orm := createObservedORM(t, 420) + require.NoError(t, withObservedExec(orm, "execQuery", func() error { return nil })) + require.Error(t, withObservedExec(orm, "execQuery", func() error { return fmt.Errorf("error") })) - require.Equal(t, 2, counterFromHistogramByLabels(t, 
lp.queryDuration, "420", "execQuery")) + require.Equal(t, 2, counterFromHistogramByLabels(t, orm.queryDuration, "420", "execQuery")) } -func createObservedPollLogger(t *testing.T, chainId int64) *ObservedORM { +func createObservedORM(t *testing.T, chainId int64) *ObservedORM { lggr, _ := logger.TestLoggerObserved(t, zapcore.ErrorLevel) db := pgtest.NewSqlxDB(t) return NewObservedORM( diff --git a/core/chains/evm/logpoller/orm.go b/core/chains/evm/logpoller/orm.go index f8d0e618762..06f4acbb4f1 100644 --- a/core/chains/evm/logpoller/orm.go +++ b/core/chains/evm/logpoller/orm.go @@ -22,7 +22,7 @@ import ( type ORM interface { Q() pg.Q InsertLogs(logs []Log, qopts ...pg.QOpt) error - InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, qopts ...pg.QOpt) error + InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, lastFinalizedBlockNumber int64, qopts ...pg.QOpt) error InsertFilter(filter Filter, qopts ...pg.QOpt) error LoadFilters(qopts ...pg.QOpt) (map[string]Filter, error) @@ -53,7 +53,6 @@ type ORM interface { SelectIndexedLogsByTxHash(eventSig common.Hash, txHash common.Hash, qopts ...pg.QOpt) ([]Log, error) SelectLogsDataWordRange(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin, wordValueMax common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) SelectLogsDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, confs Confirmations, qopts ...pg.QOpt) ([]Log, error) - SelectLogsUntilBlockHashDataWordGreaterThan(address common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) } type DbORM struct { @@ -76,19 +75,20 @@ func (o *DbORM) Q() pg.Q { } // InsertBlock is idempotent to support replays. 
-func (o *DbORM) InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, qopts ...pg.QOpt) error { +func (o *DbORM) InsertBlock(blockHash common.Hash, blockNumber int64, blockTimestamp time.Time, finalizedBlock int64, qopts ...pg.QOpt) error { args, err := newQueryArgs(o.chainID). withCustomHashArg("block_hash", blockHash). withCustomArg("block_number", blockNumber). withCustomArg("block_timestamp", blockTimestamp). + withCustomArg("finalized_block_number", finalizedBlock). toArgs() if err != nil { return err } return o.q.WithOpts(qopts...).ExecQNamed(` INSERT INTO evm.log_poller_blocks - (evm_chain_id, block_hash, block_number, block_timestamp, created_at) - VALUES (:evm_chain_id, :block_hash, :block_number, :block_timestamp, NOW()) + (evm_chain_id, block_hash, block_number, block_timestamp, finalized_block_number, created_at) + VALUES (:evm_chain_id, :block_hash, :block_number, :block_timestamp, :finalized_block_number, NOW()) ON CONFLICT DO NOTHING`, args) } @@ -183,7 +183,7 @@ func (o *DbORM) SelectLatestLogByEventSigWithConfs(eventSig common.Hash, address AND event_sig = :event_sig AND address = :address AND block_number <= %s - ORDER BY (block_number, log_index) DESC LIMIT 1`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index) DESC LIMIT 1`, nestedBlockNumberQuery(confs)) var l Log if err := o.q.WithOpts(qopts...).GetNamed(query, &l, args); err != nil { return nil, err @@ -330,7 +330,7 @@ func (o *DbORM) SelectLogsCreatedAfter(address common.Address, eventSig common.H AND event_sig = :event_sig AND block_timestamp > :block_timestamp_after AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { @@ -408,7 +408,7 @@ func (o *DbORM) SelectLatestLogEventSigsAddrsWithConfs(fromBlock int64, addresse AND block_number <= %s GROUP BY 
event_sig, address ) - ORDER BY block_number ASC`, nestedBlockNumberQuery()) + ORDER BY block_number ASC`, nestedBlockNumberQuery(confs)) var logs []Log if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, errors.Wrap(err, "failed to execute query") @@ -433,7 +433,7 @@ func (o *DbORM) SelectLatestBlockByEventSigsAddrsWithConfs(fromBlock int64, even AND event_sig = ANY(:event_sig_array) AND address = ANY(:address_array) AND block_number > :start_block - AND block_number <= %s`, nestedBlockNumberQuery()) + AND block_number <= %s`, nestedBlockNumberQuery(confs)) var blockNumber int64 if err := o.q.WithOpts(qopts...).GetNamed(query, &blockNumber, args); err != nil { return 0, err @@ -458,7 +458,7 @@ func (o *DbORM) SelectLogsDataWordRange(address common.Address, eventSig common. AND substring(data from 32*:word_index+1 for 32) >= :word_value_min AND substring(data from 32*:word_index+1 for 32) <= :word_value_max AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -482,7 +482,7 @@ func (o *DbORM) SelectLogsDataWordGreaterThan(address common.Address, eventSig c AND event_sig = :event_sig AND substring(data from 32*:word_index+1 for 32) >= :word_value_min AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -506,7 +506,7 @@ func (o *DbORM) SelectIndexedLogsTopicGreaterThan(address common.Address, eventS AND event_sig = :event_sig AND topics[:topic_index] >= :topic_value_min AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, 
nestedBlockNumberQuery(confs)) var logs []Log if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -532,7 +532,7 @@ func (o *DbORM) SelectIndexedLogsTopicRange(address common.Address, eventSig com AND topics[:topic_index] >= :topic_value_min AND topics[:topic_index] <= :topic_value_max AND block_number <= %s - ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery()) + ORDER BY (evm.logs.block_number, evm.logs.log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -556,7 +556,7 @@ func (o *DbORM) SelectIndexedLogs(address common.Address, eventSig common.Hash, AND event_sig = :event_sig AND topics[:topic_index] = ANY(:topic_values) AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err := o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { return nil, err @@ -610,7 +610,7 @@ func (o *DbORM) SelectIndexedLogsCreatedAfter(address common.Address, eventSig c AND topics[:topic_index] = ANY(:topic_values) AND block_timestamp > :block_timestamp_after AND block_number <= %s - ORDER BY (block_number, log_index)`, nestedBlockNumberQuery()) + ORDER BY (block_number, log_index)`, nestedBlockNumberQuery(confs)) var logs []Log if err = o.q.WithOpts(qopts...).SelectNamed(&logs, query, args); err != nil { @@ -655,7 +655,7 @@ func (o *DbORM) SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topic return nil, err } - nestedQuery := nestedBlockNumberQuery() + nestedQuery := nestedBlockNumberQuery(confs) query := fmt.Sprintf(` SELECT * FROM evm.logs WHERE evm_chain_id = :evm_chain_id @@ -681,36 +681,20 @@ func (o *DbORM) SelectIndexedLogsWithSigsExcluding(sigA, sigB common.Hash, topic return logs, nil } -func (o *DbORM) SelectLogsUntilBlockHashDataWordGreaterThan(address 
common.Address, eventSig common.Hash, wordIndex int, wordValueMin common.Hash, untilBlockHash common.Hash, qopts ...pg.QOpt) ([]Log, error) { - var logs []Log - q := o.q.WithOpts(qopts...) - err := q.Transaction(func(tx pg.Queryer) error { - // We want to mimic the behaviour of the ETH RPC which errors if blockhash not found. - var block LogPollerBlock - if err := tx.Get(&block, - `SELECT * FROM evm.log_poller_blocks - WHERE evm_chain_id = $1 AND block_hash = $2`, utils.NewBig(o.chainID), untilBlockHash); err != nil { - return err - } - return q.Select(&logs, - `SELECT * FROM evm.logs - WHERE evm_chain_id = $1 - AND address = $2 AND event_sig = $3 - AND substring(data from 32*$4+1 for 32) >= $5 - AND block_number <= $6 - ORDER BY (block_number, log_index)`, utils.NewBig(o.chainID), address, eventSig.Bytes(), wordIndex, wordValueMin.Bytes(), block.BlockNumber) - }) - if err != nil { - return nil, err +func nestedBlockNumberQuery(confs Confirmations) string { + if confs == Finalized { + return ` + (SELECT finalized_block_number + FROM evm.log_poller_blocks + WHERE evm_chain_id = :evm_chain_id + ORDER BY block_number DESC LIMIT 1) ` } - return logs, nil -} - -func nestedBlockNumberQuery() string { + // Intentionally wrap with greatest() function and don't return negative block numbers when :confs > :block_number + // It doesn't impact logic of the outer query, because block numbers are never less or equal to 0 (guarded by log_poller_blocks_block_number_check) return ` - (SELECT COALESCE(block_number, 0) + (SELECT greatest(block_number - :confs, 0) FROM evm.log_poller_blocks WHERE evm_chain_id = :evm_chain_id - ORDER BY block_number DESC LIMIT 1) - :confs` + ORDER BY block_number DESC LIMIT 1) ` } diff --git a/core/chains/evm/logpoller/orm_test.go b/core/chains/evm/logpoller/orm_test.go index 1f43586548b..380d8528d27 100644 --- a/core/chains/evm/logpoller/orm_test.go +++ b/core/chains/evm/logpoller/orm_test.go @@ -47,7 +47,7 @@ func GenLogWithTimestamp(chainID 
*big.Int, logIndex int64, blockNum int64, block func TestLogPoller_Batching(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) var logs []logpoller.Log // Inserts are limited to 65535 parameters. A log being 10 parameters this results in // a maximum of 6553 log inserts per tx. As inserting more than 6553 would result in @@ -63,7 +63,7 @@ func TestLogPoller_Batching(t *testing.T) { } func TestORM_GetBlocks_From_Range(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM // Insert many blocks and read them back together blocks := []block{ @@ -94,7 +94,7 @@ func TestORM_GetBlocks_From_Range(t *testing.T) { }, } for _, b := range blocks { - require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Unix(b.timestamp, 0).UTC())) + require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Unix(b.timestamp, 0).UTC(), 0)) } var blockNumbers []int64 @@ -118,7 +118,7 @@ func TestORM_GetBlocks_From_Range(t *testing.T) { } func TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM // Insert many blocks and read them back together var recentBlocks []block @@ -126,7 +126,7 @@ func TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { recentBlocks = append(recentBlocks, block{number: int64(i), hash: common.HexToHash(fmt.Sprintf("0x%d", i))}) } for _, b := range recentBlocks { - require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Now())) + require.NoError(t, o1.InsertBlock(b.hash, b.number, time.Now(), 0)) } var blockNumbers []int64 @@ -150,11 +150,11 @@ func TestORM_GetBlocks_From_Range_Recent_Blocks(t *testing.T) { } func TestORM(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM o2 := th.ORM2 // Insert and read back a block. 
- require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now(), 0)) b, err := o1.SelectBlockByHash(common.HexToHash("0x1234")) require.NoError(t, err) assert.Equal(t, b.BlockNumber, int64(10)) @@ -162,8 +162,8 @@ func TestORM(t *testing.T) { assert.Equal(t, b.EvmChainId.String(), th.ChainID.String()) // Insert blocks from a different chain - require.NoError(t, o2.InsertBlock(common.HexToHash("0x1234"), 11, time.Now())) - require.NoError(t, o2.InsertBlock(common.HexToHash("0x1235"), 12, time.Now())) + require.NoError(t, o2.InsertBlock(common.HexToHash("0x1234"), 11, time.Now(), 0)) + require.NoError(t, o2.InsertBlock(common.HexToHash("0x1235"), 12, time.Now(), 0)) b2, err := o2.SelectBlockByHash(common.HexToHash("0x1234")) require.NoError(t, err) assert.Equal(t, b2.BlockNumber, int64(11)) @@ -310,7 +310,7 @@ func TestORM(t *testing.T) { require.Error(t, err) assert.True(t, errors.Is(err, sql.ErrNoRows)) // With block 10, only 0 confs should work - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 10, time.Now(), 0)) log, err := o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) require.NoError(t, err) assert.Equal(t, int64(10), log.BlockNumber) @@ -319,8 +319,8 @@ func TestORM(t *testing.T) { assert.True(t, errors.Is(err, sql.ErrNoRows)) // With block 12, anything <=2 should work require.NoError(t, o1.DeleteBlocksAfter(10)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 11, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 12, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 11, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 12, time.Now(), 0)) _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 0) require.NoError(t, 
err) _, err = o1.SelectLatestLogByEventSigWithConfs(topic, common.HexToAddress("0x1234"), 1) @@ -332,9 +332,9 @@ func TestORM(t *testing.T) { assert.True(t, errors.Is(err, sql.ErrNoRows)) // Required for confirmations to work - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 13, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 14, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 15, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 13, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 14, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 15, time.Now(), 0)) // Latest log for topic for addr "0x1234" is @ block 11 lgs, err := o1.SelectLatestLogEventSigsAddrsWithConfs(0 /* startBlock */, []common.Address{common.HexToAddress("0x1234")}, []common.Hash{topic}, 0) @@ -368,8 +368,8 @@ func TestORM(t *testing.T) { require.NoError(t, err) require.Equal(t, 2, len(lgs)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 16, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1238"), 17, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 16, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1238"), 17, time.Now(), 0)) filter0 := logpoller.Filter{ Name: "permanent retention filter", @@ -447,11 +447,11 @@ func insertLogsTopicValueRange(t *testing.T, chainID *big.Int, o *logpoller.DbOR } func TestORM_IndexedLogs(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM eventSig := common.HexToHash("0x1599") addr := common.HexToAddress("0x1234") - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) insertLogsTopicValueRange(t, th.ChainID, o1, addr, 1, eventSig, 1, 3) insertLogsTopicValueRange(t, th.ChainID, o1, addr, 
2, eventSig, 4, 4) // unconfirmed @@ -497,24 +497,24 @@ func TestORM_IndexedLogs(t *testing.T) { assert.Equal(t, 3, len(lgs)) // Check confirmations work as expected. - require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1) require.NoError(t, err) assert.Equal(t, 0, len(lgs)) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x3"), 3, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x3"), 3, time.Now(), 0)) lgs, err = o1.SelectIndexedLogsTopicRange(addr, eventSig, 1, logpoller.EvmWord(4), logpoller.EvmWord(4), 1) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) } func TestORM_SelectIndexedLogsByTxHash(t *testing.T) { - th := SetupTH(t, 0, 3, 2) + th := SetupTH(t, false, 0, 3, 2) o1 := th.ORM eventSig := common.HexToHash("0x1599") txHash := common.HexToHash("0x1888") addr := common.HexToAddress("0x1234") - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) logs := []logpoller.Log{ { EvmChainId: utils.NewBig(th.ChainID), @@ -574,11 +574,11 @@ func TestORM_SelectIndexedLogsByTxHash(t *testing.T) { } func TestORM_DataWords(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM eventSig := common.HexToHash("0x1599") addr := common.HexToAddress("0x1234") - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) require.NoError(t, o1.InsertLogs([]logpoller.Log{ { EvmChainId: utils.NewBig(th.ChainID), @@ -624,7 +624,7 @@ func TestORM_DataWords(t *testing.T) { require.NoError(t, err) assert.Equal(t, 0, len(lgs)) // Confirm it, then can query. 
- require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) lgs, err = o1.SelectLogsDataWordRange(addr, eventSig, 1, logpoller.EvmWord(3), logpoller.EvmWord(3), 0) require.NoError(t, err) assert.Equal(t, 1, len(lgs)) @@ -634,25 +634,10 @@ func TestORM_DataWords(t *testing.T) { lgs, err = o1.SelectLogsDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), 0) require.NoError(t, err) assert.Equal(t, 2, len(lgs)) - - // Unknown hash should an error - lgs, err = o1.SelectLogsUntilBlockHashDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), common.HexToHash("0x3")) - require.Error(t, err) - assert.Equal(t, 0, len(lgs)) - - // 1 block should include first log - lgs, err = o1.SelectLogsUntilBlockHashDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), common.HexToHash("0x1")) - require.NoError(t, err) - assert.Equal(t, 1, len(lgs)) - - // 2 block should include both - lgs, err = o1.SelectLogsUntilBlockHashDataWordGreaterThan(addr, eventSig, 0, logpoller.EvmWord(1), common.HexToHash("0x2")) - require.NoError(t, err) - assert.Equal(t, 2, len(lgs)) } func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM // Insert logs on different topics, should be able to read them @@ -746,10 +731,10 @@ func TestORM_SelectLogsWithSigsByBlockRangeFilter(t *testing.T) { } func TestORM_DeleteBlocksBefore(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) o1 := th.ORM - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 1, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 2, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1234"), 1, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1235"), 2, time.Now(), 0)) require.NoError(t, o1.DeleteBlocksBefore(1)) // 1 should be gone. 
_, err := o1.SelectBlockByNumber(1) @@ -758,8 +743,8 @@ func TestORM_DeleteBlocksBefore(t *testing.T) { require.NoError(t, err) assert.Equal(t, int64(2), b.BlockNumber) // Clear multiple - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 3, time.Now())) - require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 4, time.Now())) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1236"), 3, time.Now(), 0)) + require.NoError(t, o1.InsertBlock(common.HexToHash("0x1237"), 4, time.Now(), 0)) require.NoError(t, o1.DeleteBlocksBefore(3)) _, err = o1.SelectBlockByNumber(2) require.Equal(t, err, sql.ErrNoRows) @@ -769,7 +754,7 @@ func TestORM_DeleteBlocksBefore(t *testing.T) { func TestLogPoller_Logs(t *testing.T) { t.Parallel() - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) event1 := EmitterABI.Events["Log1"].ID event2 := EmitterABI.Events["Log2"].ID address1 := common.HexToAddress("0x2ab9a2Dc53736b361b72d900CdF9F78F9406fbbb") @@ -817,7 +802,7 @@ func TestLogPoller_Logs(t *testing.T) { } func BenchmarkLogs(b *testing.B) { - th := SetupTH(b, 2, 3, 2) + th := SetupTH(b, false, 2, 3, 2) o := th.ORM var lgs []logpoller.Log addr := common.HexToAddress("0x1234") @@ -843,7 +828,7 @@ func BenchmarkLogs(b *testing.B) { } func TestSelectLogsWithSigsExcluding(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) orm := th.ORM addressA := common.HexToAddress("0x11111") addressB := common.HexToAddress("0x22222") @@ -886,7 +871,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { Data: []byte("requestID-B1"), }, })) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x1"), 1, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x1"), 1, time.Now(), 0)) //Get any requestSigA from addressA that do not have a equivalent responseSigA logs, err := orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 3, 0) @@ -915,7 +900,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { 
Data: []byte("responseID-A1"), }, })) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x2"), 2, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x2"), 2, time.Now(), 0)) //Should return nothing as requestID-A1 has been fulfilled logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigA, responseSigA, 1, addressA, 0, 3, 0) @@ -966,7 +951,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { Data: []byte("requestID-C3"), }, })) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x3"), 3, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x3"), 3, time.Now(), 0)) //Get all unfulfilled requests from addressC, match on topic index 3 logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 4, 0) @@ -1026,13 +1011,13 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { require.NoError(t, err) require.Len(t, logs, 0) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x4"), 4, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x5"), 5, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x6"), 6, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x7"), 7, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x8"), 8, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x9"), 9, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x10"), 10, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x4"), 4, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x5"), 5, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x6"), 6, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x7"), 7, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x8"), 8, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x9"), 9, time.Now(), 0)) + require.NoError(t, 
orm.InsertBlock(common.HexToHash("0x10"), 10, time.Now(), 0)) //Fulfill requestID-C3 require.NoError(t, orm.InsertLogs([]logpoller.Log{ @@ -1062,9 +1047,9 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { require.Equal(t, logs[0].Data, []byte("requestID-C1")) //Insert 3 more blocks so that the requestID-C1 has enough confirmations - require.NoError(t, orm.InsertBlock(common.HexToHash("0x11"), 11, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x12"), 12, time.Now())) - require.NoError(t, orm.InsertBlock(common.HexToHash("0x13"), 13, time.Now())) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x11"), 11, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x12"), 12, time.Now(), 0)) + require.NoError(t, orm.InsertBlock(common.HexToHash("0x13"), 13, time.Now(), 0)) logs, err = orm.SelectIndexedLogsWithSigsExcluding(requestSigB, responseSigB, 3, addressC, 0, 10, 0) require.NoError(t, err) @@ -1089,7 +1074,7 @@ func TestSelectLogsWithSigsExcluding(t *testing.T) { } func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) event1 := EmitterABI.Events["Log1"].ID event2 := EmitterABI.Events["Log2"].ID address1 := utils.RandomAddress() @@ -1101,7 +1086,7 @@ func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { GenLog(th.ChainID, 2, 2, utils.RandomAddress().String(), event2[:], address2), GenLog(th.ChainID, 2, 3, utils.RandomAddress().String(), event2[:], address2), })) - require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 3, time.Now())) + require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 3, time.Now(), 1)) tests := []struct { name string @@ -1135,6 +1120,14 @@ func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { fromBlock: 0, expectedBlockNumber: 1, }, + { + name: "only finalized log is picked", + events: []common.Hash{event1, event2}, + addrs: []common.Address{address1, 
address2}, + confs: logpoller.Finalized, + fromBlock: 0, + expectedBlockNumber: 1, + }, { name: "picks max block from two events", events: []common.Hash{event1, event2}, @@ -1178,23 +1171,23 @@ func TestSelectLatestBlockNumberEventSigsAddrsWithConfs(t *testing.T) { } func TestSelectLogsCreatedAfter(t *testing.T) { - th := SetupTH(t, 2, 3, 2) + th := SetupTH(t, false, 2, 3, 2) event := EmitterABI.Events["Log1"].ID address := utils.RandomAddress() - past := time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC) - now := time.Date(2020, 1, 1, 12, 12, 12, 0, time.UTC) - future := time.Date(2030, 1, 1, 12, 12, 12, 0, time.UTC) + block1ts := time.Date(2010, 1, 1, 12, 12, 12, 0, time.UTC) + block2ts := time.Date(2020, 1, 1, 12, 12, 12, 0, time.UTC) + block3ts := time.Date(2030, 1, 1, 12, 12, 12, 0, time.UTC) require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{ - GenLogWithTimestamp(th.ChainID, 1, 1, utils.RandomAddress().String(), event[:], address, past), - GenLogWithTimestamp(th.ChainID, 1, 2, utils.RandomAddress().String(), event[:], address, now), - GenLogWithTimestamp(th.ChainID, 2, 2, utils.RandomAddress().String(), event[:], address, now), - GenLogWithTimestamp(th.ChainID, 1, 3, utils.RandomAddress().String(), event[:], address, future), + GenLogWithTimestamp(th.ChainID, 1, 1, utils.RandomAddress().String(), event[:], address, block1ts), + GenLogWithTimestamp(th.ChainID, 1, 2, utils.RandomAddress().String(), event[:], address, block2ts), + GenLogWithTimestamp(th.ChainID, 2, 2, utils.RandomAddress().String(), event[:], address, block2ts), + GenLogWithTimestamp(th.ChainID, 1, 3, utils.RandomAddress().String(), event[:], address, block3ts), })) - require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 1, past)) - require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 2, now)) - require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 3, future)) + require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 1, block1ts, 0)) + 
require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 2, block2ts, 1)) + require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 3, block3ts, 2)) type expectedLog struct { block int64 @@ -1210,7 +1203,7 @@ func TestSelectLogsCreatedAfter(t *testing.T) { { name: "picks logs after block 1", confs: 0, - after: past, + after: block1ts, expectedLogs: []expectedLog{ {block: 2, log: 1}, {block: 2, log: 2}, @@ -1220,7 +1213,7 @@ func TestSelectLogsCreatedAfter(t *testing.T) { { name: "skips blocks with not enough confirmations", confs: 1, - after: past, + after: block1ts, expectedLogs: []expectedLog{ {block: 2, log: 1}, {block: 2, log: 2}, @@ -1229,7 +1222,7 @@ func TestSelectLogsCreatedAfter(t *testing.T) { { name: "limits number of blocks by block_timestamp", confs: 0, - after: now, + after: block2ts, expectedLogs: []expectedLog{ {block: 3, log: 1}, }, @@ -1237,37 +1230,74 @@ func TestSelectLogsCreatedAfter(t *testing.T) { { name: "returns empty dataset for future timestamp", confs: 0, - after: future, + after: block3ts, expectedLogs: []expectedLog{}, }, { name: "returns empty dataset when too many confirmations are required", confs: 3, - after: past, + after: block1ts, expectedLogs: []expectedLog{}, }, + { + name: "returns only finalized log", + confs: logpoller.Finalized, + after: block1ts, + expectedLogs: []expectedLog{ + {block: 2, log: 1}, + {block: 2, log: 2}, + }, + }, } for _, tt := range tests { t.Run("SelectLogsCreatedAfter"+tt.name, func(t *testing.T) { logs, err := th.ORM.SelectLogsCreatedAfter(address, event, tt.after, tt.confs) require.NoError(t, err) - assert.Len(t, logs, len(tt.expectedLogs)) + require.Len(t, logs, len(tt.expectedLogs)) for i, log := range logs { - assert.Equal(t, tt.expectedLogs[i].block, log.BlockNumber) - assert.Equal(t, tt.expectedLogs[i].log, log.LogIndex) + require.Equal(t, tt.expectedLogs[i].block, log.BlockNumber) + require.Equal(t, tt.expectedLogs[i].log, log.LogIndex) } }) 
t.Run("SelectIndexedLogsCreatedAfter"+tt.name, func(t *testing.T) { logs, err := th.ORM.SelectIndexedLogsCreatedAfter(address, event, 1, []common.Hash{event}, tt.after, tt.confs) require.NoError(t, err) - assert.Len(t, logs, len(tt.expectedLogs)) + require.Len(t, logs, len(tt.expectedLogs)) for i, log := range logs { - assert.Equal(t, tt.expectedLogs[i].block, log.BlockNumber) - assert.Equal(t, tt.expectedLogs[i].log, log.LogIndex) + require.Equal(t, tt.expectedLogs[i].block, log.BlockNumber) + require.Equal(t, tt.expectedLogs[i].log, log.LogIndex) } }) } } + +func TestNestedLogPollerBlocksQuery(t *testing.T) { + th := SetupTH(t, false, 2, 3, 2) + event := EmitterABI.Events["Log1"].ID + address := utils.RandomAddress() + + require.NoError(t, th.ORM.InsertLogs([]logpoller.Log{ + GenLog(th.ChainID, 1, 8, utils.RandomAddress().String(), event[:], address), + })) + + // Empty logs when block are not persisted + logs, err := th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Unconfirmed) + require.NoError(t, err) + require.Len(t, logs, 0) + + // Persist block + require.NoError(t, th.ORM.InsertBlock(utils.RandomAddress().Hash(), 10, time.Now(), 0)) + + // Check if query actually works well with provided dataset + logs, err = th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Unconfirmed) + require.NoError(t, err) + require.Len(t, logs, 1) + + // Empty logs when number of confirmations is too deep + logs, err = th.ORM.SelectIndexedLogs(address, event, 1, []common.Hash{event}, logpoller.Confirmations(4)) + require.NoError(t, err) + require.Len(t, logs, 0) +} diff --git a/core/chains/evm/logpoller/query.go b/core/chains/evm/logpoller/query.go index 2a50dc282e4..7443a860a85 100644 --- a/core/chains/evm/logpoller/query.go +++ b/core/chains/evm/logpoller/query.go @@ -51,33 +51,27 @@ func (q *queryArgs) withEventSig(eventSig common.Hash) *queryArgs { } func (q *queryArgs) withEventSigArray(eventSigs []common.Hash) *queryArgs { - 
q.args["event_sig_array"] = concatBytes(eventSigs) - return q + return q.withCustomArg("event_sig_array", concatBytes(eventSigs)) } func (q *queryArgs) withAddress(address common.Address) *queryArgs { - q.args["address"] = address - return q + return q.withCustomArg("address", address) } func (q *queryArgs) withAddressArray(addresses []common.Address) *queryArgs { - q.args["address_array"] = concatBytes(addresses) - return q + return q.withCustomArg("address_array", concatBytes(addresses)) } func (q *queryArgs) withStartBlock(startBlock int64) *queryArgs { - q.args["start_block"] = startBlock - return q + return q.withCustomArg("start_block", startBlock) } func (q *queryArgs) withEndBlock(endBlock int64) *queryArgs { - q.args["end_block"] = endBlock - return q + return q.withCustomArg("end_block", endBlock) } func (q *queryArgs) withWordIndex(wordIndex int) *queryArgs { - q.args["word_index"] = wordIndex - return q + return q.withCustomArg("word_index", wordIndex) } func (q *queryArgs) withWordValueMin(wordValueMin common.Hash) *queryArgs { @@ -89,8 +83,7 @@ func (q *queryArgs) withWordValueMax(wordValueMax common.Hash) *queryArgs { } func (q *queryArgs) withConfs(confs Confirmations) *queryArgs { - q.args["confs"] = confs - return q + return q.withCustomArg("confs", confs) } func (q *queryArgs) withTopicIndex(index int) *queryArgs { @@ -99,8 +92,7 @@ func (q *queryArgs) withTopicIndex(index int) *queryArgs { q.err = append(q.err, fmt.Errorf("invalid index for topic: %d", index)) } // Add 1 since postgresql arrays are 1-indexed. 
- q.args["topic_index"] = index + 1 - return q + return q.withCustomArg("topic_index", index+1) } func (q *queryArgs) withTopicValueMin(valueMin common.Hash) *queryArgs { @@ -112,13 +104,11 @@ func (q *queryArgs) withTopicValueMax(valueMax common.Hash) *queryArgs { } func (q *queryArgs) withTopicValues(values []common.Hash) *queryArgs { - q.args["topic_values"] = concatBytes(values) - return q + return q.withCustomArg("topic_values", concatBytes(values)) } func (q *queryArgs) withBlockTimestampAfter(after time.Time) *queryArgs { - q.args["block_timestamp_after"] = after - return q + return q.withCustomArg("block_timestamp_after", after) } func (q *queryArgs) withTxHash(hash common.Hash) *queryArgs { @@ -126,8 +116,7 @@ func (q *queryArgs) withTxHash(hash common.Hash) *queryArgs { } func (q *queryArgs) withCustomHashArg(name string, arg common.Hash) *queryArgs { - q.args[name] = arg.Bytes() - return q + return q.withCustomArg(name, arg.Bytes()) } func (q *queryArgs) withCustomArg(name string, arg any) *queryArgs { diff --git a/core/chains/evm/txmgr/txmgr_test.go b/core/chains/evm/txmgr/txmgr_test.go index 7932167ff48..de8c6ff4ef8 100644 --- a/core/chains/evm/txmgr/txmgr_test.go +++ b/core/chains/evm/txmgr/txmgr_test.go @@ -44,7 +44,7 @@ import ( func makeTestEvmTxm( t *testing.T, db *sqlx.DB, ethClient evmclient.Client, estimator gas.EvmFeeEstimator, ccfg txmgr.ChainConfig, fcfg txmgr.FeeConfig, txConfig evmconfig.Transactions, dbConfig txmgr.DatabaseConfig, listenerConfig txmgr.ListenerConfig, keyStore keystore.Eth, eventBroadcaster pg.EventBroadcaster) (txmgr.TxManager, error) { lggr := logger.TestLogger(t) - lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, 2, 3, 2, 1000) + lp := logpoller.NewLogPoller(logpoller.NewORM(testutils.FixtureChainID, db, lggr, pgtest.NewQConfig(true)), ethClient, lggr, 100*time.Millisecond, false, 2, 3, 2, 1000) // logic for building 
components (from evm/evm_txm.go) ------- lggr.Infow("Initializing EVM transaction manager", diff --git a/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go index 30a7bafceb9..506dcb9ea33 100644 --- a/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go +++ b/core/services/ocr2/plugins/ocr2keeper/evm21/logprovider/integration_test.go @@ -662,7 +662,7 @@ func setupDependencies(t *testing.T, db *sqlx.DB, backend *backends.SimulatedBac pollerLggr := logger.TestLogger(t) pollerLggr.SetLogLevel(zapcore.WarnLevel) lorm := logpoller.NewORM(big.NewInt(1337), db, pollerLggr, pgtest.NewQConfig(false)) - lp := logpoller.NewLogPoller(lorm, ethClient, pollerLggr, 100*time.Millisecond, 1, 2, 2, 1000) + lp := logpoller.NewLogPoller(lorm, ethClient, pollerLggr, 100*time.Millisecond, false, 1, 2, 2, 1000) return lp, ethClient } diff --git a/core/services/pg/q.go b/core/services/pg/q.go index 098334bf1e7..3210af1c1d6 100644 --- a/core/services/pg/q.go +++ b/core/services/pg/q.go @@ -256,6 +256,7 @@ func (q Q) Get(dest interface{}, query string, args ...interface{}) error { return ql.withLogError(q.Queryer.GetContext(ctx, dest, query, args...)) } + func (q Q) GetNamed(sql string, dest interface{}, arg interface{}) error { query, args, err := q.BindNamed(sql, arg) if err != nil { diff --git a/core/services/relay/evm/config_poller_test.go b/core/services/relay/evm/config_poller_test.go index 73c16a19596..0a433c3bc54 100644 --- a/core/services/relay/evm/config_poller_test.go +++ b/core/services/relay/evm/config_poller_test.go @@ -87,7 +87,7 @@ func TestConfigPoller(t *testing.T) { ethClient = evmclient.NewSimulatedBackendClient(t, b, testutils.SimulatedChainID) ctx := testutils.Context(t) lorm := logpoller.NewORM(testutils.SimulatedChainID, db, lggr, cfg) - lp = logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, 1, 2, 2, 1000) + lp = 
logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) require.NoError(t, lp.Start(ctx)) t.Cleanup(func() { lp.Close() }) } diff --git a/core/services/relay/evm/functions/config_poller_test.go b/core/services/relay/evm/functions/config_poller_test.go index d6573ef3544..085f0c6e317 100644 --- a/core/services/relay/evm/functions/config_poller_test.go +++ b/core/services/relay/evm/functions/config_poller_test.go @@ -80,7 +80,7 @@ func runTest(t *testing.T, pluginType functions.FunctionsPluginType, expectedDig lggr := logger.TestLogger(t) ctx := testutils.Context(t) lorm := logpoller.NewORM(big.NewInt(1337), db, lggr, cfg) - lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, 1, 2, 2, 1000) + lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) defer lp.Close() require.NoError(t, lp.Start(ctx)) configPoller, err := functions.NewFunctionsConfigPoller(pluginType, lp, lggr) diff --git a/core/services/relay/evm/mercury/helpers_test.go b/core/services/relay/evm/mercury/helpers_test.go index 4e3587b5de6..3a58a25a557 100644 --- a/core/services/relay/evm/mercury/helpers_test.go +++ b/core/services/relay/evm/mercury/helpers_test.go @@ -169,7 +169,7 @@ func SetupTH(t *testing.T, feedID common.Hash) TestHarness { lggr := logger.TestLogger(t) ctx := testutils.Context(t) lorm := logpoller.NewORM(big.NewInt(1337), db, lggr, cfg) - lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, 1, 2, 2, 1000) + lp := logpoller.NewLogPoller(lorm, ethClient, lggr, 100*time.Millisecond, false, 1, 2, 2, 1000) eventBroadcaster := pgmocks.NewEventBroadcaster(t) subscription := pgmocks.NewSubscription(t) require.NoError(t, lp.Start(ctx)) diff --git a/core/store/migrate/migrate_test.go b/core/store/migrate/migrate_test.go index d6135ce4529..7a1e38fb030 100644 --- a/core/store/migrate/migrate_test.go +++ b/core/store/migrate/migrate_test.go @@ -15,12 +15,16 @@ import ( 
"github.com/smartcontractkit/chainlink-relay/pkg/types" evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/logpoller" "github.com/smartcontractkit/chainlink/v2/core/config/env" "github.com/smartcontractkit/chainlink/v2/core/internal/cltest/heavyweight" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils" configtest "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/configtest/v2" + "github.com/smartcontractkit/chainlink/v2/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/v2/core/logger" "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" "github.com/smartcontractkit/chainlink/v2/core/services/job" + "github.com/smartcontractkit/chainlink/v2/core/services/pg" "github.com/smartcontractkit/chainlink/v2/core/services/pipeline" "github.com/smartcontractkit/chainlink/v2/core/services/relay" "github.com/smartcontractkit/chainlink/v2/core/store/migrate" @@ -437,3 +441,141 @@ func TestSetMigrationENVVars(t *testing.T) { require.Equal(t, actualChainID, chainID.String()) }) } + +func TestDatabaseBackFillWithMigration202(t *testing.T) { + _, db := heavyweight.FullTestDBEmptyV2(t, migrationDir, nil) + + err := goose.UpTo(db.DB, migrationDir, 201) + require.NoError(t, err) + + simulatedOrm := logpoller.NewORM(testutils.SimulatedChainID, db, logger.TestLogger(t), pgtest.NewQConfig(true)) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 10, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 51, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 90, time.Now(), 0), err) + require.NoError(t, simulatedOrm.InsertBlock(testutils.Random32Byte(), 120, time.Now(), 23), err) + + baseOrm := logpoller.NewORM(big.NewInt(int64(84531)), db, logger.TestLogger(t), pgtest.NewQConfig(true)) + require.NoError(t, 
baseOrm.InsertBlock(testutils.Random32Byte(), 400, time.Now(), 0), err) + + klaytnOrm := logpoller.NewORM(big.NewInt(int64(1001)), db, logger.TestLogger(t), pgtest.NewQConfig(true)) + require.NoError(t, klaytnOrm.InsertBlock(testutils.Random32Byte(), 100, time.Now(), 0), err) + + err = goose.UpTo(db.DB, migrationDir, 202) + require.NoError(t, err) + + tests := []struct { + name string + blockNumber int64 + expectedFinalizedBlock int64 + orm *logpoller.DbORM + }{ + { + name: "last finalized block not changed if finality is too deep", + blockNumber: 10, + expectedFinalizedBlock: 0, + orm: simulatedOrm, + }, + { + name: "last finalized block is updated for first block", + blockNumber: 51, + expectedFinalizedBlock: 1, + orm: simulatedOrm, + }, + { + name: "last finalized block is updated", + blockNumber: 90, + expectedFinalizedBlock: 40, + orm: simulatedOrm, + }, + { + name: "last finalized block is not changed when finality is set", + blockNumber: 120, + expectedFinalizedBlock: 23, + orm: simulatedOrm, + }, + { + name: "use non default finality depth for chain 84531", + blockNumber: 400, + expectedFinalizedBlock: 200, + orm: baseOrm, + }, + { + name: "use default finality depth for chain 1001", + blockNumber: 100, + expectedFinalizedBlock: 99, + orm: klaytnOrm, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + block, err := tt.orm.SelectBlockByNumber(tt.blockNumber) + require.NoError(t, err) + require.Equal(t, tt.expectedFinalizedBlock, block.FinalizedBlockNumber) + }) + } +} + +func BenchmarkBackfillingRecordsWithMigration202(b *testing.B) { + previousMigration := int64(201) + backfillMigration := int64(202) + chainCount := 2 + // By default, log poller keeps up to 100_000 blocks in the database, this is the pessimistic case + maxLogsSize := 100_000 + // Disable Goose logging for benchmarking + goose.SetLogger(goose.NopLogger()) + _, db := heavyweight.FullTestDBEmptyV2(b, migrationDir, nil) + + err := goose.UpTo(db.DB, migrationDir, 
previousMigration) + require.NoError(b, err) + + q := pg.NewQ(db, logger.NullLogger, pgtest.NewQConfig(true)) + for j := 0; j < chainCount; j++ { + // Insert 100_000 blocks to database, can't do all at once, so batching by 10k + var blocks []logpoller.LogPollerBlock + for i := 0; i < maxLogsSize; i++ { + blocks = append(blocks, logpoller.LogPollerBlock{ + EvmChainId: utils.NewBigI(int64(j + 1)), + BlockHash: testutils.Random32Byte(), + BlockNumber: int64(i + 1000), + FinalizedBlockNumber: 0, + }) + } + batchInsertSize := 10_000 + for i := 0; i < maxLogsSize; i += batchInsertSize { + start, end := i, i+batchInsertSize + if end > maxLogsSize { + end = maxLogsSize + } + + err = q.ExecQNamed(` + INSERT INTO evm.log_poller_blocks + (evm_chain_id, block_hash, block_number, finalized_block_number, block_timestamp, created_at) + VALUES + (:evm_chain_id, :block_hash, :block_number, :finalized_block_number, NOW(), NOW()) + ON CONFLICT DO NOTHING`, blocks[start:end]) + require.NoError(b, err) + } + } + + b.ResetTimer() + + // 1. Measure time of migration 202 + // 2. Goose down to 201 + // 3. 
Reset finalized_block_number to 0 + // Repeat 1-3 + for i := 0; i < b.N; i++ { + b.StartTimer() + err = goose.UpTo(db.DB, migrationDir, backfillMigration) + require.NoError(b, err) + b.StopTimer() + + // Cleanup + err = goose.DownTo(db.DB, migrationDir, previousMigration) + require.NoError(b, err) + + err = q.ExecQ(` + UPDATE evm.log_poller_blocks + SET finalized_block_number = 0`) + require.NoError(b, err) + } +} diff --git a/core/store/migrate/migrations/0201_add_finalized_block_number.sql b/core/store/migrate/migrations/0201_add_finalized_block_number.sql new file mode 100644 index 00000000000..db15ebbed6e --- /dev/null +++ b/core/store/migrate/migrations/0201_add_finalized_block_number.sql @@ -0,0 +1,11 @@ +-- +goose Up +ALTER TABLE evm.log_poller_blocks + ADD COLUMN finalized_block_number + bigint not null + default 0 + check (finalized_block_number >= 0); + + +-- +goose Down +ALTER TABLE evm.log_poller_blocks + DROP COLUMN finalized_block_number; diff --git a/core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql.sql b/core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql.sql new file mode 100644 index 00000000000..0f93cd27482 --- /dev/null +++ b/core/store/migrate/migrations/0202_default_values_for_last_finalized_block.sql.sql @@ -0,0 +1,33 @@ +-- +goose Up + +WITH variables AS ( + SELECT + evm_chain_id, + CASE + WHEN evm_chain_id = 43113 then 1 -- Avax Fuji + WHEN evm_chain_id = 43114 then 1 -- Avax Mainnet + WHEN evm_chain_id = 84531 THEN 200 -- Base Goerli + WHEN evm_chain_id = 8453 THEN 200 -- Base Mainnet + WHEN evm_chain_id = 42220 THEN 1 -- Celo Mainnet + WHEN evm_chain_id = 44787 THEN 1 -- Celo Testnet + WHEN evm_chain_id = 8217 THEN 1 -- Klaytn Mainnet + WHEN evm_chain_id = 1001 THEN 1 -- Klaytn Testnet (Baobab) + WHEN evm_chain_id = 1088 THEN 1 -- Metis Mainnet + WHEN evm_chain_id = 588 THEN 1 -- Metis Rinkeby + WHEN evm_chain_id = 420 THEN 200 -- Optimism Goerli + WHEN evm_chain_id = 10 THEN 200 -- 
Optimism Mainnet + WHEN evm_chain_id = 137 THEN 500 -- Polygon Mainnet + WHEN evm_chain_id = 80001 THEN 500 -- Polygon Mumbai + WHEN evm_chain_id = 534352 THEN 1 -- Scroll Mainnet + WHEN evm_chain_id = 534351 THEN 1 -- Scroll Sepolia + ELSE 50 -- all other chains + END AS finality_depth + FROM evm.log_poller_blocks + GROUP BY evm_chain_id +) + +UPDATE evm.log_poller_blocks AS lpb +SET finalized_block_number = greatest(lpb.block_number - v.finality_depth, 0) +FROM variables v +WHERE lpb.evm_chain_id = v.evm_chain_id + AND lpb.finalized_block_number = 0; \ No newline at end of file