From adf13dc1f11e2321a9b67a483c6f1e0594e77c85 Mon Sep 17 00:00:00 2001
From: HenryNguyen5 <6404866+HenryNguyen5@users.noreply.github.com>
Date: Fri, 10 Jan 2025 14:45:03 -0800
Subject: [PATCH] Keystone in CRIB (#14326)

* Add node api wrapper for ergonomic cmd usage
* Add streams trigger template
* Add mock external adapter for v03 mercury
* First pass of streams trigger provisioning
* WIP: Add capabilities registry provisioner script
* Update nix flake
* Fixup provisioning scripts
* Change default chainid to be 1337
* Add nil check for balances
* Add ability to skip tls verification for local dev
* Gently fail on not loading contracts for ocr job deletion
* fixup! Change default chainid to be 1337
* Formatting
* Change ocr file flag default
* Allow for multiple OCR2KB selection in key fetching
* Support on/offchain transmitter OCR3 config generation
* Properly reset clientmethod on each invocation
* Add mercury contract deployment feature
* Get oracles to successfully connect to each other
* Keep OCR3 and OCR2 config separate
* Add goreleaser setup for mock ea
* Add support for updating bridges
* Add UpdateBridge CLI command
* Clean up comments
* Fix typo
* Add revert detection and revert reason extraction
* Add missing env field to CR struct
* Fix CR deployment bugs
* Fix trigger capability typo
* Add external registry and capability p2p config gen
* Add redial support for logging in
* Fix capability registration
* HACK: Add keystone workflow deployment to streams trigger cmd
* Typo
* Log ocr3 config more extensively
* Set isPublic to false for all-in-one DON
* Use nodeapi for deleting ocr3 jobs
* Have mock EA return consistent prices every 10 sec
* Remove pluginconfig to properly enable trigger
* Add additional logging
* Fix rebase errors
* Shim ksdeploy types
* Fix goreleaser config for mock ea
* Don't depend on cgo for mock EA
* Handle aptos key creation
* Tune mercury OCR rounds to be less frequent
* Use deployments rather than pods
* Overhaul node host + url handling
* Add dummy encryption public key to nodes
* Add missing ctx
* Initial multidon support
* Fix ingress generation for postprovision
* Fix argument ordering
* Fix nodelist sorting and evmconfig.workflow configuration
* Update tests
* Assign keystone workflows to workflow nodes
* Expose capabilities on WorkflowDON
* Skip bootstrap node for keystone workflows
* Refactor nodelists + pubkeys -> nodesets
* Skip adding bootstrap nodes to capability registry
* Skip bootstrap node for mercury OCR config
* Formatting
* Fix stale print statement
* Bump nodeset size to minimum 5 since > 2F+1
* Use service name for DNS resolution
* Update tests
* Fix missing / incorrect fields for keystone workflow
* Update gomods
* Simplify node key handling
* Formatting
* Add test mocks
* Refactor - Use single entrypoint for provisioning keystone in CRIB
* Add OCR3 caching
* Refactor provisioning flags and improve argument validation in keystone script
* Refactor: Remove stale references and fix refactor-related bugs
* Create artefacts dir if it doesn't exist
* Simplify jobspec and bridge handling
* Remove unneeded token transfer calls
* Refactor: Clean up logging, add more tx caching
* Fix post-rebase errors
* Fix OCR3 digest comparison
* Remove extra cmd cruft
* Remove unused func
* Undo transmitter changes
* Revert "Add test mocks"

  This reverts commit 75cafe96a08d6495c8040076a053e3e1a33e4154.
* Fix caching
* Remove deprecated assertion call
* Update gomod
* Fix linter warnings
* Add changeset
* Add additional logging around node sets and key fetching
* Run gomodtidy
* Fix additional lints
* Harden API request logic
* Readme WIP
* Clean up lints, remove old readme
* Increase retry interval
* Update goreleaser to 2.4.4-pro
* Handle non-postfix path
* Resolve darwin shell hook from git root
* Bump streams trigger cap to 1.1.0
* Create toolkit sub-cli
* Clean up toolkit
* Reverse URLs for nodelists
* Update snapshots
* Remove unneeded gosec ignore
* Update gomods
* Log when we set ocr3 config
* Clean up argument parsing
* Fix nodes list parsing
* Fix lints + address feedback
* Update gomods to point to this branch's pseudo version
* Update gomods to point to this branch's pseudo version
* Bump wrappers to 1.1.0
* fix test indentation, quoting
* revert bad merge to main; workflow.go, cap_encoder*
* linter

---------

Co-authored-by: Justin Kaseman
Co-authored-by: krehermann <16602512+krehermann@users.noreply.github.com>
---
 .changeset/loud-birds-remain.md | 5 +
 core/cmd/bridge_commands.go | 23 +
 core/cmd/bridge_commands_test.go | 42 ++
 core/scripts/common/helpers.go | 16 +-
 core/scripts/go.mod | 8 +-
 .../keystone/01_deploy_contracts-sample.sh | 11 -
 .../keystone/02_deploy_jobspecs-sample.sh | 7 -
 core/scripts/keystone/03_gen_crib-sample.sh | 6 -
 .../keystone/04_delete_ocr3_jobs-sample.sh | 3 -
 ...initialize_capabilities_registry-sample.sh | 8 -
 core/scripts/keystone/README.md | 91 ---
 core/scripts/keystone/artefacts/README.md | 1 -
 core/scripts/keystone/main.go | 8 +-
 .../keystone/src/01_deploy_contracts_cmd.go | 226 -------
 .../keystone/src/01_provision_keystone.go | 217 +++++++
 core/scripts/keystone/src/01_toolkit.go | 212 +++++++
 core/scripts/keystone/src/01_toolkit_test.go | 49 ++
 .../keystone/src/02_deploy_jobspecs_cmd.go | 165 -----
 .../src/02_deploy_keystone_workflows.go | 134 ++++
 .../src/02_deploy_keystone_workflows_test.go | 21 +
 .../keystone/src/02_fund_transmitters.go | 52 ++
 .../src/02_provision_capabilities_registry.go | 68 +++
 .../scripts/keystone/src/02_provision_crib.go | 310 ++++++++++
 .../keystone/src/02_provision_crib_test.go | 44 ++
 .../src/02_provision_forwarder_contract.go | 33 +
 .../src/02_provision_ocr3_capability.go | 286 +++++++++
 .../src/02_provision_ocr3_capability_test.go | 67 ++
 ...02_provision_streams_trigger_capability.go | 522 ++++++++++++++++
 ...ovision_streams_trigger_capability_test.go | 57 ++
 .../src/03_gen_crib_cluster_overrides_cmd.go | 86 ---
 .../03_gen_crib_cluster_overrides_cmd_test.go | 19 -
 .../keystone/src/04_delete_ocr3_jobs_cmd.go | 101 ---
 ...deploy_initialize_capabilities_registry.go | 82 +--
 .../keystone/src/06_deploy_workflows_cmd.go | 71 ---
 .../keystone/src/07_delete_workflows_cmd.go | 74 ---
 .../src/88_capabilities_registry_helpers.go | 578 ++++++++++++++++++
 .../keystone/src/88_contracts_helpers.go | 192 ++++++
 core/scripts/keystone/src/88_gen_jobspecs.go | 91 ---
 .../keystone/src/88_gen_jobspecs_test.go | 37 --
 .../keystone/src/88_gen_ocr3_config.go | 20 -
 .../keystone/src/88_gen_ocr3_config_test.go | 31 -
 .../keystone/src/88_jobspecs_helpers.go | 53 ++
 core/scripts/keystone/src/88_ocr_helpers.go | 69 +++
 core/scripts/keystone/src/99_app.go | 386 +++++++++++-
 core/scripts/keystone/src/99_crib_client.go | 71 ++-
 core/scripts/keystone/src/99_fetch_keys.go | 397 ++++++------
 core/scripts/keystone/src/99_files.go | 67 +-
 core/scripts/keystone/src/99_files_test.go | 36 --
 core/scripts/keystone/src/99_k8s_client.go | 74 ++-
core/scripts/keystone/src/99_nodes.go | 72 --- .../02_deploy_keystone_workflows_test.snap | 57 ++ .../__snapshots__/02_provision_crib_test.snap | 415 +++++++++++++ .../02_provision_ocr3_capability_test.snap | 65 ++ ...ision_streams_trigger_capability_test.snap | 50 ++ ...3_gen_crib_cluster_overrides_cmd_test.snap | 44 -- .../__snapshots__/88_gen_jobspecs_test.snap | 140 ----- .../88_gen_ocr3_config_test.snap | 23 - .../src/external-adapter/.goreleaser.yaml | 49 ++ .../external-adapter/99_external_adapter.go | 154 +++++ .../keystone/src/external-adapter/Dockerfile | 5 + .../keystone/src/testdata/NodeList.txt | 5 - .../keystone/src/testdata/PublicKeys.json | 57 -- .../keystone/src/testdata/node_sets.json | 298 +++++++++ .../scripts/keystone/templates/bootstrap.toml | 9 - .../keystone/templates/crib-overrides.yaml | 41 -- core/scripts/keystone/templates/oracle.toml | 27 - core/services/job/models.go | 2 +- deployment/go.mod | 2 +- integration-tests/go.mod | 4 +- integration-tests/load/go.mod | 6 +- shell.nix | 3 +- tools/goreleaser-config/go.mod | 2 +- 72 files changed, 4851 insertions(+), 1906 deletions(-) create mode 100644 .changeset/loud-birds-remain.md delete mode 100755 core/scripts/keystone/01_deploy_contracts-sample.sh delete mode 100755 core/scripts/keystone/02_deploy_jobspecs-sample.sh delete mode 100755 core/scripts/keystone/03_gen_crib-sample.sh delete mode 100755 core/scripts/keystone/04_delete_ocr3_jobs-sample.sh delete mode 100755 core/scripts/keystone/05_deploy_and_initialize_capabilities_registry-sample.sh delete mode 100644 core/scripts/keystone/README.md delete mode 100644 core/scripts/keystone/artefacts/README.md delete mode 100644 core/scripts/keystone/src/01_deploy_contracts_cmd.go create mode 100644 core/scripts/keystone/src/01_provision_keystone.go create mode 100644 core/scripts/keystone/src/01_toolkit.go create mode 100644 core/scripts/keystone/src/01_toolkit_test.go delete mode 100644 core/scripts/keystone/src/02_deploy_jobspecs_cmd.go create mode 100644 core/scripts/keystone/src/02_deploy_keystone_workflows.go create mode 100644 core/scripts/keystone/src/02_deploy_keystone_workflows_test.go create mode 100644 core/scripts/keystone/src/02_fund_transmitters.go create mode 100644 core/scripts/keystone/src/02_provision_capabilities_registry.go create mode 100644 core/scripts/keystone/src/02_provision_crib.go create mode 100644 core/scripts/keystone/src/02_provision_crib_test.go create mode 100644 core/scripts/keystone/src/02_provision_forwarder_contract.go create mode 100644 core/scripts/keystone/src/02_provision_ocr3_capability.go create mode 100644 core/scripts/keystone/src/02_provision_ocr3_capability_test.go create mode 100644 core/scripts/keystone/src/02_provision_streams_trigger_capability.go create mode 100644 core/scripts/keystone/src/02_provision_streams_trigger_capability_test.go delete mode 100644 core/scripts/keystone/src/03_gen_crib_cluster_overrides_cmd.go delete mode 100644 core/scripts/keystone/src/03_gen_crib_cluster_overrides_cmd_test.go delete mode 100644 core/scripts/keystone/src/04_delete_ocr3_jobs_cmd.go delete mode 100644 core/scripts/keystone/src/06_deploy_workflows_cmd.go delete mode 100644 core/scripts/keystone/src/07_delete_workflows_cmd.go create mode 100644 core/scripts/keystone/src/88_capabilities_registry_helpers.go create mode 100644 core/scripts/keystone/src/88_contracts_helpers.go delete mode 100644 core/scripts/keystone/src/88_gen_jobspecs.go delete mode 100644 core/scripts/keystone/src/88_gen_jobspecs_test.go delete mode 100644 
core/scripts/keystone/src/88_gen_ocr3_config.go delete mode 100644 core/scripts/keystone/src/88_gen_ocr3_config_test.go create mode 100644 core/scripts/keystone/src/88_jobspecs_helpers.go create mode 100644 core/scripts/keystone/src/88_ocr_helpers.go delete mode 100644 core/scripts/keystone/src/99_files_test.go delete mode 100644 core/scripts/keystone/src/99_nodes.go create mode 100755 core/scripts/keystone/src/__snapshots__/02_deploy_keystone_workflows_test.snap create mode 100755 core/scripts/keystone/src/__snapshots__/02_provision_crib_test.snap create mode 100755 core/scripts/keystone/src/__snapshots__/02_provision_ocr3_capability_test.snap create mode 100755 core/scripts/keystone/src/__snapshots__/02_provision_streams_trigger_capability_test.snap delete mode 100755 core/scripts/keystone/src/__snapshots__/03_gen_crib_cluster_overrides_cmd_test.snap delete mode 100755 core/scripts/keystone/src/__snapshots__/88_gen_jobspecs_test.snap delete mode 100755 core/scripts/keystone/src/__snapshots__/88_gen_ocr3_config_test.snap create mode 100644 core/scripts/keystone/src/external-adapter/.goreleaser.yaml create mode 100644 core/scripts/keystone/src/external-adapter/99_external_adapter.go create mode 100644 core/scripts/keystone/src/external-adapter/Dockerfile delete mode 100644 core/scripts/keystone/src/testdata/NodeList.txt delete mode 100644 core/scripts/keystone/src/testdata/PublicKeys.json create mode 100644 core/scripts/keystone/src/testdata/node_sets.json delete mode 100644 core/scripts/keystone/templates/bootstrap.toml delete mode 100644 core/scripts/keystone/templates/crib-overrides.yaml delete mode 100644 core/scripts/keystone/templates/oracle.toml diff --git a/.changeset/loud-birds-remain.md b/.changeset/loud-birds-remain.md new file mode 100644 index 00000000000..eb1e8f8a9ca --- /dev/null +++ b/.changeset/loud-birds-remain.md @@ -0,0 +1,5 @@ +--- +"chainlink": minor +--- + +#internal Add unexposed shell cmd for updating a bridge diff --git a/core/cmd/bridge_commands.go b/core/cmd/bridge_commands.go index 398d466c43a..cd314b23218 100644 --- a/core/cmd/bridge_commands.go +++ b/core/cmd/bridge_commands.go @@ -128,6 +128,29 @@ func (s *Shell) CreateBridge(c *cli.Context) (err error) { return s.renderAPIResponse(resp, &BridgePresenter{}) } +func (s *Shell) UpdateBridge(c *cli.Context) (err error) { + if !c.Args().Present() { + return s.errorOut(errors.New("must pass the name of the bridge to be updated")) + } + bridgeName := c.Args().First() + buf, err := getBufferFromJSON(c.Args().Get(1)) + if err != nil { + return s.errorOut(err) + } + + resp, err := s.HTTP.Patch(s.ctx(), "/v2/bridge_types/"+bridgeName, buf) + if err != nil { + return s.errorOut(err) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + return s.renderAPIResponse(resp, &BridgePresenter{}) +} + // RemoveBridge removes a specific Bridge by name. 
func (s *Shell) RemoveBridge(c *cli.Context) (err error) { if !c.Args().Present() { diff --git a/core/cmd/bridge_commands_test.go b/core/cmd/bridge_commands_test.go index f05aac52cd9..5523fc09605 100644 --- a/core/cmd/bridge_commands_test.go +++ b/core/cmd/bridge_commands_test.go @@ -3,6 +3,7 @@ package cmd_test import ( "bytes" "flag" + "fmt" "testing" "time" @@ -191,3 +192,44 @@ func TestShell_RemoveBridge(t *testing.T) { assert.Equal(t, bt.URL.String(), p.URL) assert.Equal(t, bt.Confirmations, p.Confirmations) } +func TestShell_UpdateBridge(t *testing.T) { + t.Parallel() + + app := startNewApplicationV2(t, nil) + client, _ := app.NewShellAndRenderer() + name := testutils.RandomizeName("updatebridge") + + bt := &bridges.BridgeType{ + Name: bridges.MustParseBridgeName(name), + URL: cltest.WebURL(t, "https://testing.com/bridges"), + Confirmations: 0, + } + require.NoError(t, app.BridgeORM().CreateBridgeType(testutils.Context(t), bt)) + tests := []struct { + name string + args []string + errored bool + }{ + {"NoArgs", []string{}, true}, + {"OnlyName", []string{name}, true}, + {"ValidUpdate", []string{name, fmt.Sprintf(`{ "name": "%s", "url": "http://localhost:3000/updated" }`, name)}, false}, + {"InvalidJSON", []string{name, `{ "url": "http://localhost:3000/updated"`}, true}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + set := flag.NewFlagSet("bridge", 0) + flagSetApplyFromAction(client.UpdateBridge, set, "") + + require.NoError(t, set.Parse(test.args)) + + c := cli.NewContext(nil, set, nil) + if test.errored { + assert.Error(t, client.UpdateBridge(c)) + } else { + assert.NoError(t, client.UpdateBridge(c)) + } + }) + } +} diff --git a/core/scripts/common/helpers.go b/core/scripts/common/helpers.go index 57c8c15e405..97ca2dd4929 100644 --- a/core/scripts/common/helpers.go +++ b/core/scripts/common/helpers.go @@ -3,10 +3,12 @@ package common import ( "context" "crypto/ecdsa" + "crypto/tls" "encoding/hex" "flag" "fmt" "math/big" + "net/http" "os" "strconv" "strings" @@ -69,11 +71,17 @@ func SetupEnv(overrideNonce bool) Environment { panic("need account key") } - ec, err := ethclient.Dial(ethURL) - PanicErr(err) - - jsonRPCClient, err := rpc.Dial(ethURL) + insecureSkipVerify := os.Getenv("INSECURE_SKIP_VERIFY") == "true" + tr := &http.Transport{ + // User enables this at their own risk! 
+ // #nosec G402 + TLSClientConfig: &tls.Config{InsecureSkipVerify: insecureSkipVerify}, + } + httpClient := &http.Client{Transport: tr} + rpcConfig := rpc.WithHTTPClient(httpClient) + jsonRPCClient, err := rpc.DialOptions(context.Background(), ethURL, rpcConfig) PanicErr(err) + ec := ethclient.NewClient(jsonRPCClient) chainID, err := strconv.ParseInt(chainIDEnv, 10, 64) PanicErr(err) diff --git a/core/scripts/go.mod b/core/scripts/go.mod index b86baf9a203..897962a6454 100644 --- a/core/scripts/go.mod +++ b/core/scripts/go.mod @@ -34,6 +34,8 @@ require ( github.com/shopspring/decimal v1.4.0 github.com/smartcontractkit/chainlink-automation v0.8.1 github.com/smartcontractkit/chainlink-common v0.4.1-0.20250108194320-2ebd63bbb16e + github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241216163550-fa030d178ba3 + github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.13 github.com/smartcontractkit/libocr v0.0.0-20241223215956-e5b78d8e3919 github.com/spf13/cobra v1.8.1 github.com/spf13/viper v1.19.0 @@ -41,7 +43,9 @@ require ( github.com/umbracle/ethgo v0.1.3 github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 github.com/urfave/cli v1.22.14 + go.uber.org/zap v1.27.0 google.golang.org/protobuf v1.35.1 + gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.1 k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.31.1 @@ -309,14 +313,12 @@ require ( github.com/smartcontractkit/chainlink-ccip v0.0.0-20250110181647-9dba278f2103 // indirect github.com/smartcontractkit/chainlink-ccip/chains/solana v0.0.0-20250103152858-8973fd0c912b // indirect github.com/smartcontractkit/chainlink-cosmos v0.5.2-0.20241202195413-82468150ac1e // indirect - github.com/smartcontractkit/chainlink-data-streams v0.1.1-0.20241216163550-fa030d178ba3 // indirect github.com/smartcontractkit/chainlink-feeds v0.1.1 // indirect github.com/smartcontractkit/chainlink-framework/multinode v0.0.0-20241220173418-09e17ddbeb20 // indirect github.com/smartcontractkit/chainlink-protos/job-distributor v0.6.0 // indirect github.com/smartcontractkit/chainlink-protos/orchestrator v0.4.0 // indirect github.com/smartcontractkit/chainlink-solana v1.1.1-0.20250110142550-e2a9566d39f3 // indirect github.com/smartcontractkit/chainlink-starknet/relayer v0.1.1-0.20241202202529-2033490e77b8 // indirect - github.com/smartcontractkit/chainlink-testing-framework/lib v1.50.13 // indirect github.com/smartcontractkit/grpc-proxy v0.0.0-20240830132753-a7e17fec5ab7 // indirect github.com/smartcontractkit/tdh2/go/ocr2/decryptionplugin v0.0.0-20241009055228-33d0c0bf38de // indirect github.com/smartcontractkit/tdh2/go/tdh2 v0.0.0-20241009055228-33d0c0bf38de // indirect @@ -381,7 +383,6 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/ratelimit v0.3.1 // indirect - go.uber.org/zap v1.27.0 // indirect golang.org/x/arch v0.11.0 // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect @@ -405,7 +406,6 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect diff --git a/core/scripts/keystone/01_deploy_contracts-sample.sh b/core/scripts/keystone/01_deploy_contracts-sample.sh deleted file mode 100755 index 89e77f4556f..00000000000 --- 
a/core/scripts/keystone/01_deploy_contracts-sample.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -go run main.go \ - deploy-contracts \ - --ocrfile=ocr_config.json \ - --chainid=11155111 \ - --ethurl=ETH_URL \ - --accountkey=ACCOUNT_KEY \ - --onlysetconfig=false \ - --skipfunding=false \ - --dryrun=false diff --git a/core/scripts/keystone/02_deploy_jobspecs-sample.sh b/core/scripts/keystone/02_deploy_jobspecs-sample.sh deleted file mode 100755 index e99d54e0d3b..00000000000 --- a/core/scripts/keystone/02_deploy_jobspecs-sample.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -go run main.go \ - deploy-jobspecs \ - --chainid=11155111 \ - --p2pport=6690 \ - --onlyreplay=false diff --git a/core/scripts/keystone/03_gen_crib-sample.sh b/core/scripts/keystone/03_gen_crib-sample.sh deleted file mode 100755 index 9193ef4f75b..00000000000 --- a/core/scripts/keystone/03_gen_crib-sample.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -go run main.go \ - generate-crib \ - --chainid=11155111 \ - --outpath=/tmp diff --git a/core/scripts/keystone/04_delete_ocr3_jobs-sample.sh b/core/scripts/keystone/04_delete_ocr3_jobs-sample.sh deleted file mode 100755 index 3f3b50b055c..00000000000 --- a/core/scripts/keystone/04_delete_ocr3_jobs-sample.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -go run main.go delete-ocr3-jobs diff --git a/core/scripts/keystone/05_deploy_and_initialize_capabilities_registry-sample.sh b/core/scripts/keystone/05_deploy_and_initialize_capabilities_registry-sample.sh deleted file mode 100755 index 21c764be0e8..00000000000 --- a/core/scripts/keystone/05_deploy_and_initialize_capabilities_registry-sample.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -go run main.go \ - deploy-and-initialize-capabilities-registry \ - --chainid=11155111 \ - --ethurl=$ETH_URL \ - --accountkey=$ACCOUNT_KEY \ - --craddress=$CR_ADDRESS \ // 0x0d36aAC2Fd9d6d1C1F59251be6A2B337af27C52B diff --git a/core/scripts/keystone/README.md b/core/scripts/keystone/README.md deleted file mode 100644 index f08f738cb78..00000000000 --- a/core/scripts/keystone/README.md +++ /dev/null @@ -1,91 +0,0 @@ -# Provisioning a CRIB keystone cluster - -Kudos to Functions team for inspiration. - -This document outlines the steps to provision a CRIB keystone cluster for testing OCR3. - -## Pre-requisites - -### Blockchain Node - -An HTTP URL to a blockchain node, such as a Geth node. This should be the same blockchain node that you used to deploy the chainlink node cluster. - -### Private Key - -A private key to a testing wallet to use for deployment and funding. This wallet should have some native token on the chain you're deploying to. For Sepolia, around 2 ETH should be sufficient. - -The easiest way to set this up is to download [Metamask](https://metamask.io/) and create a new wallet. Once you have created a wallet, you can export the private key by clicking on the three dots next to the wallet name, selecting "Account Details", and then "Show Private Key". - -## Usage - -### Your first deployment - -Using devspace, we can deploy a cluster and provision it via the `keystone` devspace profile. You'll want to follow the instructions in the [CRIB README](../../../crib/README.md) to set up your environment and deploy the cluster. - -**NOTE**: You'll want to deploy using the `keystone` profile, not the default profile file. - -```bash -# From /crib -devspace deploy --profile keystone -``` - -For convenience, setting the TTL to be a much longer value is helpful, otherwise the testnet native tokens that you send to nodes will be lost. 
You can set this in your crib `.env` file, or interactively via: - -```bash -# From /crib -devspace run ttl ${namespace} 7d -``` - -Everytime the interactive command is run, the TTL is reset. - -### Iterate -Let's say you made some changes to the codebase, and you want to see that reflected within the cluster. Simply redeploy via: -```bash -devspace deploy --profile keystone -``` - -### Restarting from a fresh slate - -If you want to redeploy all resources, then you'll want to do the following: - -```bash -# From /crib -devspace purge --profile keystone # Remove all k8s resources -DEVSPACE_NAMESPACE=crib- crib init # Purge currently leaves some hanging resources, make a new namespace -devspace deploy --profile keysone --clean # Wipe any keystone related persisted data, like artefacts and caches. -``` - -## What does Provisioning a CRIB keystone cluster do? - -### Provision On-Chain Resources - -This will provision on-chain resources, namely: - -1. Deploy the forwarder contract -2. Deploy OCR3 config contract -3. Setting the configuration for the OCR3 contract -4. Funding transmitters with native tokens - -When the on-chain resources are deployed, a json file within `artefacts` will be generated. This file will contain the addresses of the forwarder contract, the OCR3 config contract, and the block number at which the configuration was set. Be careful about deleting this file, as if you lose it, you will need to redeploy the contracts and run through all proceeding steps. - -### Job Spec Deployment - -The next step is to deploy the OCR3 job specs to the chainlink node cluster. This will create a bootstrapping job for the first node of the cluster (determined via alphabetical order) and an OCR job for each other node in the cluster. - -### Update Per-Node TOML Configuration - -While we already have the chainlink node cluster deployed, we need to update the TOML configuration for each node to configure the `ChainWriter`. -After updated TOML configuration overrides are generated per node, the cluster is redeployed such that the updates that effect without wiping the databases. - -## Future Work - -### Keystone workflow deployment -Workflow style job spec deployments are not currently support, but it should be a minor modification to the existing OCR job spec deployment logic - -### Multi-DON support -Multiple DONs are not currently supported -- the devspace profile will need to be expanded so that we have multiple deployments, one per DON. -- network policy / open ports will likely have to be adjusted in the chart - -### Smarter jobspec deployment -Currently, job specs deployment logic is dumb. The scripts don't check if the jobspec to deploy already exists. If you need to redeploy a job spec that has the same name as a currently uploaded one, you'll want to delete the existing job specs via `./04_delete_ocr3_jobs.sh`. diff --git a/core/scripts/keystone/artefacts/README.md b/core/scripts/keystone/artefacts/README.md deleted file mode 100644 index 68f06dbd1c8..00000000000 --- a/core/scripts/keystone/artefacts/README.md +++ /dev/null @@ -1 +0,0 @@ -All generated artefacts will be saved here. 
\ No newline at end of file diff --git a/core/scripts/keystone/main.go b/core/scripts/keystone/main.go index 3486830ca32..4bd8dea0e5f 100644 --- a/core/scripts/keystone/main.go +++ b/core/scripts/keystone/main.go @@ -15,13 +15,9 @@ type command interface { func main() { commands := []command{ - src.NewDeployContractsCommand(), - src.NewDeployJobSpecsCommand(), - src.NewGenerateCribClusterOverridesCommand(), - src.NewDeleteJobsCommand(), + src.NewProvisionKeystoneCommand(), src.NewDeployAndInitializeCapabilitiesRegistryCommand(), - src.NewDeployWorkflowsCommand(), - src.NewDeleteWorkflowsCommand(), + src.NewToolkit(), } commandsList := func(commands []command) string { diff --git a/core/scripts/keystone/src/01_deploy_contracts_cmd.go b/core/scripts/keystone/src/01_deploy_contracts_cmd.go deleted file mode 100644 index 14c8d989063..00000000000 --- a/core/scripts/keystone/src/01_deploy_contracts_cmd.go +++ /dev/null @@ -1,226 +0,0 @@ -package src - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "math/big" - "os" - "path/filepath" - - "github.com/ethereum/go-ethereum/common" - - helpers "github.com/smartcontractkit/chainlink/core/scripts/common" - "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" - forwarder "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/forwarder_1_0_0" - ocr3_capability "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/ocr3_capability_1_0_0" -) - -type deployedContracts struct { - OCRContract common.Address `json:"ocrContract"` - ForwarderContract common.Address `json:"forwarderContract"` - // The block number of the transaction that set the config on the OCR3 contract. We use this to replay blocks from this point on - // when we load the OCR3 job specs on the nodes. - SetConfigTxBlock uint64 `json:"setConfigTxBlock"` -} - -type deployContracts struct{} - -func NewDeployContractsCommand() *deployContracts { - return &deployContracts{} -} - -func (g *deployContracts) Name() string { - return "deploy-contracts" -} - -// Run expects the follow environment variables to be set: -// -// 1. Deploys the OCR3 contract -// 2. Deploys the Forwarder contract -// 3. Sets the config on the OCR3 contract -// 4. Writes the deployed contract addresses to a file -// 5. 
Funds the transmitters -func (g *deployContracts) Run(args []string) { - fs := flag.NewFlagSet(g.Name(), flag.ExitOnError) - ocrConfigFile := fs.String("ocrfile", "config_example.json", "path to OCR config file") - // create flags for all of the env vars then set the env vars to normalize the interface - // this is a bit of a hack but it's the easiest way to make this work - ethUrl := fs.String("ethurl", "", "URL of the Ethereum node") - chainID := fs.Int64("chainid", 11155111, "chain ID of the Ethereum network to deploy to") - accountKey := fs.String("accountkey", "", "private key of the account to deploy from") - skipFunding := fs.Bool("skipfunding", false, "skip funding the transmitters") - onlySetConfig := fs.Bool("onlysetconfig", false, "set the config on the OCR3 contract without deploying the contracts or funding transmitters") - dryRun := fs.Bool("dryrun", false, "dry run, don't actually deploy the contracts and do not fund transmitters") - publicKeys := fs.String("publickeys", "", "Custom public keys json location") - nodeList := fs.String("nodes", "", "Custom node list location") - artefactsDir := fs.String("artefacts", "", "Custom artefacts directory location") - - err := fs.Parse(args) - - if err != nil || - *ocrConfigFile == "" || ocrConfigFile == nil || - *ethUrl == "" || ethUrl == nil || - *chainID == 0 || chainID == nil || - *accountKey == "" || accountKey == nil { - fs.Usage() - os.Exit(1) - } - - if *artefactsDir == "" { - *artefactsDir = defaultArtefactsDir - } - if *publicKeys == "" { - *publicKeys = defaultPublicKeys - } - if *nodeList == "" { - *nodeList = defaultNodeList - } - - os.Setenv("ETH_URL", *ethUrl) - os.Setenv("ETH_CHAIN_ID", fmt.Sprintf("%d", *chainID)) - os.Setenv("ACCOUNT_KEY", *accountKey) - - deploy(*nodeList, *publicKeys, *ocrConfigFile, *skipFunding, *dryRun, *onlySetConfig, *artefactsDir) -} - -// deploy does the following: -// 1. Deploys the OCR3 contract -// 2. Deploys the Forwarder contract -// 3. Sets the config on the OCR3 contract -// 4. Writes the deployed contract addresses to a file -// 5. 
Funds the transmitters -func deploy( - nodeList string, - publicKeys string, - configFile string, - skipFunding bool, - dryRun bool, - onlySetConfig bool, - artefacts string, -) { - env := helpers.SetupEnv(false) - ocrConfig := generateOCR3Config( - nodeList, - configFile, - env.ChainID, - publicKeys, - ) - - if dryRun { - fmt.Println("Dry run, skipping deployment and funding") - return - } - - if onlySetConfig { - fmt.Println("Skipping deployment of contracts and skipping funding transmitters, only setting config") - setOCR3Config(env, ocrConfig, artefacts) - return - } - - if ContractsAlreadyDeployed(artefacts) { - fmt.Println("Contracts already deployed") - return - } - - fmt.Println("Deploying keystone ocr3 contract...") - ocrContract := DeployKeystoneOCR3Capability(env) - fmt.Println("Deploying keystone forwarder contract...") - forwarderContract := DeployForwarder(env) - - fmt.Println("Writing deployed contract addresses to file...") - contracts := deployedContracts{ - OCRContract: ocrContract.Address(), - ForwarderContract: forwarderContract.Address(), - } - jsonBytes, err := json.Marshal(contracts) - PanicErr(err) - - err = os.WriteFile(DeployedContractsFilePath(artefacts), jsonBytes, 0600) - PanicErr(err) - - setOCR3Config(env, ocrConfig, artefacts) - - if skipFunding { - fmt.Println("Skipping funding transmitters") - return - } - fmt.Println("Funding transmitters...") - transmittersStr := []string{} - for _, t := range ocrConfig.Transmitters { - transmittersStr = append(transmittersStr, t.String()) - } - - helpers.FundNodes(env, transmittersStr, big.NewInt(50000000000000000)) // 0.05 ETH -} - -func setOCR3Config( - env helpers.Environment, - ocrConfig changeset.OCR3OnchainConfig, - artefacts string, -) { - loadedContracts, err := LoadDeployedContracts(artefacts) - PanicErr(err) - - ocrContract, err := ocr3_capability.NewOCR3Capability(loadedContracts.OCRContract, env.Ec) - PanicErr(err) - fmt.Println("Setting OCR3 contract config...") - tx, err := ocrContract.SetConfig(env.Owner, - ocrConfig.Signers, - ocrConfig.Transmitters, - ocrConfig.F, - ocrConfig.OnchainConfig, - ocrConfig.OffchainConfigVersion, - ocrConfig.OffchainConfig, - ) - PanicErr(err) - receipt := helpers.ConfirmTXMined(context.Background(), env.Ec, tx, env.ChainID) - - // Write blocknumber of the transaction to the deployed contracts file - loadedContracts.SetConfigTxBlock = receipt.BlockNumber.Uint64() - jsonBytes, err := json.Marshal(loadedContracts) - PanicErr(err) - err = os.WriteFile(DeployedContractsFilePath(artefacts), jsonBytes, 0600) - PanicErr(err) -} - -func LoadDeployedContracts(artefacts string) (deployedContracts, error) { - if !ContractsAlreadyDeployed(artefacts) { - return deployedContracts{}, fmt.Errorf("no deployed contracts found, run deploy first") - } - - jsonBytes, err := os.ReadFile(DeployedContractsFilePath(artefacts)) - if err != nil { - return deployedContracts{}, err - } - - var contracts deployedContracts - err = json.Unmarshal(jsonBytes, &contracts) - return contracts, err -} - -func ContractsAlreadyDeployed(artefacts string) bool { - _, err := os.Stat(DeployedContractsFilePath(artefacts)) - return err == nil -} - -func DeployedContractsFilePath(artefacts string) string { - return filepath.Join(artefacts, deployedContractsJSON) -} - -func DeployForwarder(e helpers.Environment) *forwarder.KeystoneForwarder { - _, tx, contract, err := forwarder.DeployKeystoneForwarder(e.Owner, e.Ec) - PanicErr(err) - helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) - - return 
contract -} - -func DeployKeystoneOCR3Capability(e helpers.Environment) *ocr3_capability.OCR3Capability { - _, tx, contract, err := ocr3_capability.DeployOCR3Capability(e.Owner, e.Ec) - PanicErr(err) - helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) - - return contract -} diff --git a/core/scripts/keystone/src/01_provision_keystone.go b/core/scripts/keystone/src/01_provision_keystone.go new file mode 100644 index 00000000000..c7a2dd97127 --- /dev/null +++ b/core/scripts/keystone/src/01_provision_keystone.go @@ -0,0 +1,217 @@ +package src + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "strconv" + + helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry_1_1_0" +) + +type provisionKeystone struct{} + +func NewProvisionKeystoneCommand() *provisionKeystone { + return &provisionKeystone{} +} + +func (g *provisionKeystone) Name() string { + return "provision-keystone" +} + +func (g *provisionKeystone) Run(args []string) { + fs := flag.NewFlagSet(g.Name(), flag.ExitOnError) + + // common flags + artefactsDir := fs.String("artefacts", defaultArtefactsDir, "Custom artefacts directory location") + nodeSetSize := fs.Int("nodesetsize", 5, "number of nodes in a nodeset") + nodeSetsPath := fs.String("nodesets", defaultNodeSetsPath, "Custom node sets location") + chainID := fs.Int64("chainid", 1337, "chain ID of the Ethereum network to deploy to") + + // preprovisioning flags + preprovison := fs.Bool("preprovision", false, "Preprovision crib") + + // provisioning flags + ethURL := fs.String("ethurl", "", "URL of the Ethereum node") + accountKey := fs.String("accountkey", "", "private key of the account to deploy from") + ocrConfigFile := fs.String("ocrfile", "ocr_config.json", "path to OCR config file") + p2pPort := fs.Int64("p2pport", 6690, "p2p port") + capabilitiesP2PPort := fs.Int64("capabilitiesp2pport", 6691, "p2p port for capabilities") + preprovisionConfigName := fs.String("preprovisionconfig", "crib-preprovision.yaml", "Name of the preprovision config file, stored in the artefacts directory") + postprovisionConfigName := fs.String("postprovisionconfig", "crib-postprovision.yaml", "Name of the postprovision config file, stored in the artefacts directory") + // additional flags + clean := fs.Bool("clean", false, "Clean up resources before provisioning") + + err := fs.Parse(args) + + if err != nil || (!*preprovison && (*ethURL == "" || *accountKey == "")) { + fs.Usage() + os.Exit(1) + } + + if *preprovison { + fmt.Println() + fmt.Println() + fmt.Println("========================") + fmt.Println("Writing Preprovisioning Config") + fmt.Println("========================") + fmt.Println() + fmt.Println() + writePreprovisionConfig(*nodeSetSize, filepath.Join(*artefactsDir, *preprovisionConfigName)) + return + } + + // We always want to start with a clean slate + /// when it comes to nodesets + err = os.RemoveAll(*nodeSetsPath) + PanicErr(err) + fmt.Println("Collecting node sets...") + nodeSets := downloadNodeSets(*chainID, *nodeSetsPath, *nodeSetSize) + + if *clean { + fmt.Println("Cleaning up resources") + for _, node := range nodeSets.Workflow.Nodes { + clearJobs(newNodeAPI(node)) + } + for _, node := range nodeSets.StreamsTrigger.Nodes { + clearJobs(newNodeAPI(node)) + } + os.RemoveAll(*artefactsDir) + } + + // Kinda hacky but it prevents us from refactoring the setupenv function which + // is used in many other places + os.Setenv("ETH_URL", 
*ethURL) + os.Setenv("ETH_CHAIN_ID", strconv.FormatInt(*chainID, 10)) + os.Setenv("ACCOUNT_KEY", *accountKey) + os.Setenv("INSECURE_SKIP_VERIFY", "true") + env := helpers.SetupEnv(false) + + provisionStreamsDON( + env, + nodeSets.StreamsTrigger, + *chainID, + *p2pPort, + *ocrConfigFile, + *artefactsDir, + ) + + reg := provisionCapabilitiesRegistry( + env, + nodeSets, + *chainID, + *artefactsDir, + ) + + onchainMeta := provisionWorkflowDON( + env, + nodeSets.Workflow, + *chainID, + *p2pPort, + *ocrConfigFile, + *artefactsDir, + reg, + ) + + fmt.Println() + fmt.Println() + fmt.Println("========================") + fmt.Println("Writing Postprovision Config") + fmt.Println("========================") + fmt.Println() + fmt.Println() + + writePostProvisionConfig( + nodeSets, + *chainID, + *capabilitiesP2PPort, + onchainMeta.Forwarder.Address().Hex(), + onchainMeta.CapabilitiesRegistry.Address().Hex(), + filepath.Join(*artefactsDir, *postprovisionConfigName), + ) +} + +func provisionCapabilitiesRegistry( + env helpers.Environment, + nodeSets NodeSets, + chainID int64, + artefactsDir string, +) kcr.CapabilitiesRegistryInterface { + fmt.Println() + fmt.Println() + fmt.Println("========================") + fmt.Println("Provisioning Capabilities Registry DON") + fmt.Println("========================") + fmt.Println() + fmt.Println() + reg := provisionCapabillitiesRegistry( + env, + nodeSets, + chainID, + artefactsDir, + ) + return reg +} + +func provisionStreamsDON( + env helpers.Environment, + nodeSet NodeSet, + chainID int64, + p2pPort int64, + ocrConfigFilePath string, + artefactsDir string, +) { + fmt.Println() + fmt.Println() + fmt.Println("========================") + fmt.Println("Provisioning streams DON") + fmt.Println("========================") + fmt.Println() + fmt.Println() + setupStreamsTrigger( + env, + nodeSet, + chainID, + p2pPort, + ocrConfigFilePath, + artefactsDir, + ) +} + +func provisionWorkflowDON( + env helpers.Environment, + nodeSet NodeSet, + chainID int64, + p2pPort int64, + ocrConfigFile string, + artefactsDir string, + reg kcr.CapabilitiesRegistryInterface, +) (onchainMeta *onchainMeta) { + fmt.Println() + fmt.Println() + fmt.Println("========================") + fmt.Println("Provisioning workflow DON") + fmt.Println("========================") + fmt.Println() + fmt.Println() + deployForwarder(env, artefactsDir) + + onchainMeta, _ = provisionOCR3( + env, + nodeSet, + chainID, + p2pPort, + ocrConfigFile, + artefactsDir, + ) + distributeFunds(nodeSet.NodeKeys, env) + + // We don't technically need the capability registry as a dependency + // as we just use it for a sanity check + // We could remove it so that we can execute provisioning in parallel + deployKeystoneWorkflowsTo(nodeSet, reg) + + return onchainMeta +} diff --git a/core/scripts/keystone/src/01_toolkit.go b/core/scripts/keystone/src/01_toolkit.go new file mode 100644 index 00000000000..6fe896667ce --- /dev/null +++ b/core/scripts/keystone/src/01_toolkit.go @@ -0,0 +1,212 @@ +// This sub CLI acts as a temporary shim for external aptos support + +package src + +import ( + "bufio" + "errors" + "flag" + "fmt" + "net/url" + "os" + "strconv" + "strings" + + helpers "github.com/smartcontractkit/chainlink/core/scripts/common" +) + +type Toolkit struct{} + +func (t *Toolkit) Name() string { + return "toolkit" +} + +func NewToolkit() *Toolkit { + return &Toolkit{} +} + +func (t *Toolkit) Run(args []string) { + if len(args) < 1 { + fmt.Println("Available commands:") + fmt.Println(" deploy-workflows") + fmt.Println(" 
deploy-ocr3-contracts") + fmt.Println(" deploy-ocr3-jobspecs") + os.Exit(1) + } + + command := args[0] + cmdArgs := args[1:] + + switch command { + case "get-aptos-keys": + t.AptosKeys(cmdArgs) + case "deploy-workflows": + t.DeployWorkflows(cmdArgs) + case "deploy-ocr3-contracts": + t.ProvisionOCR3Contracts(cmdArgs) + case "deploy-ocr3-jobspecs": + t.DeployOCR3JobSpecs(cmdArgs) + default: + fmt.Printf("Unknown command: %s\n", command) + os.Exit(1) + } +} + +func (t *Toolkit) AptosKeys(args []string) { + fs := flag.NewFlagSet("get-aptos-keys", flag.ExitOnError) + nodesListPath := fs.String("nodes", ".cache/NodesList.txt", "Path to file with list of nodes") + artefacts := fs.String("artefacts", defaultArtefactsDir, "Custom artefacts directory location") + chainID := fs.Int64("chainid", 1337, "Chain ID") + + if err := fs.Parse(args); err != nil { + fs.Usage() + os.Exit(1) + } + + nodes := mustReadNodesList(*nodesListPath) + keys := mustFetchNodeKeys(*chainID, nodes, true) + + mustWriteJSON(*artefacts+"/pubnodekeys.json", keys) +} + +func (t *Toolkit) ProvisionOCR3Contracts(args []string) { + fs := flag.NewFlagSet("deploy-ocr3-contracts", flag.ExitOnError) + ethURL := fs.String("ethurl", "", "URL of the Ethereum node") + accountKey := fs.String("accountkey", "", "Private key of the deployer account") + chainID := fs.Int64("chainid", 1337, "Chain ID") + nodesListPath := fs.String("nodes", ".cache/NodesList.txt", "Path to file with list of nodes") + artefactsDir := fs.String("artefacts", defaultArtefactsDir, "Custom artefacts directory location") + ocrConfigFile := fs.String("ocrfile", "ocr_config.json", "Path to OCR config file") + + if err := fs.Parse(args); err != nil || *ethURL == "" || *accountKey == "" { + fs.Usage() + os.Exit(1) + } + + // Set environment variables required by setupenv + os.Setenv("ETH_URL", *ethURL) + os.Setenv("ETH_CHAIN_ID", strconv.FormatInt(*chainID, 10)) + os.Setenv("ACCOUNT_KEY", *accountKey) + os.Setenv("INSECURE_SKIP_VERIFY", "true") + + env := helpers.SetupEnv(false) + + nodes := mustReadNodesList(*nodesListPath) + nodeKeys := mustFetchNodeKeys(*chainID, nodes, true) + + deployOCR3Contract(nodeKeys, env, *ocrConfigFile, *artefactsDir) +} + +func (t *Toolkit) DeployOCR3JobSpecs(args []string) { + fs := flag.NewFlagSet("deploy-ocr3-jobspecs", flag.ExitOnError) + + ethURL := fs.String("ethurl", "", "URL of the Ethereum node") + accountKey := fs.String("accountkey", "", "Private key of the deployer account") + chainID := fs.Int64("chainid", 1337, "Chain ID") + nodesListPath := fs.String("nodes", ".cache/NodesList.txt", "Path to file with list of nodes") + p2pPort := fs.Int64("p2pport", 6690, "P2P port") + artefactsDir := fs.String("artefacts", defaultArtefactsDir, "Custom artefacts directory location") + + if err := fs.Parse(args); err != nil || *ethURL == "" || *accountKey == "" { + fs.Usage() + os.Exit(1) + } + + os.Setenv("ETH_URL", *ethURL) + os.Setenv("ETH_CHAIN_ID", strconv.FormatInt(*chainID, 10)) + os.Setenv("ACCOUNT_KEY", *accountKey) + os.Setenv("INSECURE_SKIP_VERIFY", "true") + + env := helpers.SetupEnv(false) + + nodes := mustReadNodesList(*nodesListPath) + nodeKeys := mustFetchNodeKeys(*chainID, nodes, true) + o := LoadOnchainMeta(*artefactsDir, env) + + deployOCR3JobSpecs( + nodes, + *chainID, + nodeKeys, + *p2pPort, + o, + ) +} + +func (t *Toolkit) DeployWorkflows(args []string) { + fs := flag.NewFlagSet("deploy-workflows", flag.ExitOnError) + workflowFile := fs.String("workflow", "", "Path to workflow file") + nodesList := fs.String("nodes", 
".cache/NodesList.txt", "Path to file with list of nodes") + + if err := fs.Parse(args); err != nil || *workflowFile == "" { + fs.Usage() + os.Exit(1) + } + + nodesWithCreds := mustReadNodesList(*nodesList) + + for _, node := range nodesWithCreds { + api := newNodeAPI(node) + workflowContent, err := os.ReadFile(*workflowFile) + PanicErr(err) + + upsertJob(api, "workflow", string(workflowContent)) + fmt.Println("Workflow deployed successfully") + } +} + +// Reads in a list of nodes from a file, where each line is in the format: +// http://localhost:50100 http://chainlink.core.1:50100 notreal@fakeemail.ch fj293fbBnlQ!f9vNs +func mustReadNodesList(path string) []NodeWithCreds { + fmt.Println("Reading nodes list from", path) + nodesList, err := readLines(path) + helpers.PanicErr(err) + + nodes := make([]NodeWithCreds, 0, len(nodesList)) + for _, r := range nodesList { + rr := strings.TrimSpace(r) + if len(rr) == 0 { + continue + } + s := strings.Split(rr, " ") + if len(s) != 4 { + helpers.PanicErr(errors.New("wrong nodes list format")) + } + + r := SimpleURL{ + Scheme: "http", + Host: s[0], + } + u := SimpleURL{ + Scheme: "http", + Host: s[1], + } + remoteURL, err := url.Parse(u.String()) + PanicErr(err) + nodes = append(nodes, NodeWithCreds{ + URL: u, + RemoteURL: r, + // This is the equivalent of "chainlink.core.1" in our above example + ServiceName: remoteURL.Hostname(), + APILogin: s[2], + APIPassword: s[3], + KeystorePassword: "", + }) + } + + return nodes +} + +func readLines(path string) ([]string, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + return lines, scanner.Err() +} diff --git a/core/scripts/keystone/src/01_toolkit_test.go b/core/scripts/keystone/src/01_toolkit_test.go new file mode 100644 index 00000000000..6f4a083940e --- /dev/null +++ b/core/scripts/keystone/src/01_toolkit_test.go @@ -0,0 +1,49 @@ +package src + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMustReadNodesList(t *testing.T) { + t.Run("valid nodes list", func(t *testing.T) { + content := "localhost:50100 chainlink.core.1:50100 user1 pass1\nlocalhost:50101 chainlink.core.2:50101 user2 pass2" + filePath := writeTempFile(t, content) + defer os.Remove(filePath) + + nodes := mustReadNodesList(filePath) + assert.Len(t, nodes, 2) + + assert.Equal(t, "user1", nodes[0].APILogin) + assert.Equal(t, "user2", nodes[1].APILogin) + + assert.Equal(t, "pass1", nodes[0].APIPassword) + assert.Equal(t, "pass2", nodes[1].APIPassword) + + assert.Equal(t, "http://localhost:50100", nodes[0].RemoteURL.String()) + assert.Equal(t, "http://localhost:50101", nodes[1].RemoteURL.String()) + + assert.Equal(t, "chainlink.core.1", nodes[0].ServiceName) + assert.Equal(t, "chainlink.core.2", nodes[1].ServiceName) + + assert.Equal(t, "http://chainlink.core.1:50100", nodes[0].URL.String()) + assert.Equal(t, "http://chainlink.core.2:50101", nodes[1].URL.String()) + }) +} + +func writeTempFile(t *testing.T, content string) string { + file, err := os.CreateTemp("", "nodeslist") + if err != nil { + t.Fatalf("failed to create temp file: %v", err) + } + defer file.Close() + + _, err = file.WriteString(content) + if err != nil { + t.Fatalf("failed to write to temp file: %v", err) + } + + return file.Name() +} diff --git a/core/scripts/keystone/src/02_deploy_jobspecs_cmd.go b/core/scripts/keystone/src/02_deploy_jobspecs_cmd.go deleted file 
mode 100644 index 275943d6388..00000000000 --- a/core/scripts/keystone/src/02_deploy_jobspecs_cmd.go +++ /dev/null @@ -1,165 +0,0 @@ -package src - -import ( - "bytes" - "errors" - "flag" - "fmt" - "os" - "reflect" - "runtime" - "strings" - - "github.com/urfave/cli" - - helpers "github.com/smartcontractkit/chainlink/core/scripts/common" - "github.com/smartcontractkit/chainlink/v2/core/cmd" -) - -type deployJobSpecs struct{} - -func NewDeployJobSpecsCommand() *deployJobSpecs { - return &deployJobSpecs{} -} - -func (g *deployJobSpecs) Name() string { - return "deploy-jobspecs" -} - -func (g *deployJobSpecs) Run(args []string) { - fs := flag.NewFlagSet(g.Name(), flag.ContinueOnError) - chainID := fs.Int64("chainid", 11155111, "chain id") - p2pPort := fs.Int64("p2pport", 6690, "p2p port") - onlyReplay := fs.Bool("onlyreplay", false, "only replay the block from the OCR3 contract setConfig transaction") - templatesLocation := fs.String("templates", "", "Custom templates location") - nodeList := fs.String("nodes", "", "Custom node list location") - publicKeys := fs.String("publickeys", "", "Custom public keys json location") - artefactsDir := fs.String("artefacts", "", "Custom artefacts directory location") - - err := fs.Parse(args) - if err != nil || chainID == nil || *chainID == 0 || p2pPort == nil || *p2pPort == 0 || onlyReplay == nil { - fs.Usage() - os.Exit(1) - } - if *onlyReplay { - fmt.Println("Only replaying OCR3 contract setConfig transaction") - } else { - fmt.Println("Deploying OCR3 job specs") - } - - if *artefactsDir == "" { - *artefactsDir = defaultArtefactsDir - } - if *publicKeys == "" { - *publicKeys = defaultPublicKeys - } - if *nodeList == "" { - *nodeList = defaultNodeList - } - if *templatesLocation == "" { - *templatesLocation = "templates" - } - - nodes := downloadNodeAPICredentials(*nodeList) - deployedContracts, err := LoadDeployedContracts(*artefactsDir) - PanicErr(err) - - jobspecs := genSpecs( - *publicKeys, - *nodeList, - *templatesLocation, - *chainID, *p2pPort, deployedContracts.OCRContract.Hex(), - ) - flattenedSpecs := []hostSpec{jobspecs.bootstrap} - flattenedSpecs = append(flattenedSpecs, jobspecs.oracles...) - - // sanity check arr lengths - if len(nodes) != len(flattenedSpecs) { - PanicErr(errors.New("Mismatched node and job spec lengths")) - } - - for i, n := range nodes { - output := &bytes.Buffer{} - client, app := newApp(n, output) - fmt.Println("Logging in:", n.url) - loginFs := flag.NewFlagSet("test", flag.ContinueOnError) - loginFs.Bool("bypass-version-check", true, "") - loginCtx := cli.NewContext(app, loginFs, nil) - err := client.RemoteLogin(loginCtx) - helpers.PanicErr(err) - output.Reset() - - if !*onlyReplay { - specToDeploy := flattenedSpecs[i].spec.ToString() - specFragment := flattenedSpecs[i].spec[0:1] - fmt.Printf("Deploying jobspec: %s\n... 
\n", specFragment) - fs := flag.NewFlagSet("test", flag.ExitOnError) - err = fs.Parse([]string{specToDeploy}) - - helpers.PanicErr(err) - err = client.CreateJob(cli.NewContext(app, fs, nil)) - if err != nil { - fmt.Println("Failed to deploy job spec:", specFragment, "Error:", err) - } - output.Reset() - } - - replayFs := flag.NewFlagSet("test", flag.ExitOnError) - flagSetApplyFromAction(client.ReplayFromBlock, replayFs, "") - err = replayFs.Set("block-number", fmt.Sprint(deployedContracts.SetConfigTxBlock)) - helpers.PanicErr(err) - err = replayFs.Set("evm-chain-id", fmt.Sprint(*chainID)) - helpers.PanicErr(err) - - fmt.Printf("Replaying from block: %d\n", deployedContracts.SetConfigTxBlock) - fmt.Printf("EVM Chain ID: %d\n\n", *chainID) - replayCtx := cli.NewContext(app, replayFs, nil) - err = client.ReplayFromBlock(replayCtx) - helpers.PanicErr(err) - } -} - -// flagSetApplyFromAction applies the flags from action to the flagSet. -// -// `parentCommand` will filter the app commands and only applies the flags if the command/subcommand has a parent with that name, if left empty no filtering is done -// -// Taken from: https://github.com/smartcontractkit/chainlink/blob/develop/core/cmd/shell_test.go#L590 -func flagSetApplyFromAction(action interface{}, flagSet *flag.FlagSet, parentCommand string) { - cliApp := cmd.Shell{} - app := cmd.NewApp(&cliApp) - - foundName := parentCommand == "" - actionFuncName := getFuncName(action) - - for _, command := range app.Commands { - flags := recursiveFindFlagsWithName(actionFuncName, command, parentCommand, foundName) - - for _, flag := range flags { - flag.Apply(flagSet) - } - } -} - -func recursiveFindFlagsWithName(actionFuncName string, command cli.Command, parent string, foundName bool) []cli.Flag { - if command.Action != nil { - if actionFuncName == getFuncName(command.Action) && foundName { - return command.Flags - } - } - - for _, subcommand := range command.Subcommands { - if !foundName { - foundName = strings.EqualFold(subcommand.Name, parent) - } - - found := recursiveFindFlagsWithName(actionFuncName, subcommand, parent, foundName) - if found != nil { - return found - } - } - return nil -} - -func getFuncName(i interface{}) string { - return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() -} diff --git a/core/scripts/keystone/src/02_deploy_keystone_workflows.go b/core/scripts/keystone/src/02_deploy_keystone_workflows.go new file mode 100644 index 00000000000..6c6580e21f0 --- /dev/null +++ b/core/scripts/keystone/src/02_deploy_keystone_workflows.go @@ -0,0 +1,134 @@ +package src + +import ( + "bytes" + "fmt" + "text/template" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + + kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry_1_1_0" +) + +func deployKeystoneWorkflowsTo(nodeSet NodeSet, reg kcr.CapabilitiesRegistryInterface) { + fmt.Println("Deploying Keystone workflow jobs") + caps, err := reg.GetCapabilities(&bind.CallOpts{}) + PanicErr(err) + + streams := NewStreamsTriggerV1Capability() + ocr3 := NewOCR3V1ConsensusCapability() + testnetWrite := NewEthereumGethTestnetV1WriteCapability() + + capSet := NewCapabilitySet(streams, ocr3, testnetWrite) + expectedHashedCIDs := capSet.HashedIDs(reg) + + // Check that the capabilities are registered + for _, c := range caps { + found := false + for _, expected := range expectedHashedCIDs { + if c.HashedId == expected { + found = true + break + } + } + + if !found { + panic(fmt.Sprintf("Capability %s not found in registry", 
c.HashedId)) + } + } + + feedIDs := []string{} + for _, feed := range feeds { + feedIDs = append(feedIDs, fmt.Sprintf("0x%x", feed.id)) + } + workflowConfig := WorkflowJobSpecConfig{ + JobSpecName: "keystone_workflow", + WorkflowOwnerAddress: "0x1234567890abcdef1234567890abcdef12345678", + FeedIDs: feedIDs, + TargetID: testnetWrite.GetID(), + ConsensusID: ocr3.GetID(), + TriggerID: streams.GetID(), + TargetAddress: "0x1234567890abcdef1234567890abcdef12345678", + } + jobSpecStr := createKeystoneWorkflowJob(workflowConfig) + for _, n := range nodeSet.Nodes[1:] { // skip the bootstrap node + api := newNodeAPI(n) + upsertJob(api, workflowConfig.JobSpecName, jobSpecStr) + } +} + +type WorkflowJobSpecConfig struct { + JobSpecName string + WorkflowOwnerAddress string + FeedIDs []string + TargetID string + ConsensusID string + TriggerID string + TargetAddress string +} + +func createKeystoneWorkflowJob(workflowConfig WorkflowJobSpecConfig) string { + const keystoneWorkflowTemplate = ` +type = "workflow" +schemaVersion = 1 +name = "{{ .JobSpecName }}" +workflow = """ +name: "ccip_kiab1" +owner: '{{ .WorkflowOwnerAddress }}' +triggers: + - id: streams-trigger@1.1.0 + config: + maxFrequencyMs: 10000 + feedIds: +{{- range .FeedIDs }} + - '{{ . }}' +{{- end }} + +consensus: + - id: offchain_reporting@1.0.0 + ref: ccip_feeds + inputs: + observations: + - $(trigger.outputs) + config: + report_id: '0001' + key_id: 'evm' + aggregation_method: data_feeds + aggregation_config: + feeds: +{{- range .FeedIDs }} + '{{ . }}': + deviation: '0.05' + heartbeat: 1800 +{{- end }} + encoder: EVM + encoder_config: + abi: "(bytes32 FeedID, uint224 Price, uint32 Timestamp)[] Reports" + abi: (bytes32 FeedID, uint224 Price, uint32 Timestamp)[] Reports + +targets: + - id: {{ .TargetID }} + inputs: + signed_report: $(ccip_feeds.outputs) + config: + address: '{{ .TargetAddress }}' + deltaStage: 5s + schedule: oneAtATime + +""" +workflowOwner = "{{ .WorkflowOwnerAddress }}" +` + + tmpl, err := template.New("workflow").Parse(keystoneWorkflowTemplate) + + if err != nil { + panic(err) + } + var renderedTemplate bytes.Buffer + err = tmpl.Execute(&renderedTemplate, workflowConfig) + if err != nil { + panic(err) + } + + return renderedTemplate.String() +} diff --git a/core/scripts/keystone/src/02_deploy_keystone_workflows_test.go b/core/scripts/keystone/src/02_deploy_keystone_workflows_test.go new file mode 100644 index 00000000000..bef6a768dce --- /dev/null +++ b/core/scripts/keystone/src/02_deploy_keystone_workflows_test.go @@ -0,0 +1,21 @@ +package src + +import ( + "testing" + + "github.com/gkampitakis/go-snaps/snaps" +) + +func TestCreateKeystoneWorkflowJob(t *testing.T) { + workflowConfig := WorkflowJobSpecConfig{ + JobSpecName: "keystone_workflow", + WorkflowOwnerAddress: "0x1234567890abcdef1234567890abcdef12345678", + FeedIDs: []string{"feed1", "feed2", "feed3"}, + TargetID: "target_id", + TargetAddress: "0xabcdefabcdefabcdefabcdefabcdefabcdef", + } + + output := createKeystoneWorkflowJob(workflowConfig) + + snaps.MatchSnapshot(t, output) +} diff --git a/core/scripts/keystone/src/02_fund_transmitters.go b/core/scripts/keystone/src/02_fund_transmitters.go new file mode 100644 index 00000000000..751a90d22d8 --- /dev/null +++ b/core/scripts/keystone/src/02_fund_transmitters.go @@ -0,0 +1,52 @@ +package src + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + + "github.com/smartcontractkit/chainlink-testing-framework/lib/utils/conversions" + helpers 
"github.com/smartcontractkit/chainlink/core/scripts/common" +) + +func distributeFunds(nodeKeys []NodeKeys, env helpers.Environment) { + fmt.Println("Funding transmitters...") + transmittersStr := []string{} + fundingAmount := big.NewInt(500000000000000000) // 0.5 ETH + minThreshold := big.NewInt(50000000000000000) // 0.05 ETH + + for _, n := range nodeKeys { + balance, err := getBalance(n.EthAddress, env) + if err != nil { + fmt.Printf("Error fetching balance for %s: %v\n", n.EthAddress, err) + continue + } + if balance.Cmp(minThreshold) < 0 { + fmt.Printf( + "Transmitter %s has insufficient funds, funding with %s ETH. Current balance: %s, threshold: %s\n", + n.EthAddress, + conversions.WeiToEther(fundingAmount).String(), + conversions.WeiToEther(balance).String(), + conversions.WeiToEther(minThreshold).String(), + ) + transmittersStr = append(transmittersStr, n.EthAddress) + } + } + + if len(transmittersStr) > 0 { + helpers.FundNodes(env, transmittersStr, fundingAmount) + } else { + fmt.Println("All transmitters have sufficient funds.") + } +} + +func getBalance(address string, env helpers.Environment) (*big.Int, error) { + balance, err := env.Ec.BalanceAt(context.Background(), common.HexToAddress(address), nil) + if err != nil { + return nil, err + } + + return balance, nil +} diff --git a/core/scripts/keystone/src/02_provision_capabilities_registry.go b/core/scripts/keystone/src/02_provision_capabilities_registry.go new file mode 100644 index 00000000000..aa0f203f96b --- /dev/null +++ b/core/scripts/keystone/src/02_provision_capabilities_registry.go @@ -0,0 +1,68 @@ +package src + +import ( + "context" + "fmt" + + helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry_1_1_0" +) + +func provisionCapabillitiesRegistry(env helpers.Environment, nodeSets NodeSets, chainID int64, artefactsDir string) kcr.CapabilitiesRegistryInterface { + fmt.Printf("Provisioning capabilities registry on chain %d\n", chainID) + ctx := context.Background() + reg := deployCR(ctx, artefactsDir, env) + crProvisioner := NewCapabilityRegistryProvisioner(reg, env) + streamsTriggerCapSet := NewCapabilitySet(NewStreamsTriggerV1Capability()) + workflowCapSet := NewCapabilitySet(NewOCR3V1ConsensusCapability(), NewEthereumGethTestnetV1WriteCapability()) + workflowDON := nodeKeysToDON(nodeSets.Workflow.Name, nodeSets.Workflow.NodeKeys[1:], workflowCapSet) + streamsTriggerDON := nodeKeysToDON(nodeSets.StreamsTrigger.Name, nodeSets.StreamsTrigger.NodeKeys[1:], streamsTriggerCapSet) + + crProvisioner.AddCapabilities(ctx, MergeCapabilitySets(streamsTriggerCapSet, workflowCapSet)) + dons := map[string]DON{workflowDON.Name: workflowDON, streamsTriggerDON.Name: streamsTriggerDON} + nodeOperator := NewNodeOperator(env.Owner.From, "MY_NODE_OPERATOR", dons) + crProvisioner.AddNodeOperator(ctx, nodeOperator) + + crProvisioner.AddNodes(ctx, nodeOperator, nodeSets.Workflow.Name, nodeSets.StreamsTrigger.Name) + + crProvisioner.AddDON(ctx, nodeOperator, nodeSets.Workflow.Name, true, true) + crProvisioner.AddDON(ctx, nodeOperator, nodeSets.StreamsTrigger.Name, true, false) + + return reg +} + +// nodeKeysToDON converts a slice of NodeKeys into a DON struct with the given name and CapabilitySet. 
+func nodeKeysToDON(donName string, nodeKeys []NodeKeys, capSet CapabilitySet) DON { + peers := []peer{} + for _, n := range nodeKeys { + p := peer{ + PeerID: n.P2PPeerID, + Signer: n.OCR2OnchainPublicKey, + } + peers = append(peers, p) + } + return DON{ + F: 1, + Name: donName, + Peers: peers, + CapabilitySet: capSet, + } +} + +func deployCR(ctx context.Context, artefactsDir string, env helpers.Environment) kcr.CapabilitiesRegistryInterface { + o := LoadOnchainMeta(artefactsDir, env) + // We always redeploy the capabilities registry to ensure it is up to date + // since we don't have diffing logic to determine if it has changed + // if o.CapabilitiesRegistry != nil { + // fmt.Println("CapabilitiesRegistry already deployed, skipping...") + // return o.CapabilitiesRegistry + // } + + _, tx, capabilitiesRegistry, innerErr := kcr.DeployCapabilitiesRegistry(env.Owner, env.Ec) + PanicErr(innerErr) + helpers.ConfirmContractDeployed(ctx, env.Ec, tx, env.ChainID) + + o.CapabilitiesRegistry = capabilitiesRegistry + WriteOnchainMeta(o, artefactsDir) + return capabilitiesRegistry +} diff --git a/core/scripts/keystone/src/02_provision_crib.go b/core/scripts/keystone/src/02_provision_crib.go new file mode 100644 index 00000000000..bf3a31f8b7e --- /dev/null +++ b/core/scripts/keystone/src/02_provision_crib.go @@ -0,0 +1,310 @@ +package src + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + + ocrcommontypes "github.com/smartcontractkit/libocr/commontypes" + "gopkg.in/yaml.v3" + + helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + evmcfg "github.com/smartcontractkit/chainlink/v2/core/chains/evm/config/toml" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" + "github.com/smartcontractkit/chainlink/v2/core/config/toml" + "github.com/smartcontractkit/chainlink/v2/core/services/chainlink" +) + +type Helm struct { + Helm Chart `yaml:"helm"` +} + +type Chart struct { + HelmValues HelmValues `yaml:"values"` +} + +type HelmValues struct { + Chainlink Chainlink `yaml:"chainlink,omitempty"` + Ingress Ingress `yaml:"ingress,omitempty"` +} + +type Ingress struct { + Hosts []Host `yaml:"hosts,omitempty"` +} + +type Host struct { + Host string `yaml:"host,omitempty"` + HTTP HTTP `yaml:"http,omitempty"` +} + +type HTTP struct { + Paths []Path `yaml:"paths,omitempty"` +} + +type Path struct { + Path string `yaml:"path,omitempty"` + Backend Backend `yaml:"backend,omitempty"` +} + +type Backend struct { + Service Service `yaml:"service,omitempty"` +} + +type Service struct { + Name string `yaml:"name,omitempty"` + Port Port `yaml:"port,omitempty"` +} + +type Port struct { + Number int `yaml:"number,omitempty"` +} + +type Chainlink struct { + Nodes map[string]Node `yaml:"nodes,omitempty"` +} + +type Node struct { + Image string `yaml:"image,omitempty"` + OverridesToml string `yaml:"overridesToml,omitempty"` +} + +func writePreprovisionConfig(nodeSetSize int, outputPath string) { + chart := generatePreprovisionConfig(nodeSetSize) + + writeCribConfig(chart, outputPath) +} + +func writeCribConfig(chart Helm, outputPath string) { + yamlData, err := yaml.Marshal(chart) + helpers.PanicErr(err) + + if outputPath == "-" { + _, err = os.Stdout.Write(yamlData) + helpers.PanicErr(err) + } else { + ensureArtefactsDir(filepath.Dir(outputPath)) + err = os.WriteFile(outputPath, yamlData, 0600) + helpers.PanicErr(err) + } +} + +func generatePreprovisionConfig(nodeSetSize int) Helm { + nodeSets := []string{"ks-wf-", 
"ks-str-trig-"} + nodes := make(map[string]Node) + nodeNames := []string{} + + for nodeSetIndex, prefix := range nodeSets { + // Bootstrap node + btNodeName := fmt.Sprintf("%d-%sbt-node1", nodeSetIndex, prefix) + nodeNames = append(nodeNames, btNodeName) + nodes[btNodeName] = Node{ + Image: "${runtime.images.app}", + } + + // Other nodes + for i := 2; i <= nodeSetSize; i++ { + nodeName := fmt.Sprintf("%d-%snode%d", nodeSetIndex, prefix, i) + nodeNames = append(nodeNames, nodeName) + nodes[nodeName] = Node{ + Image: "${runtime.images.app}", + } + } + } + + ingress := generateIngress(nodeNames) + + helm := Helm{ + Chart{ + HelmValues: HelmValues{ + Chainlink: Chainlink{ + Nodes: nodes, + }, + Ingress: ingress, + }, + }, + } + + return helm +} + +func writePostProvisionConfig( + nodeSets NodeSets, + chainID int64, + capabilitiesP2PPort int64, + forwarderAddress string, + capabilitiesRegistryAddress string, + outputPath string, +) { + chart := generatePostprovisionConfig( + nodeSets, + chainID, + capabilitiesP2PPort, + forwarderAddress, + capabilitiesRegistryAddress, + ) + + writeCribConfig(chart, outputPath) +} + +func generatePostprovisionConfig( + nodeSets NodeSets, + chainID int64, + capabilitiesP2PPort int64, + forwarderAddress string, + capabillitiesRegistryAddress string, +) Helm { + nodes := make(map[string]Node) + nodeNames := []string{} + var capabilitiesBootstrapper *ocrcommontypes.BootstrapperLocator + + // Build nodes for each NodeSet + for nodeSetIndex, nodeSet := range []NodeSet{nodeSets.Workflow, nodeSets.StreamsTrigger} { + // Bootstrap node + btNodeName := fmt.Sprintf("%d-%sbt-node1", nodeSetIndex, nodeSet.Prefix) + // Note this line ordering is important, + // we assign capabilitiesBootstrapper after we generate overrides so that + // we do not include the bootstrapper config to itself + overridesToml := generateOverridesToml( + chainID, + capabilitiesP2PPort, + capabillitiesRegistryAddress, + "", + "", + capabilitiesBootstrapper, + nodeSet.Name, + ) + nodes[btNodeName] = Node{ + Image: "${runtime.images.app}", + OverridesToml: overridesToml, + } + if nodeSet.Name == WorkflowNodeSetName { + workflowBtNodeKey := nodeSets.Workflow.NodeKeys[0] // First node key as bootstrapper + wfBt, err := ocrcommontypes.NewBootstrapperLocator(workflowBtNodeKey.P2PPeerID, []string{fmt.Sprintf("%s:%d", nodeSets.Workflow.Nodes[0].ServiceName, capabilitiesP2PPort)}) + helpers.PanicErr(err) + capabilitiesBootstrapper = wfBt + } + nodeNames = append(nodeNames, btNodeName) + + // Other nodes + for i, nodeKey := range nodeSet.NodeKeys[1:] { // Start from second key + nodeName := fmt.Sprintf("%d-%snode%d", nodeSetIndex, nodeSet.Prefix, i+2) + nodeNames = append(nodeNames, nodeName) + overridesToml := generateOverridesToml( + chainID, + capabilitiesP2PPort, + capabillitiesRegistryAddress, + nodeKey.EthAddress, + forwarderAddress, + capabilitiesBootstrapper, + nodeSet.Name, + ) + nodes[nodeName] = Node{ + Image: "${runtime.images.app}", + OverridesToml: overridesToml, + } + } + } + + ingress := generateIngress(nodeNames) + + helm := Helm{ + Chart{ + HelmValues: HelmValues{ + Chainlink: Chainlink{ + Nodes: nodes, + }, + Ingress: ingress, + }, + }, + } + + return helm +} + +func generateOverridesToml( + chainID int64, + capabilitiesP2PPort int64, + externalRegistryAddress string, + fromAddress string, + forwarderAddress string, + capabilitiesBootstrapper *ocrcommontypes.BootstrapperLocator, + nodeSetName string, +) string { + evmConfig := &evmcfg.EVMConfig{ + ChainID: big.NewI(chainID), + Nodes: nil, // 
We have the rpc nodes set globally + } + + conf := chainlink.Config{ + Core: toml.Core{ + Capabilities: toml.Capabilities{ + ExternalRegistry: toml.ExternalRegistry{ + Address: ptr(externalRegistryAddress), + NetworkID: ptr("evm"), + ChainID: ptr(strconv.FormatInt(chainID, 10)), + }, + Peering: toml.P2P{ + V2: toml.P2PV2{ + Enabled: ptr(true), + ListenAddresses: ptr([]string{fmt.Sprintf("0.0.0.0:%d", capabilitiesP2PPort)}), + }, + }, + }, + }, + } + + if capabilitiesBootstrapper != nil { + conf.Core.Capabilities.Peering.V2.DefaultBootstrappers = ptr([]ocrcommontypes.BootstrapperLocator{*capabilitiesBootstrapper}) + + if nodeSetName == WorkflowNodeSetName { + evmConfig.Workflow = evmcfg.Workflow{ + FromAddress: ptr(evmtypes.MustEIP55Address(fromAddress)), + ForwarderAddress: ptr(evmtypes.MustEIP55Address(forwarderAddress)), + } + } + } + + conf.EVM = evmcfg.EVMConfigs{ + evmConfig, + } + + confStr, err := conf.TOMLString() + helpers.PanicErr(err) + + return confStr +} + +// New function to generate Ingress +func generateIngress(nodeNames []string) Ingress { + hosts := make([]Host, 0, len(nodeNames)) + + for _, nodeName := range nodeNames { + host := Host{ + Host: fmt.Sprintf("${DEVSPACE_NAMESPACE}-%s.${DEVSPACE_INGRESS_BASE_DOMAIN}", nodeName), + HTTP: HTTP{ + Paths: []Path{ + { + Path: "/", + Backend: Backend{ + Service: Service{ + Name: "app-" + nodeName, + Port: Port{ + Number: 6688, + }, + }, + }, + }, + }, + }, + } + hosts = append(hosts, host) + } + + return Ingress{ + Hosts: hosts, + } +} + +func ptr[T any](t T) *T { return &t } diff --git a/core/scripts/keystone/src/02_provision_crib_test.go b/core/scripts/keystone/src/02_provision_crib_test.go new file mode 100644 index 00000000000..4e4cbf1efd5 --- /dev/null +++ b/core/scripts/keystone/src/02_provision_crib_test.go @@ -0,0 +1,44 @@ +package src + +import ( + "strings" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/gkampitakis/go-snaps/snaps" + "gopkg.in/yaml.v3" +) + +func TestGeneratePostprovisionConfig(t *testing.T) { + chainID := int64(1337) + capabilitiesP2PPort := int64(6691) + nodeSetsPath := "./testdata/node_sets.json" + nodeSetSize := 5 + forwarderAddress := common.Address([20]byte{0: 1}).Hex() + capabilitiesRegistryAddress := common.Address([20]byte{0: 2}).Hex() + nodeSets := downloadNodeSets(chainID, nodeSetsPath, nodeSetSize) + + chart := generatePostprovisionConfig(nodeSets, chainID, capabilitiesP2PPort, forwarderAddress, capabilitiesRegistryAddress) + + yamlData, err := yaml.Marshal(chart) + if err != nil { + t.Fatalf("Failed to marshal chart: %v", err) + } + + linesStr := strings.Split(string(yamlData), "\n") + snaps.MatchSnapshot(t, strings.Join(linesStr, "\n")) +} + +func TestGeneratePreprovisionConfig(t *testing.T) { + nodeSetSize := 5 + + chart := generatePreprovisionConfig(nodeSetSize) + + yamlData, err := yaml.Marshal(chart) + if err != nil { + t.Fatalf("Failed to marshal chart: %v", err) + } + + linesStr := strings.Split(string(yamlData), "\n") + snaps.MatchSnapshot(t, strings.Join(linesStr, "\n")) +} diff --git a/core/scripts/keystone/src/02_provision_forwarder_contract.go b/core/scripts/keystone/src/02_provision_forwarder_contract.go new file mode 100644 index 00000000000..4cc3d7f70fa --- /dev/null +++ b/core/scripts/keystone/src/02_provision_forwarder_contract.go @@ -0,0 +1,33 @@ +package src + +import ( + "context" + "fmt" + + helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/forwarder" 
+) + +func deployForwarder( + env helpers.Environment, + artefacts string, +) { + o := LoadOnchainMeta(artefacts, env) + if o.Forwarder != nil { + fmt.Println("Forwarder contract already deployed, skipping") + return + } + + fmt.Println("Deploying forwarder contract...") + forwarderContract := DeployForwarder(env) + o.Forwarder = forwarderContract + WriteOnchainMeta(o, artefacts) +} + +func DeployForwarder(e helpers.Environment) *forwarder.KeystoneForwarder { + _, tx, contract, err := forwarder.DeployKeystoneForwarder(e.Owner, e.Ec) + PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), e.Ec, tx, e.ChainID) + + return contract +} diff --git a/core/scripts/keystone/src/02_provision_ocr3_capability.go b/core/scripts/keystone/src/02_provision_ocr3_capability.go new file mode 100644 index 00000000000..cd80bf4238b --- /dev/null +++ b/core/scripts/keystone/src/02_provision_ocr3_capability.go @@ -0,0 +1,286 @@ +package src + +import ( + "bytes" + "strconv" + "text/template" + + "context" + "flag" + "fmt" + + "github.com/smartcontractkit/chainlink/deployment" + + ksdeploy "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/ocr3_capability" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" +) + +func provisionOCR3( + env helpers.Environment, + nodeSet NodeSet, + chainID int64, + p2pPort int64, + ocrConfigFile string, + artefactsDir string, +) (onchainMeta *onchainMeta, cacheHit bool) { + nodeKeys := nodeSet.NodeKeys + nodes := nodeSet.Nodes + + onchainMeta, cacheHit = deployOCR3Contract( + nodeKeys, + env, + ocrConfigFile, + artefactsDir, + ) + + deployOCR3JobSpecs( + nodes, + chainID, + nodeKeys, + p2pPort, + onchainMeta, + ) + + return +} + +func deployOCR3Contract( + nodeKeys []NodeKeys, + env helpers.Environment, + configFile string, + artefacts string, +) (o *onchainMeta, cacheHit bool) { + o = LoadOnchainMeta(artefacts, env) + ocrConf := generateOCR3Config( + nodeKeys, + configFile, + ) + + if o.OCR3 != nil { + // types.ConfigDigestPrefixKeystoneOCR3Capability + fmt.Println("OCR3 Contract already deployed, checking config...") + latestConfigDigestBytes, err := o.OCR3.LatestConfigDetails(nil) + PanicErr(err) + latestConfigDigest, err := types.BytesToConfigDigest(latestConfigDigestBytes.ConfigDigest[:]) + PanicErr(err) + + cc := ocrConfToContractConfig(ocrConf, latestConfigDigestBytes.ConfigCount) + digester := evm.OCR3CapabilityOffchainConfigDigester{ + ChainID: uint64(env.ChainID), //nolint:gosec // this won't overflow + ContractAddress: o.OCR3.Address(), + } + digest, err := digester.ConfigDigest(context.Background(), cc) + PanicErr(err) + + if digest.Hex() == latestConfigDigest.Hex() { + fmt.Printf("OCR3 Contract already deployed with the same config (digest: %s), skipping...\n", digest.Hex()) + return o, false + } + + fmt.Printf("OCR3 Contract contains a different config, updating...\nOld digest: %s\nNew digest: %s\n", latestConfigDigest.Hex(), digest.Hex()) + setOCRConfig(o, env, ocrConf, artefacts) + return o, true + } + + fmt.Println("Deploying keystone ocr3 contract...") + _, tx, ocrContract, err := ocr3_capability.DeployOCR3Capability(env.Owner, env.Ec) + PanicErr(err) + helpers.ConfirmContractDeployed(context.Background(), env.Ec, tx, env.ChainID) + o.OCR3 = ocrContract + setOCRConfig(o, env, ocrConf, artefacts) 
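+	// A fresh deployment always persists the new contract address and the SetConfig
+	// block number to the artefacts dir, so subsequent runs can compare config digests
+	// above instead of redeploying.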
+ + return o, true +} + +func generateOCR3Config(nodeKeys []NodeKeys, configFile string) ksdeploy.OCR3OnchainConfig { + topLevelCfg := mustReadOCR3Config(configFile) + cfg := topLevelCfg.OracleConfig + secrets := deployment.XXXGenerateTestOCRSecrets() + c, err := ksdeploy.GenerateOCR3Config(cfg, nodeKeysToKsDeployNodeKeys(nodeKeys[1:]), secrets) // skip the bootstrap node + helpers.PanicErr(err) + return c +} + +func setOCRConfig(o *onchainMeta, env helpers.Environment, ocrConf ksdeploy.OCR3OnchainConfig, artefacts string) { + fmt.Println("Setting OCR3 contract config...") + tx, err := o.OCR3.SetConfig(env.Owner, + ocrConf.Signers, + ocrConf.Transmitters, + ocrConf.F, + ocrConf.OnchainConfig, + ocrConf.OffchainConfigVersion, + ocrConf.OffchainConfig, + ) + PanicErr(err) + receipt := helpers.ConfirmTXMined(context.Background(), env.Ec, tx, env.ChainID) + o.SetConfigTxBlock = receipt.BlockNumber.Uint64() + WriteOnchainMeta(o, artefacts) +} + +func deployOCR3JobSpecs( + nodes []NodeWithCreds, + chainID int64, + nodeKeys []NodeKeys, + p2pPort int64, + onchainMeta *onchainMeta, +) { + ocrAddress := onchainMeta.OCR3.Address().Hex() + bootstrapURI := fmt.Sprintf("%s@%s:%d", nodeKeys[0].P2PPeerID, nodes[0].ServiceName, p2pPort) + + var specName string + for i, n := range nodes { + var spec string + + if i == 0 { + bootstrapSpecConfig := BootstrapJobSpecConfig{ + JobSpecName: "ocr3_bootstrap", + OCRConfigContractAddress: ocrAddress, + ChainID: chainID, + } + specName = bootstrapSpecConfig.JobSpecName + spec = createBootstrapJobSpec(bootstrapSpecConfig) + } else { + oc := OracleJobSpecConfig{ + JobSpecName: "ocr3_oracle", + OCRConfigContractAddress: ocrAddress, + OCRKeyBundleID: nodeKeys[i].OCR2BundleID, + BootstrapURI: bootstrapURI, + TransmitterID: nodeKeys[i].EthAddress, + ChainID: chainID, + AptosKeyBundleID: nodeKeys[i].AptosBundleID, + } + specName = oc.JobSpecName + spec = createOracleJobSpec(oc) + } + + api := newNodeAPI(n) + upsertJob(api, specName, spec) + + fmt.Printf("Replaying from block: %d\n", onchainMeta.SetConfigTxBlock) + fmt.Printf("EVM Chain ID: %d\n\n", chainID) + api.withFlags(api.methods.ReplayFromBlock, func(fs *flag.FlagSet) { + err := fs.Set("block-number", strconv.FormatUint(onchainMeta.SetConfigTxBlock, 10)) + helpers.PanicErr(err) + err = fs.Set("evm-chain-id", strconv.FormatInt(chainID, 10)) + helpers.PanicErr(err) + }).mustExec() + } +} + +func mustReadOCR3Config(fileName string) (output ksdeploy.TopLevelConfigSource) { + return mustReadJSON[ksdeploy.TopLevelConfigSource](fileName) +} + +func nodeKeysToKsDeployNodeKeys(nks []NodeKeys) []ksdeploy.NodeKeys { + keys := []ksdeploy.NodeKeys{} + for _, nk := range nks { + keys = append(keys, ksdeploy.NodeKeys{ + EthAddress: nk.EthAddress, + AptosAccount: nk.AptosAccount, + AptosBundleID: nk.AptosBundleID, + AptosOnchainPublicKey: nk.AptosOnchainPublicKey, + P2PPeerID: nk.P2PPeerID, + OCR2BundleID: nk.OCR2BundleID, + OCR2OnchainPublicKey: nk.OCR2OnchainPublicKey, + OCR2OffchainPublicKey: nk.OCR2OffchainPublicKey, + OCR2ConfigPublicKey: nk.OCR2ConfigPublicKey, + CSAPublicKey: nk.CSAPublicKey, + }) + } + return keys +} + +// BootstrapJobSpecConfig holds configuration for the bootstrap job spec +type BootstrapJobSpecConfig struct { + JobSpecName string + OCRConfigContractAddress string + ChainID int64 +} + +// OracleJobSpecConfig holds configuration for the oracle job spec +type OracleJobSpecConfig struct { + JobSpecName string + OCRConfigContractAddress string + OCRKeyBundleID string + BootstrapURI string + TransmitterID string 
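+	// ChainID is rendered into the oracle spec's [relayConfig] section; AptosKeyBundleID
+	// feeds the multi-chain [onchainSigningStrategy] config alongside the EVM bundle.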
+ ChainID int64 + AptosKeyBundleID string +} + +func createBootstrapJobSpec(config BootstrapJobSpecConfig) string { + const bootstrapTemplate = ` +type = "bootstrap" +schemaVersion = 1 +name = "{{ .JobSpecName }}" +contractID = "{{ .OCRConfigContractAddress }}" +relay = "evm" + +[relayConfig] +chainID = "{{ .ChainID }}" +providerType = "ocr3-capability" +` + + tmpl, err := template.New("bootstrap").Parse(bootstrapTemplate) + if err != nil { + panic(err) + } + + var rendered bytes.Buffer + err = tmpl.Execute(&rendered, config) + if err != nil { + panic(err) + } + + return rendered.String() +} + +func createOracleJobSpec(config OracleJobSpecConfig) string { + const oracleTemplate = ` +type = "offchainreporting2" +schemaVersion = 1 +name = "{{ .JobSpecName }}" +contractID = "{{ .OCRConfigContractAddress }}" +ocrKeyBundleID = "{{ .OCRKeyBundleID }}" +p2pv2Bootstrappers = [ + "{{ .BootstrapURI }}", +] +relay = "evm" +pluginType = "plugin" +transmitterID = "{{ .TransmitterID }}" + +[relayConfig] +chainID = "{{ .ChainID }}" + +[pluginConfig] +command = "chainlink-ocr3-capability" +ocrVersion = 3 +pluginName = "ocr-capability" +providerType = "ocr3-capability" +telemetryType = "plugin" + +[onchainSigningStrategy] +strategyName = 'multi-chain' +[onchainSigningStrategy.config] +evm = "{{ .OCRKeyBundleID }}" +aptos = "{{ .AptosKeyBundleID }}" +` + + tmpl, err := template.New("oracle").Parse(oracleTemplate) + if err != nil { + panic(err) + } + + var rendered bytes.Buffer + err = tmpl.Execute(&rendered, config) + if err != nil { + panic(err) + } + + return rendered.String() +} diff --git a/core/scripts/keystone/src/02_provision_ocr3_capability_test.go b/core/scripts/keystone/src/02_provision_ocr3_capability_test.go new file mode 100644 index 00000000000..df4d9a41f5c --- /dev/null +++ b/core/scripts/keystone/src/02_provision_ocr3_capability_test.go @@ -0,0 +1,67 @@ +package src + +import ( + "errors" + "fmt" + "testing" + + "github.com/gkampitakis/go-snaps/match" + "github.com/gkampitakis/go-snaps/snaps" +) + +func TestGenerateOCR3Config(t *testing.T) { + // Generate OCR3 config + nodeSet := downloadNodeSets(1337, "./testdata/node_sets.json", 4) + nodeKeys := nodeSet.Workflow.NodeKeys + config := generateOCR3Config(nodeKeys, "./testdata/SampleConfig.json") + + matchOffchainConfig := match.Custom("OffchainConfig", func(s any) (any, error) { + // coerce the value to a string + s, ok := s.(string) + if !ok { + return nil, errors.New("offchain config is not a string") + } + + // if the string is not empty + if s == "" { + return nil, errors.New("offchain config is empty") + } + + return "", nil + }) + + snaps.MatchJSON(t, config, matchOffchainConfig) +} + +func TestGenSpecs(t *testing.T) { + nodeSetsPath := "./testdata/node_sets.json" + chainID := int64(1337) + p2pPort := int64(6690) + contractAddress := "0xB29934624cAe3765E33115A9530a13f5aEC7fa8A" + nodeSet := downloadNodeSets(chainID, nodeSetsPath, 4).Workflow + + // Create Bootstrap Job Spec + bootstrapConfig := BootstrapJobSpecConfig{ + JobSpecName: "ocr3_bootstrap", + OCRConfigContractAddress: contractAddress, + ChainID: chainID, + } + bootstrapSpec := createBootstrapJobSpec(bootstrapConfig) + + // Create Oracle Job Spec + oracleConfig := OracleJobSpecConfig{ + JobSpecName: "ocr3_oracle", + OCRConfigContractAddress: contractAddress, + OCRKeyBundleID: nodeSet.NodeKeys[1].OCR2BundleID, + BootstrapURI: fmt.Sprintf("%s@%s:%d", nodeSet.NodeKeys[0].P2PPeerID, nodeSet.Nodes[0].ServiceName, p2pPort), + TransmitterID: nodeSet.NodeKeys[1].P2PPeerID, + ChainID: 
chainID, + AptosKeyBundleID: nodeSet.NodeKeys[1].AptosBundleID, + } + oracleSpec := createOracleJobSpec(oracleConfig) + + // Combine Specs + generatedSpecs := fmt.Sprintf("%s\n\n%s", bootstrapSpec, oracleSpec) + + snaps.MatchSnapshot(t, generatedSpecs) +} diff --git a/core/scripts/keystone/src/02_provision_streams_trigger_capability.go b/core/scripts/keystone/src/02_provision_streams_trigger_capability.go new file mode 100644 index 00000000000..f476319362b --- /dev/null +++ b/core/scripts/keystone/src/02_provision_streams_trigger_capability.go @@ -0,0 +1,522 @@ +package src + +// This package deploys "offchainreporting2" job specs, which setup the streams trigger +// for the targeted node set +// See https://github.com/smartcontractkit/chainlink/blob/4d5fc1943bd6a60b49cbc3d263c0aa47dc3cecb7/core/services/ocr2/plugins/mercury/integration_test.go#L92 +// for how to setup the mercury portion of the streams trigger +// You can see how all fields are being used here: https://github.com/smartcontractkit/chainlink/blob/4d5fc1943bd6a60b49cbc3d263c0aa47dc3cecb7/core/services/ocr2/plugins/mercury/helpers_test.go#L314 +// https://github.com/smartcontractkit/infra-k8s/blob/be47098adfb605d79b5bab6aa601bcf443a6c48b/projects/chainlink/files/chainlink-clusters/cl-keystone-cap-one/config.yaml#L1 +// Trigger gets added to the registry here: https://github.com/smartcontractkit/chainlink/blob/4d5fc1943bd6a60b49cbc3d263c0aa47dc3cecb7/core/services/relay/evm/evm.go#L360 +// See integration workflow here: https://github.com/smartcontractkit/chainlink/blob/4d5fc1943bd6a60b49cbc3d263c0aa47dc3cecb7/core/capabilities/integration_tests/workflow.go#L15 +import ( + "bytes" + "context" + "crypto/ed25519" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "html/template" + "math" + "math/big" + "time" + + "net/url" + + "github.com/ethereum/go-ethereum/common" + "github.com/shopspring/decimal" + + "github.com/ethereum/go-ethereum/core/types" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/confighelper" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3confighelper" + + ocrtypes "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + + mercurytypes "github.com/smartcontractkit/chainlink-common/pkg/types/mercury" + datastreamsmercury "github.com/smartcontractkit/chainlink-data-streams/mercury" + + helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + "github.com/smartcontractkit/chainlink/deployment" + "github.com/smartcontractkit/chainlink/v2/core/bridges" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm" + "github.com/smartcontractkit/chainlink/v2/core/services/relay/evm/mercury" + + verifierContract "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy" + + "github.com/smartcontractkit/chainlink/v2/core/store/models" + "github.com/smartcontractkit/chainlink/v2/core/web/presenters" +) + +type feed struct { + id [32]byte + name string + + // we create a bridge for each feed + bridgeName string + bridgeURL string +} + +func v3FeedID(id [32]byte) [32]byte { + binary.BigEndian.PutUint16(id[:2], 3) + return id +} + +var feeds = []feed{ + { + v3FeedID([32]byte{5: 1}), + "BTC/USD", + "mock-bridge-btc", + "http://external-adapter:4001", + }, + { + v3FeedID([32]byte{5: 2}), + "LINK/USD", + "mock-bridge-link", + "http://external-adapter:4002", + }, + { + v3FeedID([32]byte{5: 3}), + "NATIVE/USD", + 
"mock-bridge-native", + "http://external-adapter:4003", + }, +} + +// See /core/services/ocr2/plugins/mercury/integration_test.go +func setupStreamsTrigger( + env helpers.Environment, + nodeSet NodeSet, + chainID int64, + p2pPort int64, + ocrConfigFilePath string, + artefactsDir string, +) { + fmt.Printf("Deploying streams trigger for chain %d\n", chainID) + fmt.Printf("Using OCR config file: %s\n", ocrConfigFilePath) + + fmt.Printf("Deploying Mercury V0.3 contracts\n") + verifier := deployMercuryV03Contracts(env, artefactsDir) + + fmt.Printf("Generating Mercury OCR config\n") + ocrConfig := generateMercuryOCR2Config(nodeSet.NodeKeys[1:]) // skip the bootstrap node + + for _, feed := range feeds { + fmt.Println("Configuring feeds...") + fmt.Printf("FeedID: %x\n", feed.id) + fmt.Printf("FeedName: %s\n", feed.name) + fmt.Printf("BridgeName: %s\n", feed.bridgeName) + fmt.Printf("BridgeURL: %s\n", feed.bridgeURL) + + latestConfigDetails, err := verifier.LatestConfigDetails(nil, feed.id) + PanicErr(err) + latestConfigDigest, err := ocrtypes.BytesToConfigDigest(latestConfigDetails.ConfigDigest[:]) + PanicErr(err) + + digester := mercury.NewOffchainConfigDigester( + feed.id, + big.NewInt(chainID), + verifier.Address(), + ocrtypes.ConfigDigestPrefixMercuryV02, + ) + configDigest, err := digester.ConfigDigest( + context.Background(), + mercuryOCRConfigToContractConfig( + ocrConfig, + latestConfigDetails.ConfigCount, + ), + ) + PanicErr(err) + + if configDigest.Hex() == latestConfigDigest.Hex() { + fmt.Printf("Verifier already deployed with the same config (digest: %s), skipping...\n", configDigest.Hex()) + } else { + fmt.Printf("Verifier contains a different config, updating...\nOld digest: %s\nNew digest: %s\n", latestConfigDigest.Hex(), configDigest.Hex()) + tx, err := verifier.SetConfig( + env.Owner, + feed.id, + ocrConfig.Signers, + ocrConfig.Transmitters, + ocrConfig.F, + ocrConfig.OnchainConfig, + ocrConfig.OffchainConfigVersion, + ocrConfig.OffchainConfig, + nil, + ) + helpers.ConfirmTXMined(context.Background(), env.Ec, tx, env.ChainID) + PanicErr(err) + } + + fmt.Printf("Deploying OCR2 job specs for feed %s\n", feed.name) + deployOCR2JobSpecsForFeed(nodeSet, verifier, feed, chainID, p2pPort) + } + + fmt.Println("Finished deploying streams trigger") +} + +func deployMercuryV03Contracts(env helpers.Environment, artefactsDir string) verifierContract.VerifierInterface { + var confirmDeploy = func(tx *types.Transaction, err error) { + helpers.ConfirmContractDeployed(context.Background(), env.Ec, tx, env.ChainID) + PanicErr(err) + } + o := LoadOnchainMeta(artefactsDir, env) + + if o.VerifierProxy != nil { + fmt.Printf("Verifier proxy contract already deployed at %s\n", o.VerifierProxy.Address()) + } else { + fmt.Printf("Deploying verifier proxy contract\n") + _, tx, verifierProxy, err := verifier_proxy.DeployVerifierProxy(env.Owner, env.Ec, common.Address{}) // zero address for access controller disables access control + confirmDeploy(tx, err) + o.VerifierProxy = verifierProxy + WriteOnchainMeta(o, artefactsDir) + } + + if o.Verifier == nil { + fmt.Printf("Deploying verifier contract\n") + _, tx, verifier, err := verifierContract.DeployVerifier(env.Owner, env.Ec, o.VerifierProxy.Address()) + confirmDeploy(tx, err) + o.Verifier = verifier + WriteOnchainMeta(o, artefactsDir) + } else { + fmt.Printf("Verifier contract already deployed at %s\n", o.Verifier.Address().Hex()) + } + + if o.InitializedVerifierAddress != o.Verifier.Address() { + fmt.Printf("Current initialized verifier address (%s) 
differs from the new verifier address (%s). Initializing verifier.\n", o.InitializedVerifierAddress.Hex(), o.Verifier.Address().Hex()) + tx, err := o.VerifierProxy.InitializeVerifier(env.Owner, o.Verifier.Address()) + receipt := helpers.ConfirmTXMined(context.Background(), env.Ec, tx, env.ChainID) + PanicErr(err) + inited, err := o.VerifierProxy.ParseVerifierInitialized(*receipt.Logs[0]) + PanicErr(err) + o.InitializedVerifierAddress = inited.VerifierAddress + WriteOnchainMeta(o, artefactsDir) + } else { + fmt.Printf("Verifier %s already initialized\n", o.Verifier.Address().Hex()) + } + + return o.Verifier +} + +func deployOCR2JobSpecsForFeed(nodeSet NodeSet, verifier verifierContract.VerifierInterface, feed feed, chainID int64, p2pPort int64) { + // we assign the first node as the bootstrap node + for i, n := range nodeSet.NodeKeys { + // parallel arrays + api := newNodeAPI(nodeSet.Nodes[i]) + jobSpecName := "" + jobSpecStr := "" + + upsertBridge(api, feed.bridgeName, feed.bridgeURL) + + if i == 0 { + // Prepare data for Bootstrap Job + bootstrapData := MercuryV3BootstrapJobSpecData{ + FeedName: feed.name, + VerifierAddress: verifier.Address().Hex(), + FeedID: fmt.Sprintf("%x", feed.id), + ChainID: chainID, + } + + // Create Bootstrap Job + jobSpecName, jobSpecStr = createMercuryV3BootstrapJob(bootstrapData) + } else { + // Prepare data for Mercury V3 Job + mercuryData := MercuryV3JobSpecData{ + FeedName: "feed-" + feed.name, + BootstrapHost: fmt.Sprintf("%s@%s:%d", nodeSet.NodeKeys[0].P2PPeerID, nodeSet.Nodes[0].ServiceName, p2pPort), + VerifierAddress: verifier.Address().Hex(), + Bridge: feed.bridgeName, + NodeCSAKey: n.CSAPublicKey, + FeedID: fmt.Sprintf("%x", feed.id), + LinkFeedID: fmt.Sprintf("%x", feeds[1].id), + NativeFeedID: fmt.Sprintf("%x", feeds[2].id), + OCRKeyBundleID: n.OCR2BundleID, + ChainID: chainID, + } + + // Create Mercury V3 Job + jobSpecName, jobSpecStr = createMercuryV3OracleJob(mercuryData) + } + + upsertJob(api, jobSpecName, jobSpecStr) + } +} + +// Template definitions +const mercuryV3OCR2bootstrapJobTemplate = ` +type = "bootstrap" +relay = "evm" +schemaVersion = 1 +name = "{{ .Name }}" +contractID = "{{ .VerifierAddress }}" +feedID = "0x{{ .FeedID }}" +contractConfigTrackerPollInterval = "1s" + +[relayConfig] +chainID = {{ .ChainID }} +enableTriggerCapability = true +` + +const mercuryV3OCR2OracleJobTemplate = ` +type = "offchainreporting2" +schemaVersion = 1 +name = "{{ .Name }}" +p2pv2Bootstrappers = ["{{ .BootstrapHost }}"] +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "{{ .VerifierAddress }}" +feedID = "0x{{ .FeedID }}" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "{{ .OCRKeyBundleID }}" +relay = "evm" +pluginType = "mercury" +transmitterID = "{{ .NodeCSAKey }}" +observationSource = """ + price [type=bridge name="{{ .Bridge }}" timeout="50ms" requestData=""]; + + benchmark_price [type=jsonparse path="result,mid" index=0]; + price -> benchmark_price; + + bid_price [type=jsonparse path="result,bid" index=1]; + price -> bid_price; + + ask_price [type=jsonparse path="result,ask" index=2]; + price -> ask_price; +""" + +[relayConfig] +enableTriggerCapability = true +chainID = "{{ .ChainID }}" +` + +// Data structures +type MercuryV3BootstrapJobSpecData struct { + FeedName string + // Automatically generated from FeedName + Name string + VerifierAddress string + FeedID string + ChainID int64 +} + +type MercuryV3JobSpecData struct { + FeedName string + // Automatically generated from FeedName + Name string + BootstrapHost string + 
VerifierAddress string + Bridge string + NodeCSAKey string + FeedID string + LinkFeedID string + NativeFeedID string + OCRKeyBundleID string + ChainID int64 +} + +// createMercuryV3BootstrapJob creates a bootstrap job specification using the provided data. +func createMercuryV3BootstrapJob(data MercuryV3BootstrapJobSpecData) (name string, jobSpecStr string) { + name = "boot-" + data.FeedName + data.Name = name + + fmt.Printf("Creating bootstrap job (%s):\nverifier address: %s\nfeed name: %s\nfeed ID: %s\nchain ID: %d\n", + name, data.VerifierAddress, data.FeedName, data.FeedID, data.ChainID) + + tmpl, err := template.New("bootstrapJob").Parse(mercuryV3OCR2bootstrapJobTemplate) + PanicErr(err) + + var buf bytes.Buffer + err = tmpl.Execute(&buf, data) + PanicErr(err) + + jobSpecStr = buf.String() + + return name, jobSpecStr +} + +// createMercuryV3OracleJob creates a Mercury V3 job specification using the provided data. +func createMercuryV3OracleJob(data MercuryV3JobSpecData) (name string, jobSpecStr string) { + name = "mercury-" + data.FeedName + data.Name = name + fmt.Printf("Creating ocr2 job(%s):\nOCR key bundle ID: %s\nverifier address: %s\nbridge: %s\nnodeCSAKey: %s\nfeed name: %s\nfeed ID: %s\nlink feed ID: %s\nnative feed ID: %s\nchain ID: %d\n", + data.Name, data.OCRKeyBundleID, data.VerifierAddress, data.Bridge, data.NodeCSAKey, data.FeedName, data.FeedID, data.LinkFeedID, data.NativeFeedID, data.ChainID) + + tmpl, err := template.New("mercuryV3Job").Parse(mercuryV3OCR2OracleJobTemplate) + PanicErr(err) + + var buf bytes.Buffer + err = tmpl.Execute(&buf, data) + PanicErr(err) + + jobSpecStr = buf.String() + + return data.Name, jobSpecStr +} + +func strToBytes32(str string) [32]byte { + pkBytes, err := hex.DecodeString(str) + helpers.PanicErr(err) + + pkBytesFixed := [ed25519.PublicKeySize]byte{} + n := copy(pkBytesFixed[:], pkBytes) + if n != ed25519.PublicKeySize { + fmt.Printf("wrong num elements copied (%s): %d != 32\n", str, n) + panic("wrong num elements copied") + } + return pkBytesFixed +} + +func upsertBridge(api *nodeAPI, name string, eaURL string) { + u, err := url.Parse(eaURL) + helpers.PanicErr(err) + url := models.WebURL(*u) + // Confirmations and MinimumContractPayment are not used, so we can leave them as 0 + b := bridges.BridgeTypeRequest{ + Name: bridges.MustParseBridgeName(name), + URL: url, + } + payloadb, err := json.Marshal(b) + helpers.PanicErr(err) + payload := string(payloadb) + + bridgeActionType := bridgeAction(api, b) + switch bridgeActionType { + case shouldCreateBridge: + fmt.Printf("Creating bridge (%s): %s\n", name, eaURL) + resp := api.withArg(payload).mustExec(api.methods.CreateBridge) + resource := mustJSON[presenters.BridgeResource](resp) + fmt.Printf("Created bridge: %s %s\n", resource.Name, resource.URL) + case shouldUpdateBridge: + fmt.Println("Updating existing bridge") + api.withArgs(name, payload).mustExec(api.methods.UpdateBridge) + fmt.Println("Updated bridge", name) + case shouldNoChangeBridge: + fmt.Println("No changes needed for bridge", name) + } +} + +// create enum for 3 states: create, update, no change +var ( + shouldCreateBridge = 0 + shouldUpdateBridge = 1 + shouldNoChangeBridge = 2 +) + +func bridgeAction(api *nodeAPI, existingBridge bridges.BridgeTypeRequest) int { + resp, err := api.withArg(existingBridge.Name.String()).exec(api.methods.ShowBridge) + if err != nil { + return shouldCreateBridge + } + + b := mustJSON[presenters.BridgeResource](resp) + fmt.Printf("Found matching bridge: %s with URL: %s\n", b.Name, b.URL) + if 
b.URL == existingBridge.URL.String() { + return shouldNoChangeBridge + } + return shouldUpdateBridge +} + +func generateMercuryOCR2Config(nca []NodeKeys) MercuryOCR2Config { + ctx := context.Background() + f := uint8(1) + rawOnchainConfig := mercurytypes.OnchainConfig{ + Min: big.NewInt(0), + Max: big.NewInt(math.MaxInt64), + } + + // Values were taken from Data Streams 250ms feeds, given by @austinborn + rawReportingPluginConfig := datastreamsmercury.OffchainConfig{ + ExpirationWindow: 86400, + BaseUSDFee: decimal.NewFromInt(0), + } + + onchainConfig, err := (datastreamsmercury.StandardOnchainConfigCodec{}).Encode(ctx, rawOnchainConfig) + helpers.PanicErr(err) + reportingPluginConfig, err := json.Marshal(rawReportingPluginConfig) + helpers.PanicErr(err) + + onchainPubKeys := []common.Address{} + for _, n := range nca { + onchainPubKeys = append(onchainPubKeys, common.HexToAddress(n.OCR2OnchainPublicKey)) + } + + offchainPubKeysBytes := []ocrtypes.OffchainPublicKey{} + for _, n := range nca { + pkBytesFixed := strToBytes32(n.OCR2OffchainPublicKey) + offchainPubKeysBytes = append(offchainPubKeysBytes, ocrtypes.OffchainPublicKey(pkBytesFixed)) + } + + configPubKeysBytes := []ocrtypes.ConfigEncryptionPublicKey{} + for _, n := range nca { + pkBytesFixed := strToBytes32(n.OCR2ConfigPublicKey) + configPubKeysBytes = append(configPubKeysBytes, ocrtypes.ConfigEncryptionPublicKey(pkBytesFixed)) + } + + identities := []confighelper.OracleIdentityExtra{} + for index := range nca { + transmitterAccount := ocrtypes.Account(nca[index].CSAPublicKey) + + identities = append(identities, confighelper.OracleIdentityExtra{ + OracleIdentity: confighelper.OracleIdentity{ + OnchainPublicKey: onchainPubKeys[index][:], + OffchainPublicKey: offchainPubKeysBytes[index], + PeerID: nca[index].P2PPeerID, + TransmitAccount: transmitterAccount, + }, + ConfigEncryptionPublicKey: configPubKeysBytes[index], + }) + } + + secrets := deployment.XXXGenerateTestOCRSecrets() + // Values were taken from Data Streams 250ms feeds, given by @austinborn + signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig, err := ocr3confighelper.ContractSetConfigArgsDeterministic( + secrets.EphemeralSk, + secrets.SharedSecret, + 10*time.Second, // DeltaProgress + 10*time.Second, // DeltaResend + 400*time.Millisecond, // DeltaInitial + 5*time.Second, // DeltaRound + 0, // DeltaGrace + 1*time.Second, // DeltaCertifiedCommitRequest + 0, // DeltaStage + 25, // rMax + []int{len(identities)}, // S + identities, + reportingPluginConfig, // reportingPluginConfig []byte, + nil, // maxDurationInitialization *time.Duration, + 0, // maxDurationQuery time.Duration, + 250*time.Millisecond, // Max duration observation + 0, // Max duration should accept attested report + 0, // Max duration should transmit accepted report + int(f), // f + onchainConfig, + ) + PanicErr(err) + signerAddresses, err := evm.OnchainPublicKeyToAddress(signers) + PanicErr(err) + + offChainTransmitters := make([][32]byte, len(nca)) + for i, n := range nca { + offChainTransmitters[i] = strToBytes32(n.CSAPublicKey) + } + + config := MercuryOCR2Config{ + Signers: signerAddresses, + Transmitters: offChainTransmitters, + F: f, + OnchainConfig: onchainConfig, + OffchainConfigVersion: offchainConfigVersion, + OffchainConfig: offchainConfig, + } + + return config +} + +type MercuryOCR2Config struct { + Signers []common.Address + Transmitters [][32]byte + F uint8 + OnchainConfig []byte + OffchainConfigVersion uint64 + OffchainConfig []byte +} diff --git 
a/core/scripts/keystone/src/02_provision_streams_trigger_capability_test.go b/core/scripts/keystone/src/02_provision_streams_trigger_capability_test.go new file mode 100644 index 00000000000..3a2234ba4d7 --- /dev/null +++ b/core/scripts/keystone/src/02_provision_streams_trigger_capability_test.go @@ -0,0 +1,57 @@ +package src + +import ( + "fmt" + "net/url" + "testing" + + "github.com/gkampitakis/go-snaps/snaps" +) + +var ( + chainID = int64(123456) + feedID = fmt.Sprintf("%x", [32]byte{0: 1}) + feedName = "BTC/USD" + verifierAddress = fmt.Sprintf("0x%x", [20]byte{0: 7}) +) + +func TestCreateMercuryV3Job(t *testing.T) { + ocrKeyBundleID := "ocr_key_bundle_id" + nodeCSAKey := "node_csa_key" + bridgeName := "bridge_name" + linkFeedID := fmt.Sprintf("%x", [32]byte{0: 2}) + nativeFeedID := fmt.Sprintf("%x", [32]byte{0: 3}) + u, err := url.Parse("https://crib-henry-keystone-node1.main.stage.cldev.sh") + if err != nil { + t.Fatal(err) + } + + jobConfigData := MercuryV3JobSpecData{ + BootstrapHost: u.Hostname(), + VerifierAddress: verifierAddress, + OCRKeyBundleID: ocrKeyBundleID, + NodeCSAKey: nodeCSAKey, + Bridge: bridgeName, + FeedName: feedName, + FeedID: feedID, + LinkFeedID: linkFeedID, + NativeFeedID: nativeFeedID, + ChainID: chainID, + } + _, output := createMercuryV3OracleJob(jobConfigData) + + snaps.MatchSnapshot(t, output) +} + +func TestCreateMercuryBootstrapJob(t *testing.T) { + jobConfigData := MercuryV3BootstrapJobSpecData{ + FeedName: feedName, + FeedID: feedID, + ChainID: chainID, + VerifierAddress: verifierAddress, + } + + _, output := createMercuryV3BootstrapJob(jobConfigData) + + snaps.MatchSnapshot(t, output) +} diff --git a/core/scripts/keystone/src/03_gen_crib_cluster_overrides_cmd.go b/core/scripts/keystone/src/03_gen_crib_cluster_overrides_cmd.go deleted file mode 100644 index 6b98951459e..00000000000 --- a/core/scripts/keystone/src/03_gen_crib_cluster_overrides_cmd.go +++ /dev/null @@ -1,86 +0,0 @@ -package src - -import ( - "flag" - "os" - "path/filepath" - "strings" - - helpers "github.com/smartcontractkit/chainlink/core/scripts/common" -) - -type generateCribClusterOverrides struct{} - -func NewGenerateCribClusterOverridesCommand() *generateCribClusterOverrides { - return &generateCribClusterOverrides{} -} - -func (g *generateCribClusterOverrides) Name() string { - return "generate-crib" -} - -func (g *generateCribClusterOverrides) Run(args []string) { - fs := flag.NewFlagSet(g.Name(), flag.ContinueOnError) - chainID := fs.Int64("chainid", 11155111, "chain id") - outputPath := fs.String("outpath", "../crib", "the path to output the generated overrides") - publicKeys := fs.String("publickeys", "", "Custom public keys json location") - nodeList := fs.String("nodes", "", "Custom node list location") - artefactsDir := fs.String("artefacts", "", "Custom artefacts directory location") - - templatesDir := "templates" - err := fs.Parse(args) - if err != nil || outputPath == nil || *outputPath == "" || chainID == nil || *chainID == 0 { - fs.Usage() - os.Exit(1) - } - - if *artefactsDir == "" { - *artefactsDir = defaultArtefactsDir - } - if *publicKeys == "" { - *publicKeys = defaultPublicKeys - } - if *nodeList == "" { - *nodeList = defaultNodeList - } - - deployedContracts, err := LoadDeployedContracts(*artefactsDir) - helpers.PanicErr(err) - - lines := generateCribConfig(*nodeList, *publicKeys, chainID, templatesDir, deployedContracts.ForwarderContract.Hex()) - - cribOverridesStr := strings.Join(lines, "\n") - err = os.WriteFile(filepath.Join(*outputPath, 
"crib-cluster-overrides.yaml"), []byte(cribOverridesStr), 0600) - helpers.PanicErr(err) -} - -func generateCribConfig(nodeList string, pubKeysPath string, chainID *int64, templatesDir string, forwarderAddress string) []string { - nca := downloadNodePubKeys(nodeList, *chainID, pubKeysPath) - nodeAddresses := []string{} - - for _, node := range nca[1:] { - nodeAddresses = append(nodeAddresses, node.EthAddress) - } - - lines, err := readLines(filepath.Join(templatesDir, cribOverrideTemplate)) - helpers.PanicErr(err) - lines = replaceCribPlaceholders(lines, forwarderAddress, nodeAddresses) - return lines -} - -func replaceCribPlaceholders( - lines []string, - forwarderAddress string, - nodeFromAddresses []string, -) (output []string) { - for _, l := range lines { - l = strings.Replace(l, "{{ forwarder_address }}", forwarderAddress, 1) - l = strings.Replace(l, "{{ node_2_address }}", nodeFromAddresses[0], 1) - l = strings.Replace(l, "{{ node_3_address }}", nodeFromAddresses[1], 1) - l = strings.Replace(l, "{{ node_4_address }}", nodeFromAddresses[2], 1) - l = strings.Replace(l, "{{ node_5_address }}", nodeFromAddresses[3], 1) - output = append(output, l) - } - - return output -} diff --git a/core/scripts/keystone/src/03_gen_crib_cluster_overrides_cmd_test.go b/core/scripts/keystone/src/03_gen_crib_cluster_overrides_cmd_test.go deleted file mode 100644 index 53d43c2342f..00000000000 --- a/core/scripts/keystone/src/03_gen_crib_cluster_overrides_cmd_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package src - -import ( - "strings" - "testing" - - "github.com/gkampitakis/go-snaps/snaps" -) - -func TestGenerateCribConfig(t *testing.T) { - chainID := int64(11155111) - templatesDir := "../templates" - forwarderAddress := "0x1234567890abcdef" - publicKeysPath := "./testdata/PublicKeys.json" - - lines := generateCribConfig(defaultNodeList, publicKeysPath, &chainID, templatesDir, forwarderAddress) - - snaps.MatchSnapshot(t, strings.Join(lines, "\n")) -} diff --git a/core/scripts/keystone/src/04_delete_ocr3_jobs_cmd.go b/core/scripts/keystone/src/04_delete_ocr3_jobs_cmd.go deleted file mode 100644 index 136691962dd..00000000000 --- a/core/scripts/keystone/src/04_delete_ocr3_jobs_cmd.go +++ /dev/null @@ -1,101 +0,0 @@ -package src - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "os" - - "github.com/urfave/cli" - - helpers "github.com/smartcontractkit/chainlink/core/scripts/common" -) - -type deleteJobs struct{} - -type OCRSpec struct { - ContractID string -} - -type BootSpec struct { - ContractID string -} - -type WorkflowSpec struct { - WorkflowID string -} - -type JobSpec struct { - Id string - Name string - BootstrapSpec BootSpec - OffChainReporting2OracleSpec OCRSpec - WorkflowSpec WorkflowSpec -} - -func NewDeleteJobsCommand() *deleteJobs { - return &deleteJobs{} -} - -func (g *deleteJobs) Name() string { - return "delete-ocr3-jobs" -} - -func (g *deleteJobs) Run(args []string) { - fs := flag.NewFlagSet(g.Name(), flag.ContinueOnError) - nodeList := fs.String("nodes", "", "Custom node list location") - artefactsDir := fs.String("artefacts", "", "Custom artefacts directory location") - - err := fs.Parse(args) - if err != nil { - fs.Usage() - os.Exit(1) - } - - if *artefactsDir == "" { - *artefactsDir = defaultArtefactsDir - } - if *nodeList == "" { - *nodeList = defaultNodeList - } - - deployedContracts, err := LoadDeployedContracts(*artefactsDir) - helpers.PanicErr(err) - nodes := downloadNodeAPICredentials(*nodeList) - - for _, node := range nodes { - output := &bytes.Buffer{} - client, app := 
newApp(node, output) - - fmt.Println("Logging in:", node.url) - loginFs := flag.NewFlagSet("test", flag.ContinueOnError) - loginFs.Bool("bypass-version-check", true, "") - loginCtx := cli.NewContext(app, loginFs, nil) - err := client.RemoteLogin(loginCtx) - helpers.PanicErr(err) - output.Reset() - - fileFs := flag.NewFlagSet("test", flag.ExitOnError) - err = client.ListJobs(cli.NewContext(app, fileFs, nil)) - helpers.PanicErr(err) - - var parsed []JobSpec - err = json.Unmarshal(output.Bytes(), &parsed) - helpers.PanicErr(err) - - for _, jobSpec := range parsed { - if jobSpec.BootstrapSpec.ContractID == deployedContracts.OCRContract.String() || - jobSpec.OffChainReporting2OracleSpec.ContractID == deployedContracts.OCRContract.String() { - fmt.Println("Deleting OCR3 job ID:", jobSpec.Id, "name:", jobSpec.Name) - set := flag.NewFlagSet("test", flag.ExitOnError) - err = set.Parse([]string{jobSpec.Id}) - helpers.PanicErr(err) - err = client.DeleteJob(cli.NewContext(app, set, nil)) - helpers.PanicErr(err) - } - } - - output.Reset() - } -} diff --git a/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go b/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go index b7fc9df2c88..203c473a4b7 100644 --- a/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go +++ b/core/scripts/keystone/src/05_deploy_initialize_capabilities_registry.go @@ -2,19 +2,14 @@ package src import ( "context" - "encoding/hex" "flag" "fmt" "log" "os" - "strings" "github.com/ethereum/go-ethereum/common" "google.golang.org/protobuf/proto" - ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" - - capabilitiespb "github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb" "github.com/smartcontractkit/chainlink-common/pkg/values" "github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -30,7 +25,7 @@ type peer struct { } var ( - workflowDonPeers = []peer{ + hardcodedWorkflowDonPeers = []peer{ { PeerID: "12D3KooWQXfwA26jysiKKPXKuHcJtWTbGSwzoJxj4rYtEJyQTnFj", Signer: "0xC44686106b85687F741e1d6182a5e2eD2211a115", @@ -70,74 +65,6 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Name() string { return "deploy-and-initialize-capabilities-registry" } -func peerIDToB(peerID string) ([32]byte, error) { - var peerIDB ragetypes.PeerID - err := peerIDB.UnmarshalText([]byte(peerID)) - if err != nil { - return [32]byte{}, err - } - - return peerIDB, nil -} - -func peers(ps []peer) ([][32]byte, error) { - out := [][32]byte{} - for _, p := range ps { - b, err := peerIDToB(p.PeerID) - if err != nil { - return nil, err - } - - out = append(out, b) - } - - return out, nil -} - -func peerToNode(nopID uint32, p peer) (kcr.CapabilitiesRegistryNodeParams, error) { - peerIDB, err := peerIDToB(p.PeerID) - if err != nil { - return kcr.CapabilitiesRegistryNodeParams{}, fmt.Errorf("failed to convert peerID: %w", err) - } - - sig := strings.TrimPrefix(p.Signer, "0x") - signerB, err := hex.DecodeString(sig) - if err != nil { - return kcr.CapabilitiesRegistryNodeParams{}, fmt.Errorf("failed to convert signer: %w", err) - } - - keyStr := strings.TrimPrefix(p.EncryptionPublicKey, "0x") - encKey, err := hex.DecodeString(keyStr) - if err != nil { - return kcr.CapabilitiesRegistryNodeParams{}, fmt.Errorf("failed to convert encryptionPublicKey: %w", err) - } - - var sigb [32]byte - var encKeyB [32]byte - copy(sigb[:], signerB) - copy(encKeyB[:], encKey) - - return kcr.CapabilitiesRegistryNodeParams{ - NodeOperatorId: nopID, - P2pId: peerIDB, - Signer: sigb, - EncryptionPublicKey: encKeyB, - 
}, nil -} - -// newCapabilityConfig returns a new capability config with the default config set as empty. -// Override the empty default config with functional options. -func newCapabilityConfig(opts ...func(*values.Map)) *capabilitiespb.CapabilityConfig { - dc := values.EmptyMap() - for _, opt := range opts { - opt(dc) - } - - return &capabilitiespb.CapabilityConfig{ - DefaultConfig: values.ProtoMap(dc), - } -} - // withDefaultConfig returns a function that sets the default config for a capability by merging // the provided map with the existing default config. This is a shallow merge. func withDefaultConfig(m map[string]any) func(*values.Map) { @@ -163,7 +90,7 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) { // create flags for all of the env vars then set the env vars to normalize the interface // this is a bit of a hack but it's the easiest way to make this work ethUrl := fs.String("ethurl", "", "URL of the Ethereum node") - chainID := fs.Int64("chainid", 11155111, "chain ID of the Ethereum network to deploy to") + chainID := fs.Int64("chainid", 1337, "chain ID of the Ethereum network to deploy to") accountKey := fs.String("accountkey", "", "private key of the account to deploy from") capabilityRegistryAddress := fs.String("craddress", "", "address of the capability registry") @@ -179,6 +106,7 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) { os.Setenv("ETH_URL", *ethUrl) os.Setenv("ETH_CHAIN_ID", fmt.Sprintf("%d", *chainID)) os.Setenv("ACCOUNT_KEY", *accountKey) + os.Setenv("INSECURE_SKIP_VERIFY", "true") env := helpers.SetupEnv(false) @@ -288,7 +216,7 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) { nopID := recLog.NodeOperatorId nodes := []kcr.CapabilitiesRegistryNodeParams{} - for _, wfPeer := range workflowDonPeers { + for _, wfPeer := range hardcodedWorkflowDonPeers { n, innerErr := peerToNode(nopID, wfPeer) if innerErr != nil { panic(innerErr) @@ -306,7 +234,7 @@ func (c *deployAndInitializeCapabilitiesRegistryCommand) Run(args []string) { helpers.ConfirmTXMined(ctx, env.Ec, tx, env.ChainID) // workflow DON - ps, err := peers(workflowDonPeers) + ps, err := peers(hardcodedWorkflowDonPeers) if err != nil { panic(err) } diff --git a/core/scripts/keystone/src/06_deploy_workflows_cmd.go b/core/scripts/keystone/src/06_deploy_workflows_cmd.go deleted file mode 100644 index 0ca8e5d4a7b..00000000000 --- a/core/scripts/keystone/src/06_deploy_workflows_cmd.go +++ /dev/null @@ -1,71 +0,0 @@ -package src - -import ( - "bytes" - "errors" - "flag" - "fmt" - "os" - - "github.com/urfave/cli" - - helpers "github.com/smartcontractkit/chainlink/core/scripts/common" -) - -type deployWorkflows struct{} - -func NewDeployWorkflowsCommand() *deployWorkflows { - return &deployWorkflows{} -} - -func (g *deployWorkflows) Name() string { - return "deploy-workflows" -} - -func (g *deployWorkflows) Run(args []string) { - fs := flag.NewFlagSet(g.Name(), flag.ContinueOnError) - workflowFile := fs.String("workflow", "workflow.yml", "path to workflow file") - nodeList := fs.String("nodes", "", "Custom node list location") - err := fs.Parse(args) - if err != nil || workflowFile == nil || *workflowFile == "" { - fs.Usage() - os.Exit(1) - } - if *nodeList == "" { - *nodeList = defaultNodeList - } - fmt.Println("Deploying workflows") - - // use a separate list - nodes := downloadNodeAPICredentials(*nodeList) - - if _, err = os.Stat(*workflowFile); err != nil { - PanicErr(errors.New("toml file does not exist")) - } - - for 
i, n := range nodes { - if i == 0 { - continue // skip bootstrap node - } - output := &bytes.Buffer{} - client, app := newApp(n, output) - fmt.Println("Logging in:", n.url) - loginFs := flag.NewFlagSet("test", flag.ContinueOnError) - loginFs.Bool("bypass-version-check", true, "") - loginCtx := cli.NewContext(app, loginFs, nil) - err := client.RemoteLogin(loginCtx) - helpers.PanicErr(err) - output.Reset() - - fmt.Printf("Deploying workflow\n... \n") - fs := flag.NewFlagSet("test", flag.ExitOnError) - err = fs.Parse([]string{*workflowFile}) - - helpers.PanicErr(err) - err = client.CreateJob(cli.NewContext(app, fs, nil)) - if err != nil { - fmt.Println("Failed to deploy workflow:", "Error:", err) - } - output.Reset() - } -} diff --git a/core/scripts/keystone/src/07_delete_workflows_cmd.go b/core/scripts/keystone/src/07_delete_workflows_cmd.go deleted file mode 100644 index cccedaf9e70..00000000000 --- a/core/scripts/keystone/src/07_delete_workflows_cmd.go +++ /dev/null @@ -1,74 +0,0 @@ -package src - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "os" - - "github.com/urfave/cli" - - helpers "github.com/smartcontractkit/chainlink/core/scripts/common" -) - -type deleteWorkflows struct{} - -func NewDeleteWorkflowsCommand() *deleteWorkflows { - return &deleteWorkflows{} -} - -func (g *deleteWorkflows) Name() string { - return "delete-workflows" -} - -func (g *deleteWorkflows) Run(args []string) { - fs := flag.NewFlagSet(g.Name(), flag.ExitOnError) - nodeList := fs.String("nodes", "", "Custom node list location") - - err := fs.Parse(args) - if err != nil { - fs.Usage() - os.Exit(1) - } - - if *nodeList == "" { - *nodeList = defaultNodeList - } - - nodes := downloadNodeAPICredentials(*nodeList) - - for _, node := range nodes { - output := &bytes.Buffer{} - client, app := newApp(node, output) - - fmt.Println("Logging in:", node.url) - loginFs := flag.NewFlagSet("test", flag.ContinueOnError) - loginFs.Bool("bypass-version-check", true, "") - loginCtx := cli.NewContext(app, loginFs, nil) - err := client.RemoteLogin(loginCtx) - helpers.PanicErr(err) - output.Reset() - - fileFs := flag.NewFlagSet("test", flag.ExitOnError) - err = client.ListJobs(cli.NewContext(app, fileFs, nil)) - helpers.PanicErr(err) - - var parsed []JobSpec - err = json.Unmarshal(output.Bytes(), &parsed) - helpers.PanicErr(err) - - for _, jobSpec := range parsed { - if jobSpec.WorkflowSpec.WorkflowID != "" { - fmt.Println("Deleting workflow job ID:", jobSpec.Id, "name:", jobSpec.Name) - set := flag.NewFlagSet("test", flag.ExitOnError) - err = set.Parse([]string{jobSpec.Id}) - helpers.PanicErr(err) - err = client.DeleteJob(cli.NewContext(app, set, nil)) - helpers.PanicErr(err) - } - } - - output.Reset() - } -} diff --git a/core/scripts/keystone/src/88_capabilities_registry_helpers.go b/core/scripts/keystone/src/88_capabilities_registry_helpers.go new file mode 100644 index 00000000000..5494375aa4f --- /dev/null +++ b/core/scripts/keystone/src/88_capabilities_registry_helpers.go @@ -0,0 +1,578 @@ +package src + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "log" + "strings" + "time" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + gethCommon "github.com/ethereum/go-ethereum/common" + gethTypes "github.com/ethereum/go-ethereum/core/types" + ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + + 
capabilitiespb "github.com/smartcontractkit/chainlink-common/pkg/capabilities/pb" + "github.com/smartcontractkit/chainlink-common/pkg/values" + helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + evmclient "github.com/smartcontractkit/chainlink/v2/core/chains/evm/client" + evmtypes "github.com/smartcontractkit/chainlink/v2/core/chains/evm/types" + kcr "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry_1_1_0" +) + +type CapabilityRegistryProvisioner struct { + reg kcr.CapabilitiesRegistryInterface + env helpers.Environment +} + +func NewCapabilityRegistryProvisioner(reg kcr.CapabilitiesRegistryInterface, env helpers.Environment) *CapabilityRegistryProvisioner { + return &CapabilityRegistryProvisioner{reg: reg, env: env} +} + +func extractRevertReason(errData string, a abi.ABI) (string, string, error) { + data, err := hex.DecodeString(errData[2:]) + if err != nil { + return "", "", err + } + + for errName, abiError := range a.Errors { + if bytes.Equal(data[:4], abiError.ID.Bytes()[:4]) { + // Found a matching error + v, err := abiError.Unpack(data) + if err != nil { + return "", "", err + } + b, err := json.Marshal(v) + if err != nil { + return "", "", err + } + return errName, string(b), nil + } + } + return "", "", errors.New("revert Reason could not be found for given abistring") +} + +func (c *CapabilityRegistryProvisioner) testCallContract(method string, args ...interface{}) error { + abi := evmtypes.MustGetABI(kcr.CapabilitiesRegistryABI) + data, err := abi.Pack(method, args...) + helpers.PanicErr(err) + cAddress := c.reg.Address() + gasPrice, err := c.env.Ec.SuggestGasPrice(context.Background()) + helpers.PanicErr(err) + + msg := ethereum.CallMsg{ + From: c.env.Owner.From, + To: &cAddress, + Data: data, + Gas: 10_000_000, + GasPrice: gasPrice, + } + _, err = c.env.Ec.CallContract(context.Background(), msg, nil) + if err != nil { + if err.Error() == "execution reverted" { + rpcError, ierr := evmclient.ExtractRPCError(err) + if ierr != nil { + return ierr + } + + reason, abiErr, ierr := extractRevertReason(rpcError.Data.(string), abi) + if ierr != nil { + return ierr + } + + e := fmt.Errorf("failed to call %s: reason: %s reasonargs: %s", method, reason, abiErr) + return e + } + + return err + } + + return nil +} + +// AddCapabilities takes a capability set and provisions it in the registry. +func (c *CapabilityRegistryProvisioner) AddCapabilities(ctx context.Context, capSet CapabilitySet) { + fmt.Printf("Adding capabilities to registry: %s\n", capSet.IDs()) + tx, err := c.reg.AddCapabilities(c.env.Owner, capSet.Capabilities()) + + helpers.PanicErr(err) + helpers.ConfirmTXMined(ctx, c.env.Ec, tx, c.env.ChainID) +} + +// AddNodeOperator takes a node operator and provisions it in the registry. +// +// A node operator is a group of nodes that are all controlled by the same entity. The admin address is the +// address that controls the node operator. +// +// The name is a human-readable name for the node operator. +// +// The node operator is then added to the registry, and the registry will issue an ID for the node operator. +// The ID is then used when adding nodes to the registry such that the registry knows which nodes belong to which +// node operator. 
+func (c *CapabilityRegistryProvisioner) AddNodeOperator(ctx context.Context, nop *NodeOperator) { + fmt.Printf("Adding NodeOperator to registry: %s\n", nop.Name) + nop.BindToRegistry(c.reg) + + nops, err := c.reg.GetNodeOperators(&bind.CallOpts{}) + if err != nil { + log.Printf("failed to GetNodeOperators: %s", err) + } + for _, n := range nops { + if n.Admin == nop.Admin { + log.Printf("NodeOperator with admin address %s already exists", n.Admin.Hex()) + return + } + } + + tx, err := c.reg.AddNodeOperators(c.env.Owner, []kcr.CapabilitiesRegistryNodeOperator{ + { + Admin: nop.Admin, + Name: nop.Name, + }, + }) + if err != nil { + log.Printf("failed to AddNodeOperators: %s", err) + } + + receipt := helpers.ConfirmTXMined(ctx, c.env.Ec, tx, c.env.ChainID) + nop.SetCapabilityRegistryIssuedID(receipt) +} + +// AddNodes takes a node operators nodes, along with a capability set, then configures the registry such that +// each node is assigned the same capability set. The registry will then know that each node supports each of the +// capabilities in the set. +// +// This is a simplified version of the actual implementation, which is more flexible. The actual implementation +// allows for the ability to add different capability sets to different nodes, _and_ lets you add nodes from different +// node operators to the same capability set. This is not yet implemented here. +// +// Note that the registry must already have the capability set added via `AddCapabilities`, you cannot +// add capabilities that the registry is not yet aware of. +// +// Note that in terms of the provisioning process, this is not the last step. A capability is only active once +// there is a DON servicing it. This is done via `AddDON`. +func (c *CapabilityRegistryProvisioner) AddNodes(ctx context.Context, nop *NodeOperator, donNames ...string) { + fmt.Printf("Adding nodes to registry for NodeOperator %s with DONs: %v\n", nop.Name, donNames) + var params []kcr.CapabilitiesRegistryNodeParams + for _, donName := range donNames { + don, exists := nop.DONs[donName] + if !exists { + log.Fatalf("DON with name %s does not exist in NodeOperator %s", donName, nop.Name) + } + capSet := don.CapabilitySet + for i, peer := range don.Peers { + node, innerErr := peerToNode(nop.id, peer) + if innerErr != nil { + panic(innerErr) + } + node.HashedCapabilityIds = capSet.HashedIDs(c.reg) + node.EncryptionPublicKey = [32]byte{2: byte(i + 1)} + fmt.Printf("Adding node %s to registry with capabilities: %s\n", peer.PeerID, capSet.IDs()) + params = append(params, node) + } + } + + err := c.testCallContract("addNodes", params) + PanicErr(err) + + tx, err := c.reg.AddNodes(c.env.Owner, params) + if err != nil { + log.Printf("failed to AddNodes: %s", err) + } + helpers.ConfirmTXMined(ctx, c.env.Ec, tx, c.env.ChainID) +} + +// AddDON takes a node operator then provisions a DON with the given capabilities. +// +// A DON is a group of nodes that all support the same capability set. This set can be a subset of the +// capabilities that the nodes support. In other words, each node within the node set can support +// a different, possibly overlapping, set of capabilities, but a DON is a subgroup of those nodes that all support +// the same set of capabilities. +// +// A node can belong to multiple DONs, but it must belong to one and only one workflow DON. +// +// A DON can be a capability DON or a workflow DON, or both. +// +// When you want to add solely a workflow DON, you should set `acceptsWorkflows` to true and +// `isPublic` to false. 
+// This means that the DON can service workflow requests and will not service external capability requests. +// +// If you want to add solely a capability DON, you should set `acceptsWorkflows` to false and `isPublic` to true. This means that the DON +// will service external capability requests and reject workflow requests. +// +// If you want to add a DON that services both capabilities and workflows, you should set both `acceptsWorkflows` and `isPublic` to true. +// +// Another important distinction is that DON can comprise of nodes from different node operators, but for now, we're keeping it simple and restricting it to a single node operator. We also hard code F to 1. +func (c *CapabilityRegistryProvisioner) AddDON(ctx context.Context, nop *NodeOperator, donName string, isPublic bool, acceptsWorkflows bool) { + fmt.Printf("Adding DON %s to registry for NodeOperator %s with isPublic: %t and acceptsWorkflows: %t\n", donName, nop.Name, isPublic, acceptsWorkflows) + don, exists := nop.DONs[donName] + if !exists { + log.Fatalf("DON with name %s does not exist in NodeOperator %s", donName, nop.Name) + } + configs := don.CapabilitySet.Configs(c.reg) + + err := c.testCallContract("addDON", don.MustGetPeerIDs(), configs, isPublic, acceptsWorkflows, don.F) + PanicErr(err) + + tx, err := c.reg.AddDON(c.env.Owner, don.MustGetPeerIDs(), configs, isPublic, acceptsWorkflows, don.F) + + if err != nil { + log.Printf("failed to AddDON: %s", err) + } + helpers.ConfirmTXMined(ctx, c.env.Ec, tx, c.env.ChainID) +} + +/* + * + * Capabilities + * + * + */ +const ( // Taken from https://github.com/smartcontractkit/chainlink/blob/29117850e9be1be1993dbf8f21cf13cbb6af9d24/core/capabilities/integration_tests/keystone_contracts_setup.go#L43 + CapabilityTypeTrigger = uint8(0) + CapabilityTypeAction = uint8(1) + CapabilityTypeConsensus = uint8(2) + CapabilityTypeTarget = uint8(3) +) + +type CapabillityProvisioner interface { + Config() kcr.CapabilitiesRegistryCapabilityConfiguration + Capability() kcr.CapabilitiesRegistryCapability + BindToRegistry(reg kcr.CapabilitiesRegistryInterface) + GetHashedCID() [32]byte +} + +type baseCapability struct { + registry kcr.CapabilitiesRegistryInterface + capability kcr.CapabilitiesRegistryCapability +} + +func (b *baseCapability) BindToRegistry(reg kcr.CapabilitiesRegistryInterface) { + b.registry = reg +} + +func (b *baseCapability) GetHashedCID() [32]byte { + if b.registry == nil { + panic(errors.New("registry not bound to capability, cannot get hashed capability ID")) + } + + return mustHashCapabilityID(b.registry, b.capability) +} + +func (b *baseCapability) GetID() string { + return fmt.Sprintf("%s@%s", b.capability.LabelledName, b.capability.Version) +} + +func (b *baseCapability) config(config *capabilitiespb.CapabilityConfig) kcr.CapabilitiesRegistryCapabilityConfiguration { + configBytes, err := proto.Marshal(config) + if err != nil { + panic(err) + } + + return kcr.CapabilitiesRegistryCapabilityConfiguration{ + Config: configBytes, + CapabilityId: b.GetHashedCID(), + } +} + +func (b *baseCapability) Capability() kcr.CapabilitiesRegistryCapability { + return b.capability +} + +type ConsensusCapability struct { + baseCapability +} + +var _ CapabillityProvisioner = &ConsensusCapability{} + +func (c *ConsensusCapability) Config() kcr.CapabilitiesRegistryCapabilityConfiguration { + // Note that this is hard-coded for now, we'll want to support more flexible configurations in the future + // for configuring consensus once it has more configuration options + config := 
&capabilitiespb.CapabilityConfig{ + DefaultConfig: values.Proto(values.EmptyMap()).GetMapValue(), + } + + return c.config(config) +} + +// NewOCR3V1ConsensusCapability returns a new ConsensusCapability for OCR3 +func NewOCR3V1ConsensusCapability() *ConsensusCapability { + return &ConsensusCapability{ + baseCapability{ + capability: kcr.CapabilitiesRegistryCapability{ + LabelledName: "offchain_reporting", + Version: "1.0.0", + CapabilityType: CapabilityTypeConsensus, + }, + }, + } +} + +type TargetCapability struct { + baseCapability +} + +var _ CapabillityProvisioner = &TargetCapability{} + +func (t *TargetCapability) Config() kcr.CapabilitiesRegistryCapabilityConfiguration { + // Note that this is hard-coded for now, we'll want to support more flexible configurations in the future + // for configuring the target. This configuration is also specific to the write target + config := &capabilitiespb.CapabilityConfig{ + DefaultConfig: values.Proto(values.EmptyMap()).GetMapValue(), + RemoteConfig: &capabilitiespb.CapabilityConfig_RemoteTargetConfig{ + RemoteTargetConfig: &capabilitiespb.RemoteTargetConfig{ + RequestHashExcludedAttributes: []string{"signed_report.Signatures"}, + }, + }, + } + + return t.config(config) +} + +func NewEthereumGethTestnetV1WriteCapability() *TargetCapability { + return &TargetCapability{ + baseCapability{ + capability: kcr.CapabilitiesRegistryCapability{ + LabelledName: "write_geth-testnet", + Version: "1.0.0", + CapabilityType: CapabilityTypeTarget, + }, + }, + } +} + +type TriggerCapability struct { + baseCapability +} + +var _ CapabillityProvisioner = &TriggerCapability{} + +func (t *TriggerCapability) Config() kcr.CapabilitiesRegistryCapabilityConfiguration { + // Note that this is hard-coded for now, we'll want to support more flexible configurations in the future + // for configuring the trigger. This configuration is also possibly specific to the streams trigger. + config := &capabilitiespb.CapabilityConfig{ + DefaultConfig: values.Proto(values.EmptyMap()).GetMapValue(), + RemoteConfig: &capabilitiespb.CapabilityConfig_RemoteTriggerConfig{ + RemoteTriggerConfig: &capabilitiespb.RemoteTriggerConfig{ + RegistrationRefresh: durationpb.New(20 * time.Second), + RegistrationExpiry: durationpb.New(60 * time.Second), + MinResponsesToAggregate: uint32(1) + 1, // We've hardcoded F + 1 here + }, + }, + } + + return t.config(config) +} + +func NewStreamsTriggerV1Capability() *TriggerCapability { + return &TriggerCapability{ + baseCapability{ + capability: kcr.CapabilitiesRegistryCapability{ + LabelledName: "streams-trigger", + Version: "1.1.0", + CapabilityType: CapabilityTypeTrigger, + }, + }, + } +} + +func mustHashCapabilityID(reg kcr.CapabilitiesRegistryInterface, capability kcr.CapabilitiesRegistryCapability) [32]byte { + hashedCapabilityID, err := reg.GetHashedCapabilityId(&bind.CallOpts{}, capability.LabelledName, capability.Version) + if err != nil { + panic(err) + } + return hashedCapabilityID +} + +/* + * + * Capability Sets + * + * + */ +type CapabilitySet []CapabillityProvisioner + +func NewCapabilitySet(capabilities ...CapabillityProvisioner) CapabilitySet { + if len(capabilities) == 0 { + log.Fatalf("No capabilities provided to NewCapabilitySet") + } + + return capabilities +} + +func MergeCapabilitySets(sets ...CapabilitySet) CapabilitySet { + var merged CapabilitySet + for _, set := range sets { + merged = append(merged, set...) 
+ } + + return merged +} + +func (c *CapabilitySet) Capabilities() []kcr.CapabilitiesRegistryCapability { + definitions := make([]kcr.CapabilitiesRegistryCapability, 0, len(*c)) + for _, cap := range *c { + definitions = append(definitions, cap.Capability()) + } + + return definitions +} + +func (c *CapabilitySet) IDs() []string { + strings := make([]string, 0, len(*c)) + for _, cap := range *c { + strings = append(strings, fmt.Sprintf("%s@%s", cap.Capability().LabelledName, cap.Capability().Version)) + } + + return strings +} + +func (c *CapabilitySet) HashedIDs(reg kcr.CapabilitiesRegistryInterface) [][32]byte { + ids := make([][32]byte, 0, len(*c)) + for _, cap := range *c { + cap.BindToRegistry(reg) + ids = append(ids, cap.GetHashedCID()) + } + + return ids +} + +func (c *CapabilitySet) Configs(reg kcr.CapabilitiesRegistryInterface) []kcr.CapabilitiesRegistryCapabilityConfiguration { + configs := make([]kcr.CapabilitiesRegistryCapabilityConfiguration, 0, len(*c)) + for _, cap := range *c { + cap.BindToRegistry(reg) + configs = append(configs, cap.Config()) + } + + return configs +} + +/* + * + * Node Operator + * + * + */ + +// DON represents a Decentralized Oracle Network with a name, peers, and associated capabilities. +type DON struct { + F uint8 + Name string + Peers []peer + CapabilitySet CapabilitySet +} + +// MustGetPeerIDs retrieves the peer IDs for the DON. It panics if any error occurs. +func (d *DON) MustGetPeerIDs() [][32]byte { + ps, err := peers(d.Peers) + if err != nil { + panic(fmt.Errorf("failed to get peer IDs for DON %s: %w", d.Name, err)) + } + return ps +} + +// NodeOperator represents a node operator with administrative details and multiple DONs. +type NodeOperator struct { + Admin gethCommon.Address + Name string + DONs map[string]DON + + reg kcr.CapabilitiesRegistryInterface + // This ID is generated by the registry when the NodeOperator is added + id uint32 +} + +// NewNodeOperator creates a new NodeOperator with the provided admin address, name, and DONs. 
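+// The DONs map is keyed by the name that is later passed to AddNodes and AddDON, for example
+// (adminAddr, the peer slices and the capability sets are placeholders):
+//
+//	nop := NewNodeOperator(adminAddr, "KEYSTONE_NOP", map[string]DON{
+//		"workflow":        {Name: "workflow", F: 1, Peers: wfPeers, CapabilitySet: wfCapSet},
+//		"streams-trigger": {Name: "streams-trigger", F: 1, Peers: trigPeers, CapabilitySet: trigCapSet},
+//	})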
+func NewNodeOperator(admin gethCommon.Address, name string, dons map[string]DON) *NodeOperator { + return &NodeOperator{ + Admin: admin, + Name: name, + DONs: dons, + } +} + +func (n *NodeOperator) BindToRegistry(reg kcr.CapabilitiesRegistryInterface) { + n.reg = reg +} + +func (n *NodeOperator) SetCapabilityRegistryIssuedID(receipt *gethTypes.Receipt) uint32 { + if n.reg == nil { + panic(errors.New("registry not bound to node operator, cannot set ID")) + } + // We'll need more complex handling for multiple node operators + // since we'll need to handle log ordering + recLog, err := n.reg.ParseNodeOperatorAdded(*receipt.Logs[0]) + if err != nil { + panic(err) + } + + n.id = recLog.NodeOperatorId + return n.id +} + +func peerIDToB(peerID string) ([32]byte, error) { + var peerIDB ragetypes.PeerID + err := peerIDB.UnmarshalText([]byte(peerID)) + if err != nil { + return [32]byte{}, err + } + + return peerIDB, nil +} + +func peers(ps []peer) ([][32]byte, error) { + out := [][32]byte{} + for _, p := range ps { + b, err := peerIDToB(p.PeerID) + if err != nil { + return nil, err + } + + out = append(out, b) + } + + return out, nil +} + +func peerToNode(nopID uint32, p peer) (kcr.CapabilitiesRegistryNodeParams, error) { + peerIDB, err := peerIDToB(p.PeerID) + if err != nil { + return kcr.CapabilitiesRegistryNodeParams{}, fmt.Errorf("failed to convert peerID: %w", err) + } + + sig := strings.TrimPrefix(p.Signer, "0x") + signerB, err := hex.DecodeString(sig) + if err != nil { + return kcr.CapabilitiesRegistryNodeParams{}, fmt.Errorf("failed to convert signer: %w", err) + } + + var sigb [32]byte + copy(sigb[:], signerB) + + return kcr.CapabilitiesRegistryNodeParams{ + NodeOperatorId: nopID, + P2pId: peerIDB, + Signer: sigb, + }, nil +} + +// newCapabilityConfig returns a new capability config with the default config set as empty. +// Override the empty default config with functional options. +func newCapabilityConfig(opts ...func(*values.Map)) *capabilitiespb.CapabilityConfig { + dc := values.EmptyMap() + for _, opt := range opts { + opt(dc) + } + + return &capabilitiespb.CapabilityConfig{ + DefaultConfig: values.ProtoMap(dc), + } +} diff --git a/core/scripts/keystone/src/88_contracts_helpers.go b/core/scripts/keystone/src/88_contracts_helpers.go new file mode 100644 index 00000000000..59bfeb68201 --- /dev/null +++ b/core/scripts/keystone/src/88_contracts_helpers.go @@ -0,0 +1,192 @@ +package src + +import ( + "context" + + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/ethereum/go-ethereum/common" + + helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + capabilities_registry "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/capabilities_registry_1_1_0" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/forwarder" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/keystone/generated/ocr3_capability" + verifierContract "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier" + "github.com/smartcontractkit/chainlink/v2/core/gethwrappers/llo-feeds/generated/verifier_proxy" +) + +var ZeroAddress = common.Address{} + +type OnChainMetaSerialized struct { + OCR common.Address `json:"ocrContract"` + Forwarder common.Address `json:"forwarderContract"` + // The block number of the transaction that set the config on the OCR3 contract. We use this to replay blocks from this point on + // when we load the OCR3 job specs on the nodes. 
+ SetConfigTxBlock uint64 `json:"setConfigTxBlock"` + + CapabilitiesRegistry common.Address `json:"CapabilitiesRegistry"` + Verifier common.Address `json:"VerifierContract"` + VerifierProxy common.Address `json:"VerifierProxy"` + // Stores the address that has been initialized by the proxy, if any + InitializedVerifierAddress common.Address `json:"InitializedVerifierAddress"` +} + +type onchainMeta struct { + OCR3 ocr3_capability.OCR3CapabilityInterface + Forwarder forwarder.KeystoneForwarderInterface + // The block number of the transaction that set the config on the OCR3 contract. We use this to replay blocks from this point on + // when we load the OCR3 job specs on the nodes. + SetConfigTxBlock uint64 + + CapabilitiesRegistry capabilities_registry.CapabilitiesRegistryInterface + Verifier verifierContract.VerifierInterface + VerifierProxy verifier_proxy.VerifierProxyInterface + InitializedVerifierAddress common.Address `json:"InitializedVerifierAddress"` +} + +func WriteOnchainMeta(o *onchainMeta, artefactsDir string) { + ensureArtefactsDir(artefactsDir) + + fmt.Println("Writing deployed contract addresses to file...") + serialzed := OnChainMetaSerialized{} + + if o.OCR3 != nil { + serialzed.OCR = o.OCR3.Address() + } + + if o.Forwarder != nil { + serialzed.Forwarder = o.Forwarder.Address() + } + + serialzed.SetConfigTxBlock = o.SetConfigTxBlock + serialzed.InitializedVerifierAddress = o.InitializedVerifierAddress + + if o.CapabilitiesRegistry != nil { + serialzed.CapabilitiesRegistry = o.CapabilitiesRegistry.Address() + } + + if o.Verifier != nil { + serialzed.Verifier = o.Verifier.Address() + } + + if o.VerifierProxy != nil { + serialzed.VerifierProxy = o.VerifierProxy.Address() + } + + jsonBytes, err := json.Marshal(serialzed) + PanicErr(err) + + err = os.WriteFile(deployedContractsFilePath(artefactsDir), jsonBytes, 0600) + PanicErr(err) +} + +func LoadOnchainMeta(artefactsDir string, env helpers.Environment) *onchainMeta { + hydrated := &onchainMeta{} + if !ContractsAlreadyDeployed(artefactsDir) { + fmt.Printf("No deployed contracts file found at %s\n", deployedContractsFilePath(artefactsDir)) + return hydrated + } + + jsonBytes, err := os.ReadFile(deployedContractsFilePath(artefactsDir)) + if err != nil { + fmt.Printf("Error reading deployed contracts file: %s\n", err) + return hydrated + } + + var s OnChainMetaSerialized + err = json.Unmarshal(jsonBytes, &s) + if err != nil { + return hydrated + } + + hydrated.SetConfigTxBlock = s.SetConfigTxBlock + if s.OCR != ZeroAddress { + if !contractExists(s.OCR, env) { + fmt.Printf("OCR contract at %s does not exist\n", s.OCR.Hex()) + } else { + ocr3, e := ocr3_capability.NewOCR3Capability(s.OCR, env.Ec) + PanicErr(e) + hydrated.OCR3 = ocr3 + } + } + + if s.Forwarder != ZeroAddress { + if !contractExists(s.Forwarder, env) { + fmt.Printf("Forwarder contract at %s does not exist\n", s.Forwarder.Hex()) + } else { + fwdr, e := forwarder.NewKeystoneForwarder(s.Forwarder, env.Ec) + PanicErr(e) + hydrated.Forwarder = fwdr + } + } + + if s.CapabilitiesRegistry != ZeroAddress { + if !contractExists(s.CapabilitiesRegistry, env) { + fmt.Printf("CapabilityRegistry contract at %s does not exist\n", s.CapabilitiesRegistry.Hex()) + } else { + cr, e := capabilities_registry.NewCapabilitiesRegistry(s.CapabilitiesRegistry, env.Ec) + PanicErr(e) + hydrated.CapabilitiesRegistry = cr + } + } + + hydrated.InitializedVerifierAddress = s.InitializedVerifierAddress + + if s.Verifier != ZeroAddress { + if !contractExists(s.Verifier, env) { + fmt.Printf("Verifier 
contract at %s does not exist\n", s.Verifier.Hex()) + hydrated.InitializedVerifierAddress = ZeroAddress + } else { + verifier, e := verifierContract.NewVerifier(s.Verifier, env.Ec) + PanicErr(e) + hydrated.Verifier = verifier + } + } + + if s.VerifierProxy != ZeroAddress { + if !contractExists(s.VerifierProxy, env) { + fmt.Printf("VerifierProxy contract at %s does not exist\n", s.VerifierProxy.Hex()) + hydrated.InitializedVerifierAddress = ZeroAddress + } else { + verifierProxy, e := verifier_proxy.NewVerifierProxy(s.VerifierProxy, env.Ec) + PanicErr(e) + hydrated.VerifierProxy = verifierProxy + } + } + + blkNum, err := env.Ec.BlockNumber(context.Background()) + PanicErr(err) + + if s.SetConfigTxBlock > blkNum { + fmt.Printf("Stale SetConfigTxBlock: %d, current block number: %d\n", s.SetConfigTxBlock, blkNum) + hydrated.SetConfigTxBlock = 0 + } + + return hydrated +} + +func ContractsAlreadyDeployed(artefactsDir string) bool { + _, err := os.Stat(artefactsDir) + if err != nil { + return false + } + + _, err = os.Stat(deployedContractsFilePath(artefactsDir)) + + return err == nil +} + +func deployedContractsFilePath(artefactsDir string) string { + return filepath.Join(artefactsDir, deployedContractsJSON) +} + +func contractExists(address common.Address, env helpers.Environment) bool { + byteCode, err := env.Ec.CodeAt(context.Background(), address, nil) + if err != nil { + return false + } + return len(byteCode) != 0 +} diff --git a/core/scripts/keystone/src/88_gen_jobspecs.go b/core/scripts/keystone/src/88_gen_jobspecs.go deleted file mode 100644 index e88833c9865..00000000000 --- a/core/scripts/keystone/src/88_gen_jobspecs.go +++ /dev/null @@ -1,91 +0,0 @@ -package src - -import ( - "fmt" - "path/filepath" - "strconv" - "strings" - - helpers "github.com/smartcontractkit/chainlink/core/scripts/common" - "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" -) - -type spec []string - -func (s spec) ToString() string { - return strings.Join(s, "\n") -} - -type hostSpec struct { - spec spec - host string -} - -type donHostSpec struct { - bootstrap hostSpec - oracles []hostSpec -} - -func genSpecs( - pubkeysPath string, - nodeListPath string, - templatesDir string, - chainID int64, - p2pPort int64, - ocrConfigContractAddress string, -) donHostSpec { - nodes := downloadNodeAPICredentials(nodeListPath) - nca := downloadNodePubKeys(nodeListPath, chainID, pubkeysPath) - bootstrapNode := nca[0] - - bootstrapSpecLines, err := readLines(filepath.Join(templatesDir, bootstrapSpecTemplate)) - helpers.PanicErr(err) - bootHost := nodes[0].remoteURL.Hostname() - bootstrapSpecLines = replacePlaceholders( - bootstrapSpecLines, - chainID, p2pPort, - ocrConfigContractAddress, bootHost, - bootstrapNode, bootstrapNode, - ) - bootstrap := hostSpec{bootstrapSpecLines, bootHost} - - oracleSpecLinesTemplate, err := readLines(filepath.Join(templatesDir, oracleSpecTemplate)) - helpers.PanicErr(err) - oracles := []hostSpec{} - for i := 1; i < len(nodes); i++ { - oracleSpecLines := oracleSpecLinesTemplate - oracleSpecLines = replacePlaceholders( - oracleSpecLines, - chainID, p2pPort, - ocrConfigContractAddress, bootHost, - bootstrapNode, nca[i], - ) - oracles = append(oracles, hostSpec{oracleSpecLines, nodes[i].remoteURL.Host}) - } - - return donHostSpec{ - bootstrap: bootstrap, - oracles: oracles, - } -} - -func replacePlaceholders( - lines []string, - - chainID, p2pPort int64, - contractAddress, bootHost string, - boot, node changeset.NodeKeys, -) (output []string) { - chainIDStr := 
strconv.FormatInt(chainID, 10) - bootstrapper := fmt.Sprintf("%s@%s:%d", boot.P2PPeerID, bootHost, p2pPort) - for _, l := range lines { - l = strings.Replace(l, "{{ chain_id }}", chainIDStr, 1) - l = strings.Replace(l, "{{ ocr_config_contract_address }}", contractAddress, 1) - l = strings.Replace(l, "{{ transmitter_id }}", node.EthAddress, 1) - l = strings.Replace(l, "{{ ocr_key_bundle_id }}", node.OCR2BundleID, 1) - l = strings.Replace(l, "{{ aptos_key_bundle_id }}", node.AptosBundleID, 1) - l = strings.Replace(l, "{{ bootstrapper_p2p_id }}", bootstrapper, 1) - output = append(output, l) - } - return -} diff --git a/core/scripts/keystone/src/88_gen_jobspecs_test.go b/core/scripts/keystone/src/88_gen_jobspecs_test.go deleted file mode 100644 index 7af11646c4e..00000000000 --- a/core/scripts/keystone/src/88_gen_jobspecs_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package src - -import ( - "fmt" - "testing" - - "github.com/gkampitakis/go-snaps/snaps" -) - -func (d *donHostSpec) ToString() string { - var result string - result += "Bootstrap:\n" - result += "Host: " + d.bootstrap.host + "\n" - result += d.bootstrap.spec.ToString() - result += "\n\nOracles:\n" - for i, oracle := range d.oracles { - if i != 0 { - result += "--------------------------------\n" - } - result += fmt.Sprintf("Oracle %d:\n", i) - result += "Host: " + oracle.host + "\n" - result += oracle.spec.ToString() - result += "\n\n" - } - return result -} - -func TestGenSpecs(t *testing.T) { - pubkeysPath := "./testdata/PublicKeys.json" - nodeListPath := "./testdata/NodeList.txt" - chainID := int64(11155111) - p2pPort := int64(6690) - contractAddress := "0xB29934624cAe3765E33115A9530a13f5aEC7fa8A" - - specs := genSpecs(pubkeysPath, nodeListPath, "../templates", chainID, p2pPort, contractAddress) - snaps.MatchSnapshot(t, specs.ToString()) -} diff --git a/core/scripts/keystone/src/88_gen_ocr3_config.go b/core/scripts/keystone/src/88_gen_ocr3_config.go deleted file mode 100644 index 94217b07f4e..00000000000 --- a/core/scripts/keystone/src/88_gen_ocr3_config.go +++ /dev/null @@ -1,20 +0,0 @@ -package src - -import ( - helpers "github.com/smartcontractkit/chainlink/core/scripts/common" - "github.com/smartcontractkit/chainlink/deployment" - ksdeploy "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" -) - -func mustReadConfig(fileName string) (output ksdeploy.TopLevelConfigSource) { - return mustParseJSON[ksdeploy.TopLevelConfigSource](fileName) -} - -func generateOCR3Config(nodeList string, configFile string, chainID int64, pubKeysPath string) ksdeploy.OCR3OnchainConfig { - topLevelCfg := mustReadConfig(configFile) - cfg := topLevelCfg.OracleConfig - nca := downloadNodePubKeys(nodeList, chainID, pubKeysPath) - c, err := ksdeploy.GenerateOCR3Config(cfg, nca, deployment.XXXGenerateTestOCRSecrets()) - helpers.PanicErr(err) - return c -} diff --git a/core/scripts/keystone/src/88_gen_ocr3_config_test.go b/core/scripts/keystone/src/88_gen_ocr3_config_test.go deleted file mode 100644 index 10cdc07b204..00000000000 --- a/core/scripts/keystone/src/88_gen_ocr3_config_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package src - -import ( - "errors" - "testing" - - "github.com/gkampitakis/go-snaps/match" - "github.com/gkampitakis/go-snaps/snaps" -) - -func TestGenerateOCR3Config(t *testing.T) { - // Generate OCR3 config - config := generateOCR3Config(".cache/NodeList.txt", "./testdata/SampleConfig.json", 11155111, "./testdata/PublicKeys.json") - - matchOffchainConfig := match.Custom("OffchainConfig", func(s any) (any, error) { - // coerce 
the value to a string - s, ok := s.(string) - if !ok { - return nil, errors.New("offchain config is not a string") - } - - // if the string is not empty - if s == "" { - return nil, errors.New("offchain config is empty") - } - - return "", nil - }) - - snaps.MatchJSON(t, config, matchOffchainConfig) -} diff --git a/core/scripts/keystone/src/88_jobspecs_helpers.go b/core/scripts/keystone/src/88_jobspecs_helpers.go new file mode 100644 index 00000000000..0e6cc3a043a --- /dev/null +++ b/core/scripts/keystone/src/88_jobspecs_helpers.go @@ -0,0 +1,53 @@ +package src + +import ( + "fmt" +) + +type OCRSpec struct { + ContractID string +} + +type BootSpec struct { + ContractID string +} + +type WorkflowSpec struct { + WorkflowID string +} + +type JobSpec struct { + ID string + Name string + BootstrapSpec BootSpec + OffChainReporting2OracleSpec OCRSpec + WorkflowSpec WorkflowSpec +} + +func upsertJob(api *nodeAPI, jobSpecName string, jobSpecStr string) { + jobsResp := api.mustExec(api.methods.ListJobs) + jobs := mustJSON[[]JobSpec](jobsResp) + for _, job := range *jobs { + if job.Name == jobSpecName { + fmt.Printf("Job already exists: %s, replacing..\n", jobSpecName) + api.withArg(job.ID).mustExec(api.methods.DeleteJob) + break + } + } + + fmt.Printf("Deploying jobspec: %s\n", jobSpecName) + _, err := api.withArg(jobSpecStr).exec(api.methods.CreateJob) + if err != nil { + panic(fmt.Sprintf("Failed to deploy job spec: %s Error: %s", jobSpecStr, err)) + } +} + +func clearJobs(api *nodeAPI) { + jobsResp := api.mustExec(api.methods.ListJobs) + jobs := mustJSON[[]JobSpec](jobsResp) + for _, job := range *jobs { + fmt.Printf("Deleting job: %s\n", job.Name) + api.withArg(job.ID).mustExec(api.methods.DeleteJob) + } + fmt.Println("All jobs have been deleted.") +} diff --git a/core/scripts/keystone/src/88_ocr_helpers.go b/core/scripts/keystone/src/88_ocr_helpers.go new file mode 100644 index 00000000000..7cdfd72ca52 --- /dev/null +++ b/core/scripts/keystone/src/88_ocr_helpers.go @@ -0,0 +1,69 @@ +package src + +import ( + "encoding/hex" + + "github.com/ethereum/go-ethereum/common" + "github.com/smartcontractkit/libocr/offchainreporting2/types" + + ksdeploy "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" +) + +func ocrConfToContractConfig(ocrConf ksdeploy.OCR3OnchainConfig, configCount uint32) types.ContractConfig { + cc := types.ContractConfig{ + Signers: convertByteSliceToOnchainPublicKeys(ocrConf.Signers), + Transmitters: convertAddressesToAccounts(ocrConf.Transmitters), + F: ocrConf.F, + OnchainConfig: ocrConf.OnchainConfig, + OffchainConfigVersion: ocrConf.OffchainConfigVersion, + OffchainConfig: ocrConf.OffchainConfig, + ConfigCount: uint64(configCount), + } + return cc +} + +func mercuryOCRConfigToContractConfig(ocrConf MercuryOCR2Config, configCount uint32) types.ContractConfig { + cc := types.ContractConfig{ + Signers: convertAddressesToOnchainPublicKeys(ocrConf.Signers), + Transmitters: convertBytes32sToAccounts(ocrConf.Transmitters), + F: ocrConf.F, + OnchainConfig: ocrConf.OnchainConfig, + OffchainConfigVersion: ocrConf.OffchainConfigVersion, + OffchainConfig: ocrConf.OffchainConfig, + ConfigCount: uint64(configCount), + } + + return cc +} + +func convertAddressesToOnchainPublicKeys(addresses []common.Address) []types.OnchainPublicKey { + keys := make([]types.OnchainPublicKey, len(addresses)) + for i, addr := range addresses { + keys[i] = types.OnchainPublicKey(addr.Bytes()) + } + return keys +} + +func convertAddressesToAccounts(addresses []common.Address) []types.Account { 
+ accounts := make([]types.Account, len(addresses)) + for i, addr := range addresses { + accounts[i] = types.Account(addr.Hex()) + } + return accounts +} + +func convertBytes32sToAccounts(bs [][32]byte) []types.Account { + accounts := make([]types.Account, len(bs)) + for i, b := range bs { + accounts[i] = types.Account(hex.EncodeToString(b[:])) + } + return accounts +} + +func convertByteSliceToOnchainPublicKeys(bs [][]byte) []types.OnchainPublicKey { + keys := make([]types.OnchainPublicKey, len(bs)) + for i, b := range bs { + keys[i] = types.OnchainPublicKey(b) + } + return keys +} diff --git a/core/scripts/keystone/src/99_app.go b/core/scripts/keystone/src/99_app.go index 6e59932aa71..29164959bec 100644 --- a/core/scripts/keystone/src/99_app.go +++ b/core/scripts/keystone/src/99_app.go @@ -1,31 +1,389 @@ package src import ( + "bytes" + "context" + "encoding/json" + "errors" "flag" "io" + "net/http" + "net/url" + "reflect" + "runtime" + "strings" + "sync" + "time" "github.com/urfave/cli" + "go.uber.org/zap/zapcore" helpers "github.com/smartcontractkit/chainlink/core/scripts/common" + "github.com/smartcontractkit/chainlink/v2/core/cmd" clcmd "github.com/smartcontractkit/chainlink/v2/core/cmd" + "github.com/smartcontractkit/chainlink/v2/core/logger" + clsessions "github.com/smartcontractkit/chainlink/v2/core/sessions" ) -func newApp(n *node, writer io.Writer) (*clcmd.Shell, *cli.App) { +// Package-level cache and mutex +var ( + nodeAPICache = make(map[string]*nodeAPI) + cacheMutex = &sync.Mutex{} +) + +func newApp(n NodeWithCreds, writer io.Writer) (*clcmd.Shell, *cli.App) { + loggingCfg := logger.Config{ + LogLevel: zapcore.InfoLevel, + JsonConsole: true, + } + logger, closeLggr := loggingCfg.New() + u, err := url.Parse(n.RemoteURL.String()) + PanicErr(err) + + clientOpts := clcmd.ClientOpts{RemoteNodeURL: *u, InsecureSkipVerify: true} + sr := clsessions.SessionRequest{Email: n.APILogin, Password: n.APIPassword} + + // Set the log level to error for the HTTP client, we don't care about + // the ssl warnings it emits for CRIB + logger.SetLogLevel(zapcore.ErrorLevel) + cookieAuth := cmd.NewSessionCookieAuthenticator( + clientOpts, + &cmd.MemoryCookieStore{}, + logger, + ) + + http := NewRetryableAuthenticatedHTTPClient(logger, clientOpts, cookieAuth, sr) + // Set the log level back to info for the shell + logger.SetLogLevel(zapcore.InfoLevel) client := &clcmd.Shell{ - Renderer: clcmd.RendererJSON{Writer: writer}, - AppFactory: clcmd.ChainlinkAppFactory{}, - KeyStoreAuthenticator: clcmd.TerminalKeyStoreAuthenticator{Prompter: n}, - FallbackAPIInitializer: clcmd.NewPromptingAPIInitializer(n), - Runner: clcmd.ChainlinkRunner{}, - PromptingSessionRequestBuilder: clcmd.NewPromptingSessionRequestBuilder(n), - ChangePasswordPrompter: clcmd.NewChangePasswordPrompter(), - PasswordPrompter: clcmd.NewPasswordPrompter(), + Logger: logger, + Renderer: clcmd.RendererJSON{Writer: writer}, + AppFactory: clcmd.ChainlinkAppFactory{}, + Runner: clcmd.ChainlinkRunner{}, + HTTP: http, + + CloseLogger: closeLggr, } app := clcmd.NewApp(client) - fs := flag.NewFlagSet("blah", flag.ContinueOnError) - fs.String("remote-node-url", n.url.String(), "") - helpers.PanicErr(app.Before(cli.NewContext(nil, fs, nil))) - // overwrite renderer since it's set to stdout after Before() is called - client.Renderer = clcmd.RendererJSON{Writer: writer} return client, app } + +type nodeAPI struct { + methods *cmd.Shell + app *cli.App + output *bytes.Buffer + fs *flag.FlagSet + clientMethod func(*cli.Context) error + logger 
logger.Logger +} + +func newNodeAPI(n NodeWithCreds) *nodeAPI { + // Create a unique key for the cache + key := n.RemoteURL.String() + + // Check if the nodeAPI exists in the cache + cacheMutex.Lock() + if api, exists := nodeAPICache[key]; exists { + cacheMutex.Unlock() + return api + } + cacheMutex.Unlock() + + output := &bytes.Buffer{} + methods, app := newApp(n, output) + + api := &nodeAPI{ + output: output, + methods: methods, + app: app, + fs: flag.NewFlagSet("test", flag.ContinueOnError), + logger: methods.Logger.Named("NodeAPI"), + } + + // Store the new nodeAPI in the cache + cacheMutex.Lock() + nodeAPICache[key] = api + cacheMutex.Unlock() + + return api +} + +func (c *nodeAPI) withArg(arg string) *nodeAPI { + err := c.fs.Parse([]string{arg}) + helpers.PanicErr(err) + + return c +} + +func (c *nodeAPI) withArgs(args ...string) *nodeAPI { + err := c.fs.Parse(args) + helpers.PanicErr(err) + + return c +} + +func (c *nodeAPI) withFlags(clientMethod func(*cli.Context) error, applyFlags func(*flag.FlagSet)) *nodeAPI { + flagSetApplyFromAction(clientMethod, c.fs, "") + applyFlags(c.fs) + + c.clientMethod = clientMethod + + return c +} + +func (c *nodeAPI) exec(clientMethod ...func(*cli.Context) error) ([]byte, error) { + if len(clientMethod) > 1 { + PanicErr(errors.New("Only one client method allowed")) + } + + defer c.output.Reset() + defer func() { + c.fs = flag.NewFlagSet("test", flag.ContinueOnError) + c.clientMethod = nil + }() + + if c.clientMethod == nil { + c.clientMethod = clientMethod[0] + } + + retryCount := 3 + for i := 0; i < retryCount; i++ { + c.logger.Tracew("Attempting API request", "attempt", i+1, "maxAttempts", retryCount) + c.output.Reset() + ctx := cli.NewContext(c.app, c.fs, nil) + err := c.clientMethod(ctx) + + if err == nil { + c.logger.Tracew("API request completed successfully", "attempt", i+1) + return c.output.Bytes(), nil + } + + if !strings.Contains(err.Error(), "invalid character '<' looking for beginning of value") { + c.logger.Tracew("API request failed with non-retriable error", + "attempt", i+1, + "err", err, + ) + return nil, err + } + + c.logger.Warnw("Encountered 504 gateway error during API request, retrying", + "attempt", i+1, + "maxAttempts", retryCount, + "err", err, + ) + + if i == retryCount-1 { + c.logger.Error("Failed to complete API request after all retry attempts") + return nil, err + } + + c.logger.Tracew("Waiting before retry attempt", + "attempt", i+1, + "waitTime", "1s", + ) + time.Sleep(3 * time.Second) + } + + return nil, errors.New("API request failed after retries") +} + +func (c *nodeAPI) mustExec(clientMethod ...func(*cli.Context) error) []byte { + bytes, err := c.exec(clientMethod...) + helpers.PanicErr(err) + return bytes +} + +// flagSetApplyFromAction applies the flags from action to the flagSet. 
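+// For example, a caller can pre-populate a flag set with the flags of the CLI command backing a
+// given shell method and then override individual values before invoking it (sketch only; shell is
+// an assumed *cmd.Shell value):
+//
+//	fs := flag.NewFlagSet("test", flag.ContinueOnError)
+//	flagSetApplyFromAction(shell.RemoteLogin, fs, "")
+//	// fs now holds that command's flags with their defaults and can be parsed or overridden.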
+// +// `parentCommand` will filter the app commands and only applies the flags if the command/subcommand has a parent with that name, if left empty no filtering is done +// +// Taken from: https://github.com/smartcontractkit/chainlink/blob/develop/core/cmd/shell_test.go#L590 +func flagSetApplyFromAction(action interface{}, flagSet *flag.FlagSet, parentCommand string) { + cliApp := cmd.Shell{} + app := cmd.NewApp(&cliApp) + + foundName := parentCommand == "" + actionFuncName := getFuncName(action) + + for _, command := range app.Commands { + flags := recursiveFindFlagsWithName(actionFuncName, command, parentCommand, foundName) + + for _, flag := range flags { + flag.Apply(flagSet) + } + } +} + +func recursiveFindFlagsWithName(actionFuncName string, command cli.Command, parent string, foundName bool) []cli.Flag { + if command.Action != nil { + if actionFuncName == getFuncName(command.Action) && foundName { + return command.Flags + } + } + + for _, subcommand := range command.Subcommands { + if !foundName { + foundName = strings.EqualFold(subcommand.Name, parent) + } + + found := recursiveFindFlagsWithName(actionFuncName, subcommand, parent, foundName) + if found != nil { + return found + } + } + return nil +} + +func getFuncName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} + +func mustJSON[T any](bytes []byte) *T { + typedPayload := new(T) + err := json.Unmarshal(bytes, typedPayload) + if err != nil { + PanicErr(err) + } + return typedPayload +} + +type retryableAuthenticatedHTTPClient struct { + client cmd.HTTPClient + logger logger.Logger +} + +func NewRetryableAuthenticatedHTTPClient(lggr logger.Logger, clientOpts clcmd.ClientOpts, cookieAuth cmd.CookieAuthenticator, sessionRequest clsessions.SessionRequest) cmd.HTTPClient { + return &retryableAuthenticatedHTTPClient{ + client: cmd.NewAuthenticatedHTTPClient(lggr, clientOpts, cookieAuth, sessionRequest), + logger: lggr.Named("RetryableAuthenticatedHTTPClient"), + } +} + +func logBody(body io.Reader) (string, io.Reader) { + if body == nil { + return "", nil + } + + var buf bytes.Buffer + tee := io.TeeReader(body, &buf) + bodyBytes, _ := io.ReadAll(tee) + return string(bodyBytes), bytes.NewReader(buf.Bytes()) +} + +func logResponse(logger logger.Logger, resp *http.Response) { + if resp == nil { + logger.Trace("Response was nil") + return + } + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + logger.Errorw("Failed to read response body for logging", "err", err) + return + } + // Replace the body so it can be read again by the caller + resp.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + + logger.Tracew("Response details", + "statusCode", resp.StatusCode, + "status", resp.Status, + "headers", resp.Header, + "body", string(bodyBytes), + ) +} + +func (h *retryableAuthenticatedHTTPClient) Get(ctx context.Context, path string, headers ...map[string]string) (*http.Response, error) { + h.logger.Tracew("Making GET request", + "path", path, + "headers", headers, + ) + return h.doRequestWithRetry(ctx, func() (*http.Response, error) { + return h.client.Get(ctx, path, headers...) 
+ }) +} + +func (h *retryableAuthenticatedHTTPClient) Post(ctx context.Context, path string, body io.Reader) (*http.Response, error) { + bodyStr, newBody := logBody(body) + h.logger.Tracew("Making POST request", + "path", path, + "body", bodyStr, + ) + return h.doRequestWithRetry(ctx, func() (*http.Response, error) { + return h.client.Post(ctx, path, newBody) + }) +} + +func (h *retryableAuthenticatedHTTPClient) Put(ctx context.Context, path string, body io.Reader) (*http.Response, error) { + bodyStr, newBody := logBody(body) + h.logger.Tracew("Making PUT request", + "path", path, + "body", bodyStr, + ) + return h.doRequestWithRetry(ctx, func() (*http.Response, error) { + return h.client.Put(ctx, path, newBody) + }) +} + +func (h *retryableAuthenticatedHTTPClient) Patch(ctx context.Context, path string, body io.Reader, headers ...map[string]string) (*http.Response, error) { + bodyStr, newBody := logBody(body) + h.logger.Tracew("Making PATCH request", + "path", path, + "headers", headers, + "body", bodyStr, + ) + return h.doRequestWithRetry(ctx, func() (*http.Response, error) { + return h.client.Patch(ctx, path, newBody, headers...) + }) +} + +func (h *retryableAuthenticatedHTTPClient) Delete(ctx context.Context, path string) (*http.Response, error) { + h.logger.Tracew("Making DELETE request", + "path", path, + ) + return h.doRequestWithRetry(ctx, func() (*http.Response, error) { + return h.client.Delete(ctx, path) + }) +} + +func (h *retryableAuthenticatedHTTPClient) doRequestWithRetry(_ context.Context, req func() (*http.Response, error)) (*http.Response, error) { + retryCount := 3 + for i := 0; i < retryCount; i++ { + h.logger.Tracew("Attempting request", "attempt", i+1, "maxAttempts", retryCount) + + response, err := req() + logResponse(h.logger, response) + + if err == nil || !strings.Contains(err.Error(), "invalid character '<' looking for beginning of value") { + if err != nil { + h.logger.Warn("Request completed with error", + "attempt", i+1, + "err", err, + ) + } else { + h.logger.Tracew("Request completed successfully", + "attempt", i+1, + "statusCode", response.StatusCode, + ) + } + return response, err + } + + h.logger.Warnw("Encountered 504 error during request, retrying", + "attempt", i+1, + "maxAttempts", retryCount, + "err", err, + ) + + if i == retryCount-1 { + h.logger.Error("Failed to complete request after all retry attempts") + return response, err + } + + h.logger.Tracew("Waiting before retry attempt", + "attempt", i+1, + "waitTime", "1s", + ) + time.Sleep(1 * time.Second) + } + return nil, errors.New("request failed after retries") +} diff --git a/core/scripts/keystone/src/99_crib_client.go b/core/scripts/keystone/src/99_crib_client.go index ebf9f9ee955..1a38cafe9bb 100644 --- a/core/scripts/keystone/src/99_crib_client.go +++ b/core/scripts/keystone/src/99_crib_client.go @@ -4,7 +4,7 @@ package src import ( "fmt" - "net/url" + "sort" "strings" ) @@ -12,12 +12,24 @@ type CribClient struct { k8sClient *K8sClient } -type CLNodeCredentials struct { - URL *url.URL - PodName string - Username string - Password string - NodePassword string +// SimpleURL lets us marshal a URL with only the fields we need. 
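+// For example (illustrative host), a value such as
+//
+//	SimpleURL{Scheme: "https", Host: "ks-wf-0.local", Path: ""}
+//
+// marshals to {"scheme":"https","host":"ks-wf-0.local","path":""} and prints as
+// "https://ks-wf-0.local" via String().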
+type SimpleURL struct { + Scheme string `json:"scheme"` + Host string `json:"host"` + Path string `json:"path"` +} + +func (s SimpleURL) String() string { + return fmt.Sprintf("%s://%s%s", s.Scheme, s.Host, s.Path) +} + +type NodeWithCreds struct { + URL SimpleURL + RemoteURL SimpleURL + ServiceName string + APILogin string + APIPassword string + KeystorePassword string } func NewCribClient() *CribClient { @@ -27,35 +39,44 @@ func NewCribClient() *CribClient { } } -func (m *CribClient) GetCLNodeCredentials() ([]CLNodeCredentials, error) { - fmt.Println("Getting CL node pods with config maps...") - pods, err := m.k8sClient.GetPodsWithConfigMap() +func (m *CribClient) getCLNodes() ([]NodeWithCreds, error) { + fmt.Println("Getting CL node deployments with config maps...") + deployments, err := m.k8sClient.GetDeploymentsWithConfigMap() if err != nil { return nil, err } - clNodeCredentials := []CLNodeCredentials{} + nodes := []NodeWithCreds{} - for _, pod := range pods { - apiCredentials := pod.ConfigMap.Data["apicredentials"] + for _, deployment := range deployments { + apiCredentials := deployment.ConfigMap.Data["apicredentials"] splitCreds := strings.Split(strings.TrimSpace(apiCredentials), "\n") username := splitCreds[0] password := splitCreds[1] - nodePassword := pod.ConfigMap.Data["node-password"] - url, err := url.Parse("https://" + pod.Host) - if err != nil { - return nil, err + keystorePassword := deployment.ConfigMap.Data["node-password"] + url := SimpleURL{ + Scheme: "https", + Host: deployment.Host, + Path: "", } - clNodeCredential := CLNodeCredentials{ - URL: url, - PodName: pod.Name, - Username: username, - Password: password, - NodePassword: nodePassword, + node := NodeWithCreds{ + // We dont handle both in-cluster and out-of-cluster deployments + // Hence why both URL and RemoteURL are the same + URL: url, + RemoteURL: url, + ServiceName: deployment.ServiceName, + APILogin: username, + APIPassword: password, + KeystorePassword: keystorePassword, } - clNodeCredentials = append(clNodeCredentials, clNodeCredential) + nodes = append(nodes, node) } - return clNodeCredentials, nil + // Sort nodes by URL + sort.Slice(nodes, func(i, j int) bool { + return nodes[i].URL.Host < nodes[j].URL.Host + }) + + return nodes, nil } diff --git a/core/scripts/keystone/src/99_fetch_keys.go b/core/scripts/keystone/src/99_fetch_keys.go index 056769dc714..63e191a1234 100644 --- a/core/scripts/keystone/src/99_fetch_keys.go +++ b/core/scripts/keystone/src/99_fetch_keys.go @@ -1,230 +1,271 @@ package src import ( - "bytes" "encoding/json" "errors" - "flag" "fmt" "os" - "sort" "strings" "github.com/urfave/cli" helpers "github.com/smartcontractkit/chainlink/core/scripts/common" - "github.com/smartcontractkit/chainlink/deployment/keystone/changeset" ubig "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" "github.com/smartcontractkit/chainlink/v2/core/cmd" "github.com/smartcontractkit/chainlink/v2/core/web/presenters" ) -func downloadNodePubKeys(nodeList string, chainID int64, pubKeysPath string) []changeset.NodeKeys { - // Check if file exists already, and if so, return the keys - if _, err := os.Stat(pubKeysPath); err == nil { - fmt.Println("Loading existing public keys at:", pubKeysPath) - return mustParseJSON[[]changeset.NodeKeys](pubKeysPath) - } - - nodes := downloadNodeAPICredentials(nodeList) - nodesKeys := mustFetchNodesKeys(chainID, nodes) +// NodeSet represents a set of nodes with associated metadata. +// NodeKeys are indexed by the same order as Nodes. 
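+// For example, a node and its keys can be paired up by index (sketch):
+//
+//	for i, n := range nodeSet.Nodes {
+//		keys := nodeSet.NodeKeys[i]
+//		fmt.Println(n.ServiceName, keys.EthAddress)
+//	}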
+type NodeSet struct { + Name string + Prefix string + Nodes []NodeWithCreds + NodeKeys []NodeKeys +} - marshalledNodeKeys, err := json.MarshalIndent(nodesKeys, "", " ") - if err != nil { - panic(err) - } - err = os.WriteFile(pubKeysPath, marshalledNodeKeys, 0600) - if err != nil { - panic(err) - } - fmt.Println("Keystone OCR2 public keys have been saved to:", pubKeysPath) +var ( + WorkflowNodeSetName = "workflow" + WorkflowNodeSetPrefix = "ks-wf-" + StreamsTriggerNodeSetName = "streams-trigger" + StreamsTriggerNodeSetPrefix = "ks-str-trig-" +) - return nodesKeys +// NodeSets holds the two NodeSets: Workflow and StreamsTrigger. +type NodeSets struct { + Workflow NodeSet + StreamsTrigger NodeSet } -// downloadNodeAPICredentials downloads the node API credentials, or loads them from disk if they already exist -// -// The nodes are sorted by URL. In the case of crib, the bootstrap node is the first node in the list. -func downloadNodeAPICredentials(nodeListPath string) []*node { - if _, err := os.Stat(nodeListPath); err == nil { - fmt.Println("Loading existing node host list at:", nodeListPath) - nodesList := mustReadNodesList(nodeListPath) - return nodesList +func downloadNodeSets(chainID int64, nodeSetPath string, nodeSetSize int) NodeSets { + if _, err := os.Stat(nodeSetPath); err == nil { + fmt.Println("Loading existing nodesets at:", nodeSetPath) + nodeSets := mustReadJSON[NodeSets](nodeSetPath) + return nodeSets } fmt.Println("Connecting to Kubernetes to fetch node credentials...") crib := NewCribClient() - clNodesWithCreds, err := crib.GetCLNodeCredentials() - - if err != nil { - panic(err) + nodes, err := crib.getCLNodes() + PanicErr(err) + + totalNodes := len(nodes) + // Workflow and StreamsTrigger nodeSets should have the same number of nodes + // hence we need at least 2 * nodeSetSize nodes + requiredNodes := nodeSetSize * 2 + if totalNodes < requiredNodes { + panic(fmt.Errorf("not enough nodes to populate both nodeSets: required %d, got %d", requiredNodes, totalNodes)) } - nodesList := clNodesWithCredsToNodes(clNodesWithCreds) - err = writeNodesList(nodeListPath, nodesList) - if err != nil { - panic(err) - } - if len(nodesList) == 0 { - panic("No nodes found") + nodeSets := NodeSets{ + Workflow: NodeSet{ + Name: WorkflowNodeSetName, + Prefix: WorkflowNodeSetPrefix, + Nodes: nodes[:nodeSetSize], + }, + StreamsTrigger: NodeSet{ + Name: StreamsTriggerNodeSetName, + Prefix: StreamsTriggerNodeSetPrefix, + Nodes: nodes[nodeSetSize : nodeSetSize*2], + }, } - return nodesList + + nodeSets.Workflow.NodeKeys = mustFetchNodeKeys(chainID, nodeSets.Workflow.Nodes, true) + nodeSets.StreamsTrigger.NodeKeys = mustFetchNodeKeys(chainID, nodeSets.StreamsTrigger.Nodes, false) + mustWriteJSON(nodeSetPath, nodeSets) + + return nodeSets } -func clNodesWithCredsToNodes(clNodesWithCreds []CLNodeCredentials) []*node { - nodes := []*node{} - for _, cl := range clNodesWithCreds { - n := node{ - url: cl.URL, - password: cl.Password, - login: cl.Username, - } - nodes = append(nodes, &n) - } +// NodeKeys represents the keys for a single node. +// If there are multiple OCR2KBs or OCR2AptosKBs, only the first one is used. 
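+// Because the key bundles are embedded, their fields are flattened when serialized (e.g. into the
+// cached node sets JSON), roughly as follows (placeholder values):
+//
+//	{
+//	  "AptosAccount": "0x...", "EthAddress": "0x...", "P2PPeerID": "12D3Koo...",
+//	  "CSAPublicKey": "...", "OCR2BundleID": "...", "OCR2OnchainPublicKey": "...",
+//	  "OCR2OffchainPublicKey": "...", "OCR2ConfigPublicKey": "...",
+//	  "AptosBundleID": "...", "AptosOnchainPublicKey": "..."
+//	}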
+type NodeKeys struct { + AptosAccount string `json:"AptosAccount"` + EthAddress string `json:"EthAddress"` + P2PPeerID string `json:"P2PPeerID"` + CSAPublicKey string `json:"CSAPublicKey"` + OCR2KBTrimmed + OCR2AptosKBTrimmed +} - // sort nodes by URL - sort.Slice(nodes, func(i, j int) bool { - return nodes[i].url.String() < nodes[j].url.String() - }) - return nodes +// This is an OCR key bundle with the prefixes on each respective key +// trimmed off +type OCR2KBTrimmed struct { + OCR2BundleID string `json:"OCR2BundleID"` // used only in job spec + OCR2OnchainPublicKey string `json:"OCR2OnchainPublicKey"` // ocr2on_evm_ + OCR2OffchainPublicKey string `json:"OCR2OffchainPublicKey"` // ocr2off_evm_ + OCR2ConfigPublicKey string `json:"OCR2ConfigPublicKey"` // ocr2cfg_evm_ } -type ocr2Bundle struct { - ID string `json:"id"` - ChainType string `json:"chainType"` - OnchainPublicKey string `json:"onchainPublicKey"` - OffchainPublicKey string `json:"offchainPublicKey"` - ConfigPublicKey string `json:"configPublicKey"` +// This is an Aptos key bundle with the prefixes on each respective key +// trimmed off +type OCR2AptosKBTrimmed struct { + AptosBundleID string `json:"AptosBundleID"` + AptosOnchainPublicKey string `json:"AptosOnchainPublicKey"` // ocr2on_aptos_ } -func mustFetchNodesKeys(chainID int64, nodes []*node) (nca []changeset.NodeKeys) { - for _, n := range nodes { - output := &bytes.Buffer{} - client, app := newApp(n, output) - - fmt.Println("Logging in:", n.url) - loginFs := flag.NewFlagSet("test", flag.ContinueOnError) - loginFs.Bool("bypass-version-check", true, "") - loginCtx := cli.NewContext(app, loginFs, nil) - err := client.RemoteLogin(loginCtx) - helpers.PanicErr(err) - output.Reset() +func mustFetchNodeKeys(chainID int64, nodes []NodeWithCreds, createAptosKeys bool) []NodeKeys { + nodeKeys := []NodeKeys{} - err = client.ListETHKeys(&cli.Context{ - App: app, - }) - helpers.PanicErr(err) - var ethKeys []presenters.ETHKeyResource - helpers.PanicErr(json.Unmarshal(output.Bytes(), ðKeys)) - ethAddress, err := findFirstGoodEthKeyAddress(chainID, ethKeys) + for _, n := range nodes { + api := newNodeAPI(n) + // Get eth key + fmt.Printf("Fetching ETH keys for node %s\n", n.ServiceName) + eKey := api.mustExec(api.methods.ListETHKeys) + ethKeys := mustJSON[[]presenters.ETHKeyResource](eKey) + ethAddress, err := findFirstGoodEthKeyAddress(chainID, *ethKeys) helpers.PanicErr(err) - output.Reset() - keysClient := cmd.NewAptosKeysClient(client) - err = keysClient.ListKeys(&cli.Context{ - App: app, - }) - helpers.PanicErr(err) - var aptosKeys []presenters.AptosKeyResource - helpers.PanicErr(json.Unmarshal(output.Bytes(), &aptosKeys)) - if len(aptosKeys) != 1 { - helpers.PanicErr(errors.New("node must have single aptos key")) + var aptosAccount string + if createAptosKeys { + aptosAccount = getOrCreateAptosKey(api) } - aptosAccount := aptosKeys[0].Account - output.Reset() - err = client.ListP2PKeys(&cli.Context{ - App: app, - }) - helpers.PanicErr(err) - var p2pKeys []presenters.P2PKeyResource - helpers.PanicErr(json.Unmarshal(output.Bytes(), &p2pKeys)) - if len(p2pKeys) != 1 { + // Get p2p key + fmt.Printf("Fetching P2P key for node %s\n", n.ServiceName) + p2pKeys := api.mustExec(api.methods.ListP2PKeys) + p2pKey := mustJSON[[]presenters.P2PKeyResource](p2pKeys) + if len(*p2pKey) != 1 { helpers.PanicErr(errors.New("node must have single p2p key")) } - peerID := strings.TrimPrefix(p2pKeys[0].PeerID, "p2p_") - output.Reset() + peerID := strings.TrimPrefix((*p2pKey)[0].PeerID, "p2p_") + + // Get 
OCR2 key bundles for both EVM and Aptos chains + bundles := api.mustExec(api.methods.ListOCR2KeyBundles) + ocr2Bundles := mustJSON[cmd.OCR2KeyBundlePresenters](bundles) + + expectedBundleLen := 1 + + // evm key bundles + fmt.Printf("Fetching OCR2 EVM key bundles for node %s\n", n.ServiceName) + ocr2EvmBundles := getTrimmedEVMOCR2KBs(*ocr2Bundles) + evmBundleLen := len(ocr2EvmBundles) + if evmBundleLen < expectedBundleLen { + fmt.Printf("WARN: node has %d EVM OCR2 bundles when it should have at least %d, creating bundles...\n", evmBundleLen, expectedBundleLen) + for i := evmBundleLen; i < expectedBundleLen; i++ { + cBundle := api.withArg("evm").mustExec(api.methods.CreateOCR2KeyBundle) + createdBundle := mustJSON[cmd.OCR2KeyBundlePresenter](cBundle) + fmt.Printf("Created OCR2 EVM key bundle %s\n", string(cBundle)) + ocr2EvmBundles = append(ocr2EvmBundles, trimmedOCR2KB(*createdBundle)) + } + } - chainType := "evm" + // aptos key bundles + var ocr2AptosBundles []OCR2AptosKBTrimmed + if createAptosKeys { + fmt.Printf("Fetching OCR2 Aptos key bundles for node %s\n", n.ServiceName) + ocr2AptosBundles = createAptosOCR2KB(ocr2Bundles, expectedBundleLen, api) + } - var ocr2Bundles []ocr2Bundle - err = client.ListOCR2KeyBundles(&cli.Context{ - App: app, - }) + fmt.Printf("Fetching CSA keys for node %s\n", n.ServiceName) + csaKeys := api.mustExec(api.methods.ListCSAKeys) + csaKeyResources := mustJSON[[]presenters.CSAKeyResource](csaKeys) + csaPubKey, err := findFirstCSAPublicKey(*csaKeyResources) helpers.PanicErr(err) - helpers.PanicErr(json.Unmarshal(output.Bytes(), &ocr2Bundles)) - ocr2BundleIndex := findOCR2Bundle(ocr2Bundles, chainType) - output.Reset() - if ocr2BundleIndex == -1 { - fmt.Println("WARN: node does not have EVM OCR2 bundle, creating one") - fs := flag.NewFlagSet("test", flag.ContinueOnError) - err = fs.Parse([]string{chainType}) - helpers.PanicErr(err) - ocr2CreateBundleCtx := cli.NewContext(app, fs, nil) - err = client.CreateOCR2KeyBundle(ocr2CreateBundleCtx) - helpers.PanicErr(err) - output.Reset() - - err = client.ListOCR2KeyBundles(&cli.Context{ - App: app, - }) - helpers.PanicErr(err) - helpers.PanicErr(json.Unmarshal(output.Bytes(), &ocr2Bundles)) - ocr2BundleIndex = findOCR2Bundle(ocr2Bundles, chainType) - output.Reset() + + // We can handle multiple OCR bundles in the future + // but for now we only support a single bundle per node + keys := NodeKeys{ + OCR2KBTrimmed: ocr2EvmBundles[0], + EthAddress: ethAddress, + AptosAccount: aptosAccount, + P2PPeerID: peerID, + CSAPublicKey: strings.TrimPrefix(csaPubKey, "csa_"), } + if createAptosKeys { + keys.OCR2AptosKBTrimmed = ocr2AptosBundles[0] + } + + nodeKeys = append(nodeKeys, keys) + } - ocr2Bndl := ocr2Bundles[ocr2BundleIndex] - - aptosBundleIndex := findOCR2Bundle(ocr2Bundles, "aptos") - if aptosBundleIndex == -1 { - chainType2 := "aptos" - fmt.Println("WARN: node does not have Aptos OCR2 bundle, creating one") - fs := flag.NewFlagSet("test", flag.ContinueOnError) - err = fs.Parse([]string{chainType2}) - helpers.PanicErr(err) - ocr2CreateBundleCtx := cli.NewContext(app, fs, nil) - err = client.CreateOCR2KeyBundle(ocr2CreateBundleCtx) - helpers.PanicErr(err) - output.Reset() - - err = client.ListOCR2KeyBundles(&cli.Context{ - App: app, - }) - helpers.PanicErr(err) - helpers.PanicErr(json.Unmarshal(output.Bytes(), &ocr2Bundles)) - aptosBundleIndex = findOCR2Bundle(ocr2Bundles, chainType2) - output.Reset() + return nodeKeys +} + +func trimmedOCR2KB(ocr2Bndl cmd.OCR2KeyBundlePresenter) OCR2KBTrimmed { + return OCR2KBTrimmed{ + 
OCR2BundleID: ocr2Bndl.ID, + OCR2ConfigPublicKey: strings.TrimPrefix(ocr2Bndl.ConfigPublicKey, "ocr2cfg_evm_"), + OCR2OnchainPublicKey: strings.TrimPrefix(ocr2Bndl.OnchainPublicKey, "ocr2on_evm_"), + OCR2OffchainPublicKey: strings.TrimPrefix(ocr2Bndl.OffChainPublicKey, "ocr2off_evm_"), + } +} + +func trimmedAptosOCR2KB(ocr2Bndl cmd.OCR2KeyBundlePresenter) OCR2AptosKBTrimmed { + return OCR2AptosKBTrimmed{ + AptosBundleID: ocr2Bndl.ID, + AptosOnchainPublicKey: strings.TrimPrefix(ocr2Bndl.OnchainPublicKey, "ocr2on_aptos_"), + } +} + +func createAptosOCR2KB(ocr2Bundles *cmd.OCR2KeyBundlePresenters, expectedBundleLen int, api *nodeAPI) []OCR2AptosKBTrimmed { + ocr2AptosBundles := getTrimmedAptosOCR2KBs(*ocr2Bundles) + aptosBundleLen := len(ocr2AptosBundles) + + if aptosBundleLen < expectedBundleLen { + fmt.Printf("WARN: node has %d Aptos OCR2 bundles when it should have at least %d, creating bundles...\n", aptosBundleLen, expectedBundleLen) + for i := aptosBundleLen; i < expectedBundleLen; i++ { + cBundle := api.withArg("aptos").mustExec(api.methods.CreateOCR2KeyBundle) + createdBundle := mustJSON[cmd.OCR2KeyBundlePresenter](cBundle) + fmt.Println("Created OCR2 Aptos key bundle", string(cBundle)) + ocr2AptosBundles = append(ocr2AptosBundles, trimmedAptosOCR2KB(*createdBundle)) } + } - aptosBundle := ocr2Bundles[aptosBundleIndex] + return ocr2AptosBundles +} - err = client.ListCSAKeys(&cli.Context{ - App: app, - }) +// getOrCreateAptosKey returns the Aptos account of the node. +// +// If the node has no Aptos keys, it creates one and returns the account. +func getOrCreateAptosKey(api *nodeAPI) string { + api.output.Reset() + aKeysClient := cmd.NewAptosKeysClient(api.methods) + err := aKeysClient.ListKeys(&cli.Context{App: api.app}) + helpers.PanicErr(err) + var aptosKeys []presenters.AptosKeyResource + helpers.PanicErr(json.Unmarshal(api.output.Bytes(), &aptosKeys)) + if len(aptosKeys) == 0 { + api.output.Reset() + fmt.Printf("WARN: node has no aptos keys, creating one...\n") + err = aKeysClient.CreateKey(&cli.Context{App: api.app}) helpers.PanicErr(err) - var csaKeys []presenters.CSAKeyResource - helpers.PanicErr(json.Unmarshal(output.Bytes(), &csaKeys)) - csaPubKey, err := findFirstCSAPublicKey(csaKeys) + api.output.Reset() + err = aKeysClient.ListKeys(&cli.Context{App: api.app}) helpers.PanicErr(err) - output.Reset() - - nc := changeset.NodeKeys{ - EthAddress: ethAddress, - AptosAccount: aptosAccount, - P2PPeerID: peerID, - AptosBundleID: aptosBundle.ID, - AptosOnchainPublicKey: strings.TrimPrefix(aptosBundle.OnchainPublicKey, fmt.Sprintf("ocr2on_%s_", "aptos")), - OCR2BundleID: ocr2Bndl.ID, - OCR2ConfigPublicKey: strings.TrimPrefix(ocr2Bndl.ConfigPublicKey, fmt.Sprintf("ocr2cfg_%s_", chainType)), - OCR2OnchainPublicKey: strings.TrimPrefix(ocr2Bndl.OnchainPublicKey, fmt.Sprintf("ocr2on_%s_", chainType)), - OCR2OffchainPublicKey: strings.TrimPrefix(ocr2Bndl.OffchainPublicKey, fmt.Sprintf("ocr2off_%s_", chainType)), - CSAPublicKey: csaPubKey, + helpers.PanicErr(json.Unmarshal(api.output.Bytes(), &aptosKeys)) + api.output.Reset() + } + + if len(aptosKeys) != 1 { + fmt.Printf("Node has %d aptos keys\n", len(aptosKeys)) + PanicErr(errors.New("node must have single aptos key")) + } + + aptosAccount := aptosKeys[0].Account + api.output.Reset() + + return aptosAccount +} + +func getTrimmedAptosOCR2KBs(ocr2Bundles cmd.OCR2KeyBundlePresenters) []OCR2AptosKBTrimmed { + aptosBundles := []OCR2AptosKBTrimmed{} + for _, b := range ocr2Bundles { + if b.ChainType == "aptos" { + aptosBundles = 
append(aptosBundles, trimmedAptosOCR2KB(b)) } + } + return aptosBundles +} - nca = append(nca, nc) +func getTrimmedEVMOCR2KBs(ocr2Bundles cmd.OCR2KeyBundlePresenters) []OCR2KBTrimmed { + evmBundles := []OCR2KBTrimmed{} + for _, b := range ocr2Bundles { + if b.ChainType == "evm" { + evmBundles = append(evmBundles, trimmedOCR2KB(b)) + } } - return + return evmBundles } func findFirstCSAPublicKey(csaKeyResources []presenters.CSAKeyResource) (string, error) { @@ -234,21 +275,9 @@ func findFirstCSAPublicKey(csaKeyResources []presenters.CSAKeyResource) (string, return "", errors.New("did not find any CSA Key Resources") } -func findOCR2Bundle(ocr2Bundles []ocr2Bundle, chainType string) int { - for i, b := range ocr2Bundles { - if b.ChainType == chainType { - return i - } - } - return -1 -} - func findFirstGoodEthKeyAddress(chainID int64, ethKeys []presenters.ETHKeyResource) (string, error) { for _, ethKey := range ethKeys { if ethKey.EVMChainID.Equal(ubig.NewI(chainID)) && !ethKey.Disabled { - if ethKey.EthBalance.IsZero() { - fmt.Println("WARN: selected ETH address has zero balance", ethKey.Address) - } return ethKey.Address, nil } } diff --git a/core/scripts/keystone/src/99_files.go b/core/scripts/keystone/src/99_files.go index 08ba12e4194..1848ba8fae9 100644 --- a/core/scripts/keystone/src/99_files.go +++ b/core/scripts/keystone/src/99_files.go @@ -1,71 +1,54 @@ package src import ( - "bufio" "encoding/json" "fmt" "io" "os" - - "github.com/smartcontractkit/chainlink/v2/core/utils" ) const ( defaultArtefactsDir = "artefacts" - defaultPublicKeys = ".cache/PublicKeys.json" - defaultNodeList = ".cache/NodeList.txt" + defaultNodeSetsPath = ".cache/node_sets.json" deployedContractsJSON = "deployed_contracts.json" - bootstrapSpecTemplate = "bootstrap.toml" - cribOverrideTemplate = "crib-overrides.yaml" - oracleSpecTemplate = "oracle.toml" ) -func writeLines(lines []string, path string) error { - file, err := os.Create(path) +func mustReadJSON[T any](fileName string) (output T) { + jsonFile, err := os.Open(fileName) if err != nil { - return err - } - wc := utils.NewDeferableWriteCloser(file) - defer wc.Close() - - w := bufio.NewWriter(file) - for _, line := range lines { - fmt.Fprintln(w, line) - } - if err := w.Flush(); err != nil { - return err + panic(fmt.Sprintf("failed to open file at %s: %v", fileName, err)) } - return wc.Close() -} - -func readLines(path string) ([]string, error) { - file, err := os.Open(path) + defer jsonFile.Close() + bytes, err := io.ReadAll(jsonFile) if err != nil { - return nil, err + panic(fmt.Sprintf("failed to read file at %s: %v", fileName, err)) } - defer file.Close() - - var lines []string - scanner := bufio.NewScanner(file) - for scanner.Scan() { - lines = append(lines, scanner.Text()) + err = json.Unmarshal(bytes, &output) + if err != nil { + panic(fmt.Sprintf("failed to unmarshal data: %v", err)) } - return lines, scanner.Err() + return } -func mustParseJSON[T any](fileName string) (output T) { - jsonFile, err := os.Open(fileName) +func mustWriteJSON[T any](fileName string, data T) { + jsonFile, err := os.Create(fileName) if err != nil { - panic(err) + panic(fmt.Sprintf("failed to create file at %s: %v", fileName, err)) } defer jsonFile.Close() - bytes, err := io.ReadAll(jsonFile) + encoder := json.NewEncoder(jsonFile) + encoder.SetIndent("", " ") + err = encoder.Encode(data) if err != nil { - panic(err) + panic(fmt.Sprintf("failed to encode data: %v", err)) } - err = json.Unmarshal(bytes, &output) +} + +func ensureArtefactsDir(artefactsDir string) { + _, err := 
os.Stat(artefactsDir) if err != nil { - panic(err) + fmt.Println("Creating artefacts directory" + artefactsDir) + err = os.MkdirAll(artefactsDir, 0700) + PanicErr(err) } - return } diff --git a/core/scripts/keystone/src/99_files_test.go b/core/scripts/keystone/src/99_files_test.go deleted file mode 100644 index 83ceb5cd9cc..00000000000 --- a/core/scripts/keystone/src/99_files_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package src - -import ( - "path/filepath" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func Test_writeLines(t *testing.T) { - type args struct { - lines []string - } - tests := []struct { - name string - args args - }{ - { - name: "write read lines", - args: args{ - lines: []string{"a", "b"}, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - pth := filepath.Join(t.TempDir(), strings.ReplaceAll(tt.name, " ", "_")) - err := writeLines(tt.args.lines, pth) - assert.NoError(t, err) - got, err := readLines(pth) - assert.NoError(t, err) - assert.Equal(t, tt.args.lines, got) - }) - } -} diff --git a/core/scripts/keystone/src/99_k8s_client.go b/core/scripts/keystone/src/99_k8s_client.go index 55a0ac82bcb..e4885e53a19 100644 --- a/core/scripts/keystone/src/99_k8s_client.go +++ b/core/scripts/keystone/src/99_k8s_client.go @@ -2,11 +2,13 @@ package src import ( "context" + "errors" "fmt" "log" "sort" "strings" + apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" networkingV1 "k8s.io/api/networking/v1" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -63,32 +65,33 @@ func MustNewK8sClient() *K8sClient { } } -type PodWithConfigMap struct { - v1.Pod - ConfigMap v1.ConfigMap - Host string +type DeploymentWithConfigMap struct { + apps.Deployment + ServiceName string + ConfigMap v1.ConfigMap + Host string } -func (m *K8sClient) GetPodsWithConfigMap() ([]PodWithConfigMap, error) { - pods, err := m.ListPods("app=app") +func (m *K8sClient) GetDeploymentsWithConfigMap() ([]DeploymentWithConfigMap, error) { + deployments, err := m.ListDeployments("app=app") if err != nil { return nil, err } - if len(pods.Items) == 0 { - return nil, fmt.Errorf("no chainlink node crib pods found, is your crib cluster deployed?") + if len(deployments.Items) == 0 { + return nil, errors.New("no deployments found, is your nodeset deployed?") } - podsWithConfigMaps := []PodWithConfigMap{} + deploymentsWithConfigMaps := []DeploymentWithConfigMap{} ingressList, err := m.ListIngresses() if err != nil { return nil, err } if len(ingressList.Items) == 0 { - return nil, fmt.Errorf("no ingress found, is your crib cluster deployed?") + return nil, errors.New("no ingress found, is your nodeset deployed?") } - for _, pod := range pods.Items { - for _, v := range pod.Spec.Volumes { + for _, deployment := range deployments.Items { + for _, v := range deployment.Spec.Template.Spec.Volumes { if v.ConfigMap == nil { continue } @@ -96,53 +99,48 @@ func (m *K8sClient) GetPodsWithConfigMap() ([]PodWithConfigMap, error) { if err != nil { return nil, err } - // - host: crib-henry-keystone-node2.main.stage.cldev.sh - // http: - // paths: - // - backend: - // service: - // name: app-node-2 - // port: - // number: 6688 - // path: /* - // pathType: ImplementationSpecific - instance := pod.Labels["instance"] + instance := deployment.Labels["instance"] var host string + var serviceName string for _, ingress := range ingressList.Items { for _, rule := range ingress.Spec.Rules { for _, path := range rule.HTTP.Paths { if strings.Contains(path.Backend.Service.Name, instance) { host = rule.Host + 
serviceName = path.Backend.Service.Name } } } } if host == "" { - return nil, fmt.Errorf("could not find host for pod %s", pod.Name) + return nil, fmt.Errorf("could not find host for deployment %s", deployment.Name) } - podWithConfigMap := PodWithConfigMap{ - Host: host, - Pod: pod, - ConfigMap: *cm, + deploymentWithConfigMap := DeploymentWithConfigMap{ + Host: host, + ServiceName: serviceName, + Deployment: deployment, + ConfigMap: *cm, } - podsWithConfigMaps = append(podsWithConfigMaps, podWithConfigMap) + deploymentsWithConfigMaps = append(deploymentsWithConfigMaps, deploymentWithConfigMap) } } - fmt.Printf("Found %d chainlink node crib pods\n", len(podsWithConfigMaps)) - return podsWithConfigMaps, nil + fmt.Printf("Found %d deployments with config maps\n", len(deploymentsWithConfigMaps)) + return deploymentsWithConfigMaps, nil } -// ListPods lists pods for a namespace and selector -func (m *K8sClient) ListPods(selector string) (*v1.PodList, error) { - pods, err := m.ClientSet.CoreV1().Pods(m.namespace).List(context.Background(), metaV1.ListOptions{LabelSelector: selector}) - sort.Slice(pods.Items, func(i, j int) bool { - return pods.Items[i].CreationTimestamp.Before(pods.Items[j].CreationTimestamp.DeepCopy()) +// ListDeployments lists deployments for a namespace +func (m *K8sClient) ListDeployments(selector string) (*apps.DeploymentList, error) { + deployments, err := m.ClientSet.AppsV1().Deployments(m.namespace).List(context.Background(), metaV1.ListOptions{LabelSelector: selector}) + if err != nil { + return nil, err + } + sort.Slice(deployments.Items, func(i, j int) bool { + return deployments.Items[i].CreationTimestamp.Before(deployments.Items[j].CreationTimestamp.DeepCopy()) }) - - return pods.DeepCopy(), err + return deployments.DeepCopy(), nil } // Get a config map diff --git a/core/scripts/keystone/src/99_nodes.go b/core/scripts/keystone/src/99_nodes.go deleted file mode 100644 index 68d3621ce63..00000000000 --- a/core/scripts/keystone/src/99_nodes.go +++ /dev/null @@ -1,72 +0,0 @@ -package src - -import ( - "errors" - "fmt" - "net/url" - "strings" - - helpers "github.com/smartcontractkit/chainlink/core/scripts/common" -) - -type node struct { - url *url.URL - remoteURL *url.URL - login string - password string -} - -func (n node) IsTerminal() bool { - return false -} - -func (n node) PasswordPrompt(p string) string { - return n.password -} - -func (n node) Prompt(p string) string { - return n.login -} - -func writeNodesList(path string, nodes []*node) error { - fmt.Println("Writing nodes list to", path) - var lines []string - for _, n := range nodes { - lines = append(lines, fmt.Sprintf("%s %s %s", n.url.String(), n.login, n.password)) - } - - return writeLines(lines, path) -} - -func mustReadNodesList(path string) []*node { - fmt.Println("Reading nodes list from", path) - nodesList, err := readLines(path) - helpers.PanicErr(err) - - var nodes []*node - var hasBoot bool - for _, r := range nodesList { - rr := strings.TrimSpace(r) - if len(rr) == 0 { - continue - } - s := strings.Split(rr, " ") - if len(s) != 4 { - helpers.PanicErr(errors.New("wrong nodes list format")) - } - if strings.Contains(s[0], "boot") && hasBoot { - helpers.PanicErr(errors.New("the single boot node must come first")) - } - hasBoot = true - url, err := url.Parse(s[0]) - remoteURL, err := url.Parse(s[1]) - helpers.PanicErr(err) - nodes = append(nodes, &node{ - url: url, - remoteURL: remoteURL, - login: s[2], - password: s[3], - }) - } - return nodes -} diff --git 
a/core/scripts/keystone/src/__snapshots__/02_deploy_keystone_workflows_test.snap b/core/scripts/keystone/src/__snapshots__/02_deploy_keystone_workflows_test.snap new file mode 100755 index 00000000000..8556ca9304c --- /dev/null +++ b/core/scripts/keystone/src/__snapshots__/02_deploy_keystone_workflows_test.snap @@ -0,0 +1,57 @@ + +[TestCreateKeystoneWorkflowJob - 1] + +type = "workflow" +schemaVersion = 1 +name = "keystone_workflow" +workflow = """ +name: "ccip_kiab1" +owner: '0x1234567890abcdef1234567890abcdef12345678' +triggers: + - id: streams-trigger@1.1.0 + config: + maxFrequencyMs: 10000 + feedIds: + - 'feed1' + - 'feed2' + - 'feed3' + +consensus: + - id: offchain_reporting@1.0.0 + ref: ccip_feeds + inputs: + observations: + - $(trigger.outputs) + config: + report_id: '0001' + key_id: 'evm' + aggregation_method: data_feeds + aggregation_config: + feeds: + 'feed1': + deviation: '0.05' + heartbeat: 1800 + 'feed2': + deviation: '0.05' + heartbeat: 1800 + 'feed3': + deviation: '0.05' + heartbeat: 1800 + encoder: EVM + encoder_config: + abi: "(bytes32 FeedID, uint224 Price, uint32 Timestamp)[] Reports" + abi: (bytes32 FeedID, uint224 Price, uint32 Timestamp)[] Reports + +targets: + - id: target_id + inputs: + signed_report: $(ccip_feeds.outputs) + config: + address: '0xabcdefabcdefabcdefabcdefabcdefabcdef' + deltaStage: 5s + schedule: oneAtATime + +""" +workflowOwner = "0x1234567890abcdef1234567890abcdef12345678" + +--- diff --git a/core/scripts/keystone/src/__snapshots__/02_provision_crib_test.snap b/core/scripts/keystone/src/__snapshots__/02_provision_crib_test.snap new file mode 100755 index 00000000000..06532bea727 --- /dev/null +++ b/core/scripts/keystone/src/__snapshots__/02_provision_crib_test.snap @@ -0,0 +1,415 @@ + +[TestGeneratePostprovisionConfig - 1] +helm: + values: + chainlink: + nodes: + 0-ks-wf-bt-node1: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] + [Capabilities.Peering.V2] + Enabled = true + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + 0-ks-wf-node2: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] + [Capabilities.Peering.V2] + Enabled = true + DefaultBootstrappers = ['12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6691'] + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + + [EVM.Workflow] + FromAddress = '0x75cf1355cC4Eb358feaBb9e269a4DAEeB6721DBB' + ForwarderAddress = '0x0100000000000000000000000000000000000000' + 0-ks-wf-node3: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] + [Capabilities.Peering.V2] + Enabled = true + DefaultBootstrappers = ['12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6691'] + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + + [EVM.Workflow] + FromAddress = '0xc6dcE30f492CBD223b9946603192f22D86e783ca' + ForwarderAddress = '0x0100000000000000000000000000000000000000' + 0-ks-wf-node4: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] 
+ [Capabilities.Peering.V2] + Enabled = true + DefaultBootstrappers = ['12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6691'] + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + + [EVM.Workflow] + FromAddress = '0x1289d00A6565Afcd6437B09548F6019EF49696d0' + ForwarderAddress = '0x0100000000000000000000000000000000000000' + 0-ks-wf-node5: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] + [Capabilities.Peering.V2] + Enabled = true + DefaultBootstrappers = ['12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6691'] + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + + [EVM.Workflow] + FromAddress = '0x4b92B0aaC39932B7302676F48e78FA91852DC0EE' + ForwarderAddress = '0x0100000000000000000000000000000000000000' + 1-ks-str-trig-bt-node1: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] + [Capabilities.Peering.V2] + Enabled = true + DefaultBootstrappers = ['12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6691'] + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + 1-ks-str-trig-node2: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] + [Capabilities.Peering.V2] + Enabled = true + DefaultBootstrappers = ['12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6691'] + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + 1-ks-str-trig-node3: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] + [Capabilities.Peering.V2] + Enabled = true + DefaultBootstrappers = ['12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6691'] + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + 1-ks-str-trig-node4: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] + [Capabilities.Peering.V2] + Enabled = true + DefaultBootstrappers = ['12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6691'] + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + 1-ks-str-trig-node5: + image: ${runtime.images.app} + overridesToml: | + [Capabilities] + [Capabilities.Peering] + [Capabilities.Peering.V2] + Enabled = true + DefaultBootstrappers = ['12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6691'] + ListenAddresses = ['0.0.0.0:6691'] + + [Capabilities.ExternalRegistry] + Address = '0x0200000000000000000000000000000000000000' + NetworkID = 'evm' + ChainID = '1337' + + [[EVM]] + ChainID = '1337' + Nodes = [] + ingress: + 
hosts: + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-bt-node1.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-bt-node1 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-node2.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-node2 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-node3.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-node3 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-node4.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-node4 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-node5.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-node5 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-bt-node1.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-bt-node1 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-node2.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-node2 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-node3.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-node3 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-node4.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-node4 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-node5.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-node5 + port: + number: 6688 + +--- + +[TestGeneratePreprovisionConfig - 1] +helm: + values: + chainlink: + nodes: + 0-ks-wf-bt-node1: + image: ${runtime.images.app} + 0-ks-wf-node2: + image: ${runtime.images.app} + 0-ks-wf-node3: + image: ${runtime.images.app} + 0-ks-wf-node4: + image: ${runtime.images.app} + 0-ks-wf-node5: + image: ${runtime.images.app} + 1-ks-str-trig-bt-node1: + image: ${runtime.images.app} + 1-ks-str-trig-node2: + image: ${runtime.images.app} + 1-ks-str-trig-node3: + image: ${runtime.images.app} + 1-ks-str-trig-node4: + image: ${runtime.images.app} + 1-ks-str-trig-node5: + image: ${runtime.images.app} + ingress: + hosts: + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-bt-node1.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-bt-node1 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-node2.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-node2 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-node3.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-node3 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-node4.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-node4 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-0-ks-wf-node5.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-0-ks-wf-node5 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-bt-node1.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-bt-node1 + 
port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-node2.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-node2 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-node3.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-node3 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-node4.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-node4 + port: + number: 6688 + - host: ${DEVSPACE_NAMESPACE}-1-ks-str-trig-node5.${DEVSPACE_INGRESS_BASE_DOMAIN} + http: + paths: + - path: / + backend: + service: + name: app-1-ks-str-trig-node5 + port: + number: 6688 + +--- diff --git a/core/scripts/keystone/src/__snapshots__/02_provision_ocr3_capability_test.snap b/core/scripts/keystone/src/__snapshots__/02_provision_ocr3_capability_test.snap new file mode 100755 index 00000000000..9d38f78899b --- /dev/null +++ b/core/scripts/keystone/src/__snapshots__/02_provision_ocr3_capability_test.snap @@ -0,0 +1,65 @@ + +[TestGenerateOCR3Config - 1] +{ + "F": 1, + "OffchainConfig": "", + "OffchainConfigVersion": 30, + "OnchainConfig": "0x", + "Signers": [ + "011400321bc7af41a634375526006365a31bf32b4cfa7c0520004ca789105da974eec967758ad32b575741d6cb36c1bb3bcfd87b235502cc1753", + "0114005192c43a68efb7a698c0459ff8591a115da128ee052000169008927a60e6c03e99aac6fa268dabaf4d00e117419861d87836211267361b", + "011400ed613636925af2df6ed8332d95028eabcbe95a3f052000ce86b34de67249f92058f69e47961907ebbf8a71c12123f1d2a7cab4874f6365", + "01140053b5bbc0efa2e2d2770029bab5d5a647a260a72b052000f2cb4932d3ce8c10bf67c60d35372a5ff1578255e25c2a119c2dea70e919567a" + ], + "Transmitters": [ + "0x75cf1355cC4Eb358feaBb9e269a4DAEeB6721DBB", + "0xc6dcE30f492CBD223b9946603192f22D86e783ca", + "0x1289d00A6565Afcd6437B09548F6019EF49696d0", + "0x4b92B0aaC39932B7302676F48e78FA91852DC0EE" + ] +} +--- + +[TestGenSpecs - 1] + +type = "bootstrap" +schemaVersion = 1 +name = "ocr3_bootstrap" +contractID = "0xB29934624cAe3765E33115A9530a13f5aEC7fa8A" +relay = "evm" + +[relayConfig] +chainID = "1337" +providerType = "ocr3-capability" + + + +type = "offchainreporting2" +schemaVersion = 1 +name = "ocr3_oracle" +contractID = "0xB29934624cAe3765E33115A9530a13f5aEC7fa8A" +ocrKeyBundleID = "20ccdc97afdf467465590115e3da4e5eb591bf5f43808e81a5d0807cd889b3c7" +p2pv2Bootstrappers = [ + "12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq@app-0-ks-wf-bt-node1:6690", +] +relay = "evm" +pluginType = "plugin" +transmitterID = "12D3KooWHhXyDmHB6D1UQosLXmhczw3zxB3DLYBuq9Unb4iCD4Sc" + +[relayConfig] +chainID = "1337" + +[pluginConfig] +command = "chainlink-ocr3-capability" +ocrVersion = 3 +pluginName = "ocr-capability" +providerType = "ocr3-capability" +telemetryType = "plugin" + +[onchainSigningStrategy] +strategyName = 'multi-chain' +[onchainSigningStrategy.config] +evm = "20ccdc97afdf467465590115e3da4e5eb591bf5f43808e81a5d0807cd889b3c7" +aptos = "ac364cec9fe7d9ea1035fc511e5b2f30900caa6e65ac0501168005d05129e088" + +--- diff --git a/core/scripts/keystone/src/__snapshots__/02_provision_streams_trigger_capability_test.snap b/core/scripts/keystone/src/__snapshots__/02_provision_streams_trigger_capability_test.snap new file mode 100755 index 00000000000..07ac61b7264 --- /dev/null +++ b/core/scripts/keystone/src/__snapshots__/02_provision_streams_trigger_capability_test.snap @@ -0,0 +1,50 @@ + +[TestCreateMercuryV3Job - 1] + +type = "offchainreporting2" +schemaVersion 
= 1 +name = "mercury-BTC/USD" +p2pv2Bootstrappers = ["crib-henry-keystone-node1.main.stage.cldev.sh"] +forwardingAllowed = false +maxTaskDuration = "1s" +contractID = "0x0700000000000000000000000000000000000000" +feedID = "0x0100000000000000000000000000000000000000000000000000000000000000" +contractConfigTrackerPollInterval = "1s" +ocrKeyBundleID = "ocr_key_bundle_id" +relay = "evm" +pluginType = "mercury" +transmitterID = "node_csa_key" +observationSource = """ + price [type=bridge name="bridge_name" timeout="50ms" requestData=""]; + + benchmark_price [type=jsonparse path="result,mid" index=0]; + price -> benchmark_price; + + bid_price [type=jsonparse path="result,bid" index=1]; + price -> bid_price; + + ask_price [type=jsonparse path="result,ask" index=2]; + price -> ask_price; +""" + +[relayConfig] +enableTriggerCapability = true +chainID = "123456" + +--- + +[TestCreateMercuryBootstrapJob - 1] + +type = "bootstrap" +relay = "evm" +schemaVersion = 1 +name = "boot-BTC/USD" +contractID = "0x0700000000000000000000000000000000000000" +feedID = "0x0100000000000000000000000000000000000000000000000000000000000000" +contractConfigTrackerPollInterval = "1s" + +[relayConfig] +chainID = 123456 +enableTriggerCapability = true + +--- diff --git a/core/scripts/keystone/src/__snapshots__/03_gen_crib_cluster_overrides_cmd_test.snap b/core/scripts/keystone/src/__snapshots__/03_gen_crib_cluster_overrides_cmd_test.snap deleted file mode 100755 index 08b79a9f4f9..00000000000 --- a/core/scripts/keystone/src/__snapshots__/03_gen_crib_cluster_overrides_cmd_test.snap +++ /dev/null @@ -1,44 +0,0 @@ - -[TestGenerateCribConfig - 1] -helm: - values: - chainlink: - nodes: - node1: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - node2: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - [EVM.Workflow] - FromAddress = '0x8B60FDcc9CAC8ea476b31d17011CB204471431d9' - ForwarderAddress = '0x1234567890abcdef' - node3: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - [EVM.Workflow] - FromAddress = '0x6620F516F29979B214e2451498a057FDd3a0A85d' - ForwarderAddress = '0x1234567890abcdef' - node4: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - [EVM.Workflow] - FromAddress = '0xFeB61E22FCf4F9740c9D96b05199F195bd61A7c2' - ForwarderAddress = '0x1234567890abcdef' - node5: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - [EVM.Workflow] - FromAddress = '0x882Fd04D78A7e7D386Dd5b550f19479E5494B0B2' - ForwarderAddress = '0x1234567890abcdef' ---- diff --git a/core/scripts/keystone/src/__snapshots__/88_gen_jobspecs_test.snap b/core/scripts/keystone/src/__snapshots__/88_gen_jobspecs_test.snap deleted file mode 100755 index c0c7c7d7e67..00000000000 --- a/core/scripts/keystone/src/__snapshots__/88_gen_jobspecs_test.snap +++ /dev/null @@ -1,140 +0,0 @@ - -[TestGenSpecs - 1] -Bootstrap: -Host: crib-henry-keystone-node1.main.stage.cldev.sh -type = "bootstrap" -schemaVersion = 1 -name = "Keystone boot" -contractID = "0xB29934624cAe3765E33115A9530a13f5aEC7fa8A" -relay = "evm" - -[relayConfig] -chainID = "11155111" -providerType = "ocr3-capability" - -Oracles: -Oracle 0: -Host: crib-henry-keystone-node2.main.stage.cldev.sh -type = "offchainreporting2" -schemaVersion = 1 -name = "Keystone" -contractID = "0xB29934624cAe3765E33115A9530a13f5aEC7fa8A" -ocrKeyBundleID = "b3df4d8748b67731a1112e8b45a764941974f5590c93672eebbc4f3504dd10ed" -p2pv2Bootstrappers = [ - 
"12D3KooWNmhKZL1XW4Vv3rNjLXzJ6mqcVerihdijjGYuexPrFUFZ@crib-henry-keystone-node1.main.stage.cldev.sh:6690", -] -relay = "evm" -pluginType = "plugin" -transmitterID = "0x8B60FDcc9CAC8ea476b31d17011CB204471431d9" - -[relayConfig] -chainID = "11155111" - -[pluginConfig] -command = "chainlink-ocr3-capability" -ocrVersion = 3 -pluginName = "ocr-capability" -providerType = "ocr3-capability" -telemetryType = "plugin" - -[onchainSigningStrategy] -strategyName = 'multi-chain' -[onchainSigningStrategy.config] -evm = "b3df4d8748b67731a1112e8b45a764941974f5590c93672eebbc4f3504dd10ed" -aptos = "9bebfa953e7a7522746f72b4023308de36db626f3e0bcb9033407b8a183e8bfb" - --------------------------------- -Oracle 1: -Host: crib-henry-keystone-node3.main.stage.cldev.sh -type = "offchainreporting2" -schemaVersion = 1 -name = "Keystone" -contractID = "0xB29934624cAe3765E33115A9530a13f5aEC7fa8A" -ocrKeyBundleID = "38459ae37f29f2c1fde0f25972a973322be8cada82acf43f464756836725be97" -p2pv2Bootstrappers = [ - "12D3KooWNmhKZL1XW4Vv3rNjLXzJ6mqcVerihdijjGYuexPrFUFZ@crib-henry-keystone-node1.main.stage.cldev.sh:6690", -] -relay = "evm" -pluginType = "plugin" -transmitterID = "0x6620F516F29979B214e2451498a057FDd3a0A85d" - -[relayConfig] -chainID = "11155111" - -[pluginConfig] -command = "chainlink-ocr3-capability" -ocrVersion = 3 -pluginName = "ocr-capability" -providerType = "ocr3-capability" -telemetryType = "plugin" - -[onchainSigningStrategy] -strategyName = 'multi-chain' -[onchainSigningStrategy.config] -evm = "38459ae37f29f2c1fde0f25972a973322be8cada82acf43f464756836725be97" -aptos = "9bebfa953e7a7522746f72b4023308de36db626f3e0bcb9033407b8a183e8bfc" - --------------------------------- -Oracle 2: -Host: crib-henry-keystone-node4.main.stage.cldev.sh -type = "offchainreporting2" -schemaVersion = 1 -name = "Keystone" -contractID = "0xB29934624cAe3765E33115A9530a13f5aEC7fa8A" -ocrKeyBundleID = "b5dbc4c9da983cddde2e3226b85807eb7beaf818694a22576af4d80f352702ed" -p2pv2Bootstrappers = [ - "12D3KooWNmhKZL1XW4Vv3rNjLXzJ6mqcVerihdijjGYuexPrFUFZ@crib-henry-keystone-node1.main.stage.cldev.sh:6690", -] -relay = "evm" -pluginType = "plugin" -transmitterID = "0xFeB61E22FCf4F9740c9D96b05199F195bd61A7c2" - -[relayConfig] -chainID = "11155111" - -[pluginConfig] -command = "chainlink-ocr3-capability" -ocrVersion = 3 -pluginName = "ocr-capability" -providerType = "ocr3-capability" -telemetryType = "plugin" - -[onchainSigningStrategy] -strategyName = 'multi-chain' -[onchainSigningStrategy.config] -evm = "b5dbc4c9da983cddde2e3226b85807eb7beaf818694a22576af4d80f352702ed" -aptos = "9bebfa953e7a7522746f72b4023308de36db626f3e0bcb9033407b8a183e8bfd" - --------------------------------- -Oracle 3: -Host: crib-henry-keystone-node5.main.stage.cldev.sh -type = "offchainreporting2" -schemaVersion = 1 -name = "Keystone" -contractID = "0xB29934624cAe3765E33115A9530a13f5aEC7fa8A" -ocrKeyBundleID = "260d5c1a618cdf5324509d7db95f5a117511864ebb9e1f709e8969339eb225af" -p2pv2Bootstrappers = [ - "12D3KooWNmhKZL1XW4Vv3rNjLXzJ6mqcVerihdijjGYuexPrFUFZ@crib-henry-keystone-node1.main.stage.cldev.sh:6690", -] -relay = "evm" -pluginType = "plugin" -transmitterID = "0x882Fd04D78A7e7D386Dd5b550f19479E5494B0B2" - -[relayConfig] -chainID = "11155111" - -[pluginConfig] -command = "chainlink-ocr3-capability" -ocrVersion = 3 -pluginName = "ocr-capability" -providerType = "ocr3-capability" -telemetryType = "plugin" - -[onchainSigningStrategy] -strategyName = 'multi-chain' -[onchainSigningStrategy.config] -evm = "260d5c1a618cdf5324509d7db95f5a117511864ebb9e1f709e8969339eb225af" 
-aptos = "9bebfa953e7a7522746f72b4023308de36db626f3e0bcb9033407b8a183e8bfe" - - ---- diff --git a/core/scripts/keystone/src/__snapshots__/88_gen_ocr3_config_test.snap b/core/scripts/keystone/src/__snapshots__/88_gen_ocr3_config_test.snap deleted file mode 100755 index eac3cdaff4c..00000000000 --- a/core/scripts/keystone/src/__snapshots__/88_gen_ocr3_config_test.snap +++ /dev/null @@ -1,23 +0,0 @@ - -[TestGenerateOCR3Config - 1] -{ - "F": 1, - "OffchainConfig": "", - "OffchainConfigVersion": 30, - "OnchainConfig": "0x", - "Signers": [ - "011400a2402db8e549f094ea31e1c0edd77623f4ca5b12052000ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4a", - "0114004af19c802b244d1d085492c3946391c965e10519052000ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4b", - "01140061925685d2b80b121537341d063c4e57b2f9323c052000ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4c", - "011400fd97efd53fc20acc098fcd746c04d8d7540d97e0052000ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4d", - "011400a0b67dc5345a71d02b396147ae2cb75dda63cbe9052000ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4e" - ], - "Transmitters": [ - "0xF4e7e516146c8567F8E8be0ED1f1A92798628d35", - "0x8B60FDcc9CAC8ea476b31d17011CB204471431d9", - "0x6620F516F29979B214e2451498a057FDd3a0A85d", - "0xFeB61E22FCf4F9740c9D96b05199F195bd61A7c2", - "0x882Fd04D78A7e7D386Dd5b550f19479E5494B0B2" - ] -} ---- diff --git a/core/scripts/keystone/src/external-adapter/.goreleaser.yaml b/core/scripts/keystone/src/external-adapter/.goreleaser.yaml new file mode 100644 index 00000000000..524be367e09 --- /dev/null +++ b/core/scripts/keystone/src/external-adapter/.goreleaser.yaml @@ -0,0 +1,49 @@ +project_name: kiab-mock-external-adapter +version: 2 + +builds: + - targets: + - go_first_class + no_unique_dist_dir: true + binary: kiab-mock-external-adapter + env: + - CGO_ENABLED=0 + +dockers: + - id: linux-arm64 + use: buildx + goos: linux + goarch: arm64 + image_templates: + - "{{ .Env.IMAGE }}" + build_flag_templates: + - --platform=linux/arm64 + + - id: linux-amd64 + use: buildx + goos: linux + goarch: amd64 + image_templates: + - "{{ .Env.IMAGE }}" + build_flag_templates: + - --platform=linux/amd64 +docker_manifests: + - name_template: '{{ .Env.IMAGE }}' + image_templates: + - '{{ .Env.IMAGE }}' +archives: + - format: binary + +release: + disable: true +changelog: + disable: true + +nightly: + version_template: "{{ .ProjectName }}-{{ .ShortCommit }}" + +snapshot: + version_template: "{{ .ProjectName }}-{{ .ShortCommit }}" + +partial: + by: target diff --git a/core/scripts/keystone/src/external-adapter/99_external_adapter.go b/core/scripts/keystone/src/external-adapter/99_external_adapter.go new file mode 100644 index 00000000000..8af035f30fd --- /dev/null +++ b/core/scripts/keystone/src/external-adapter/99_external_adapter.go @@ -0,0 +1,154 @@ +package main + +import ( + "fmt" + "math/rand" + "net" + "net/http" + "net/http/httptest" + "os" + "strconv" + "sync" + "time" +) + +func PanicErr(err error) { + if err != nil { + panic(err) + } +} + +// Price struct encapsulates bid, mid, ask values along with a mutex for synchronization +type Price struct { + mu sync.RWMutex + Bid float64 + Mid float64 + Ask float64 +} + +// Update safely updates the price values within the specified bounds +func (p *Price) Update(step, floor, ceiling float64) { + p.mu.Lock() + defer p.mu.Unlock() + + p.Mid = adjustValue(p.Mid, step, floor, ceiling) + p.Bid = adjustValue(p.Mid, step, floor, p.Mid) + p.Ask = adjustValue(p.Mid, 
step, p.Mid, ceiling) +} + +// GetSnapshot safely retrieves a copy of the current price values +func (p *Price) GetSnapshot() (bid, mid, ask float64) { + p.mu.RLock() + defer p.mu.RUnlock() + return p.Bid, p.Mid, p.Ask +} + +func main() { + // Get initial values from environment variables or use defaults + btcUsdInitialValue := getInitialValue("BTCUSD_INITIAL_VALUE", 1000.0) + linkInitialValue := getInitialValue("LINK_INITIAL_VALUE", 11.0) + nativeInitialValue := getInitialValue("NATIVE_INITIAL_VALUE", 2400.0) + + pctBounds := 0.3 + + // Start external adapters on different ports + externalAdapter(btcUsdInitialValue, "4001", pctBounds) + externalAdapter(linkInitialValue, "4002", pctBounds) + externalAdapter(nativeInitialValue, "4003", pctBounds) + + // Block main goroutine indefinitely + select {} +} + +// getInitialValue retrieves the initial value from the environment or returns a default +func getInitialValue(envVar string, defaultValue float64) float64 { + valueEnv := os.Getenv(envVar) + if valueEnv == "" { + fmt.Printf("%s not set, using default value: %.4f\n", envVar, defaultValue) + return defaultValue + } + fmt.Printf("%s set to %s\n", envVar, valueEnv) + val, err := strconv.ParseFloat(valueEnv, 64) + PanicErr(err) + return val +} + +// externalAdapter sets up a mock external adapter server for a specific asset +func externalAdapter(initialValue float64, port string, pctBounds float64) *httptest.Server { + // Create a custom listener on the specified port + listener, err := net.Listen("tcp", "0.0.0.0:"+port) + if err != nil { + panic(err) + } + + // Initialize the Price struct + price := &Price{ + Bid: initialValue, + Mid: initialValue, + Ask: initialValue, + } + + step := initialValue * pctBounds / 10 + ceiling := initialValue * (1 + pctBounds) + floor := initialValue * (1 - pctBounds) + + // Perform initial adjustment to set bid and ask + price.Update(step, floor, ceiling) + + // Start a goroutine to periodically update the price + go func() { + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + for range ticker.C { + price.Update(step, floor, ceiling) + fmt.Printf("Updated prices on port %s: bid=%.4f, mid=%.4f, ask=%.4f\n", port, price.Bid, price.Mid, price.Ask) + } + }() + + handler := http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + bid, mid, ask := price.GetSnapshot() + + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusOK) + resp := fmt.Sprintf(`{"result": {"bid": %.4f, "mid": %.4f, "ask": %.4f}}`, bid, mid, ask) + if _, err := res.Write([]byte(resp)); err != nil { + fmt.Printf("failed to write response: %v\n", err) + } + }) + + // Create and start the test server + ea := &httptest.Server{ + Listener: listener, + Config: &http.Server{ + Handler: handler, + ReadHeaderTimeout: 5 * time.Second, + }, + } + ea.Start() + + fmt.Printf("Mock external adapter started at %s\n", ea.URL) + fmt.Printf("Initial value: %.4f, Floor: %.4f, Ceiling: %.4f\n", initialValue, floor, ceiling) + return ea +} + +// adjustValue takes a starting value and randomly shifts it up or down by a step. +// It ensures that the value stays within the specified bounds. 
+func adjustValue(start, step, floor, ceiling float64) float64 { + // Randomly choose to increase or decrease the value + // #nosec G404 + if rand.Intn(2) == 0 { + step = -step + } + + // Apply the step to the starting value + newValue := start + step + + // Ensure the value is within the bounds + if newValue < floor { + newValue = floor + } else if newValue > ceiling { + newValue = ceiling + } + + return newValue +} diff --git a/core/scripts/keystone/src/external-adapter/Dockerfile b/core/scripts/keystone/src/external-adapter/Dockerfile new file mode 100644 index 00000000000..714d9397c34 --- /dev/null +++ b/core/scripts/keystone/src/external-adapter/Dockerfile @@ -0,0 +1,5 @@ +FROM scratch + +COPY ./kiab-mock-external-adapter / + +ENTRYPOINT ["/kiab-mock-external-adapter"] diff --git a/core/scripts/keystone/src/testdata/NodeList.txt b/core/scripts/keystone/src/testdata/NodeList.txt deleted file mode 100644 index 6fb65dded69..00000000000 --- a/core/scripts/keystone/src/testdata/NodeList.txt +++ /dev/null @@ -1,5 +0,0 @@ -https://local-node1 https://crib-henry-keystone-node1.main.stage.cldev.sh notreal@fakeemail.ch fj293fbBnlQ!f9vNs -https://local-node2 https://crib-henry-keystone-node2.main.stage.cldev.sh notreal@fakeemail.ch fj293fbBnlQ!f9vNs -https://local-node3 https://crib-henry-keystone-node3.main.stage.cldev.sh notreal@fakeemail.ch fj293fbBnlQ!f9vNs -https://local-node4 https://crib-henry-keystone-node4.main.stage.cldev.sh notreal@fakeemail.ch fj293fbBnlQ!f9vNs -https://local-node5 https://crib-henry-keystone-node5.main.stage.cldev.sh notreal@fakeemail.ch fj293fbBnlQ!f9vNs diff --git a/core/scripts/keystone/src/testdata/PublicKeys.json b/core/scripts/keystone/src/testdata/PublicKeys.json deleted file mode 100644 index b29e8290895..00000000000 --- a/core/scripts/keystone/src/testdata/PublicKeys.json +++ /dev/null @@ -1,57 +0,0 @@ -[ - { - "AptosBundleID": "9bebfa953e7a7522746f72b4023308de36db626f3e0bcb9033407b8a183e8bfa", - "AptosOnchainPublicKey": "ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4a", - "EthAddress": "0xF4e7e516146c8567F8E8be0ED1f1A92798628d35", - "P2PPeerID": "12D3KooWNmhKZL1XW4Vv3rNjLXzJ6mqcVerihdijjGYuexPrFUFZ", - "OCR2BundleID": "2f92c96da20fbe39c89e59516e3a7473254523316887394e406527c72071d3db", - "OCR2OnchainPublicKey": "a2402db8e549f094ea31e1c0edd77623f4ca5b12", - "OCR2OffchainPublicKey": "3ca9918cd2787de8f9aff91f220f30a5cc54c394f73e173b12c93368bd7072ad", - "OCR2ConfigPublicKey": "19904debd03994fe9ea411cda7a6b2f01f20a3fe803df0fed67aaf00cc99113f", - "CSAPublicKey": "csa_dbae6965bad0b0fa95ecc34a602eee1c0c570ddc29b56502e400d18574b8c3df" - }, - { - "AptosBundleID": "9bebfa953e7a7522746f72b4023308de36db626f3e0bcb9033407b8a183e8bfb", - "AptosOnchainPublicKey": "ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4b", - "EthAddress": "0x8B60FDcc9CAC8ea476b31d17011CB204471431d9", - "P2PPeerID": "12D3KooWFUjV73ZYkAMhS2cVwte3kXDWD8Ybyx3u9CEDHNoeEhBH", - "OCR2BundleID": "b3df4d8748b67731a1112e8b45a764941974f5590c93672eebbc4f3504dd10ed", - "OCR2OnchainPublicKey": "4af19c802b244d1d085492c3946391c965e10519", - "OCR2OffchainPublicKey": "365b9e1c3c945fc3f51afb25772f0a5a1f1547935a4b5dc89c012f590709fefe", - "OCR2ConfigPublicKey": "15ff12569d11b8ff9f17f8999ea928d03a439f3fb116661cbc4669a0a3192775", - "CSAPublicKey": "csa_c5cc655a9c19b69626519c4a72c44a94a3675daeba9c16cc23e010a7a6dac1be" - }, - { - "AptosBundleID": "9bebfa953e7a7522746f72b4023308de36db626f3e0bcb9033407b8a183e8bfc", - "AptosOnchainPublicKey": 
"ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4c", - "EthAddress": "0x6620F516F29979B214e2451498a057FDd3a0A85d", - "P2PPeerID": "12D3KooWRTtH2WWrztD87Do1kXePSmGjyU4r7mZVWThmqTGgdbUC", - "OCR2BundleID": "38459ae37f29f2c1fde0f25972a973322be8cada82acf43f464756836725be97", - "OCR2OnchainPublicKey": "61925685d2b80b121537341d063c4e57b2f9323c", - "OCR2OffchainPublicKey": "7fe2dbd9f9fb96f7dbbe0410e32d435ad67dae6c91410189fe5664cf3057ef10", - "OCR2ConfigPublicKey": "2f02fd80b362e1c7acf91680fd48c062718233acd595a6ae7cbe434e118e6a4f", - "CSAPublicKey": "csa_7407fc90c70895c0fb2bdf385e2e4918364bec1f7a74bad7fdf696bffafbcab8" - }, - { - "AptosBundleID": "9bebfa953e7a7522746f72b4023308de36db626f3e0bcb9033407b8a183e8bfd", - "AptosOnchainPublicKey": "ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4d", - "EthAddress": "0xFeB61E22FCf4F9740c9D96b05199F195bd61A7c2", - "P2PPeerID": "12D3KooWMTZnZtcVK4EJsjkKsV9qXNoNRSjT62CZi3tKkXGaCsGh", - "OCR2BundleID": "b5dbc4c9da983cddde2e3226b85807eb7beaf818694a22576af4d80f352702ed", - "OCR2OnchainPublicKey": "fd97efd53fc20acc098fcd746c04d8d7540d97e0", - "OCR2OffchainPublicKey": "91b393bb5e6bd6fd9de23845bcd0e0d9b0dd28a1d65d3cfb1fce9f91bd3d8c19", - "OCR2ConfigPublicKey": "09eb53924ff8b33a08b4eae2f3819015314ce6e8864ac4f86e97caafd4181506", - "CSAPublicKey": "csa_ef55caf17eefc2a9d547b5a3978d396bd237c73af99cd849a4758701122e3cba" - }, - { - "AptosBundleID": "9bebfa953e7a7522746f72b4023308de36db626f3e0bcb9033407b8a183e8bfe", - "AptosOnchainPublicKey": "ea551e503b93a1c9ae26262b4db8f66db4cbe5ddcb6039e29d2665a634d48e4e", - "EthAddress": "0x882Fd04D78A7e7D386Dd5b550f19479E5494B0B2", - "P2PPeerID": "12D3KooWRsM9yordRQDhLgbErH8WMMGz1bC1J4hR5gAGvMWu8goN", - "OCR2BundleID": "260d5c1a618cdf5324509d7db95f5a117511864ebb9e1f709e8969339eb225af", - "OCR2OnchainPublicKey": "a0b67dc5345a71d02b396147ae2cb75dda63cbe9", - "OCR2OffchainPublicKey": "4f42ef42e5cc351dbbd79c29ef33af25c0250cac84837c1ff997bc111199d07e", - "OCR2ConfigPublicKey": "3b90249731beb9e4f598371f0b96c3babf47bcc62121ebc9c195e3c33e4fd708", - "CSAPublicKey": "csa_1b874ac2d54b966cec5a8358678ca6f030261aabf3372ce9dbea2d4eb9cdab3d" - } -] \ No newline at end of file diff --git a/core/scripts/keystone/src/testdata/node_sets.json b/core/scripts/keystone/src/testdata/node_sets.json new file mode 100644 index 00000000000..b5502a0ed53 --- /dev/null +++ b/core/scripts/keystone/src/testdata/node_sets.json @@ -0,0 +1,298 @@ +{ + "Workflow": { + "Name": "workflow", + "Prefix": "ks-wf-", + "Nodes": [ + { + "URL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-bt-node1.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-bt-node1.local", + "path": "" + }, + "ServiceName": "app-0-ks-wf-bt-node1", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + }, + { + "URL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-node2.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-node2.local", + "path": "" + }, + "ServiceName": "app-0-ks-wf-node2", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + }, + { + "URL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-node3.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-node3.local", + "path": "" + }, + "ServiceName": 
"app-0-ks-wf-node3", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + }, + { + "URL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-node4.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-node4.local", + "path": "" + }, + "ServiceName": "app-0-ks-wf-node4", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + }, + { + "URL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-node5.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-0-ks-wf-node5.local", + "path": "" + }, + "ServiceName": "app-0-ks-wf-node5", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + } + ], + "NodeKeys": [ + { + "AptosAccount": "38476e214b5c60e642019b38db9f06ce6c5a9bcb987d2bfbbbe750195aa7e964", + "EthAddress": "0x568C859E34F210a23847acE0D4960dB74f359dC4", + "P2PPeerID": "12D3KooWFSmZaLFF1nu3mzxPKj43F89WgVDqkpvwFUHBfMHSqpVq", + "CSAPublicKey": "981d781740ff79bb181a4c70390bd54e936f2d9211f5b20c708205b481a8efcc", + "OCR2BundleID": "782e4d14d0f53071ab7a45b9085eb99beed3350e7ab72d5edd4429169e5c87ef", + "OCR2OnchainPublicKey": "357ddc6c0fc6510ec67edbb9a63819dcb47f1506", + "OCR2OffchainPublicKey": "822488a7e4583eed41e5ab142dd6be721c2cc0f217ceee0912ff2db2a24e404c", + "OCR2ConfigPublicKey": "c92ae7ff9b1cef97bb875917456bc6b83df5f5a76ad00c914869c7068748f31a", + "AptosBundleID": "d4acd2c80860fd1e49363f08426e7e5efa7fcd57356a8aba408732e975d3e9a6", + "AptosOnchainPublicKey": "48b37d91fd2c2c784759021d421e7e6f98078b4343cf8cab378394aa357a49a2" + }, + { + "AptosAccount": "bd4d7e53622621af04a0100db7720508c41f3dd5fe9e97dd57eb9673d82a385d", + "EthAddress": "0x75cf1355cC4Eb358feaBb9e269a4DAEeB6721DBB", + "P2PPeerID": "12D3KooWHhXyDmHB6D1UQosLXmhczw3zxB3DLYBuq9Unb4iCD4Sc", + "CSAPublicKey": "6a4723752c843c8d91e542af5373b3d123eca05b570a6e910f5d2f28737a26f6", + "OCR2BundleID": "20ccdc97afdf467465590115e3da4e5eb591bf5f43808e81a5d0807cd889b3c7", + "OCR2OnchainPublicKey": "321bc7af41a634375526006365a31bf32b4cfa7c", + "OCR2OffchainPublicKey": "a2b7f0b85be445e2c7317bdff74c41acd9c67b5a35cda94ae31da8a9ef886db2", + "OCR2ConfigPublicKey": "faa4cfefb226ae8e86480e019bd5bbd6405c26e22dcea40d2c6f01e583213e21", + "AptosBundleID": "ac364cec9fe7d9ea1035fc511e5b2f30900caa6e65ac0501168005d05129e088", + "AptosOnchainPublicKey": "4ca789105da974eec967758ad32b575741d6cb36c1bb3bcfd87b235502cc1753" + }, + { + "AptosAccount": "9543634f4a73e4cfb284816b32d7ec6b7ac8d07b841f2c1f714750186cc28a5a", + "EthAddress": "0xc6dcE30f492CBD223b9946603192f22D86e783ca", + "P2PPeerID": "12D3KooWEWK8e627u6S5NuwXTgGLakGpn1vzQjyjp6Regu1pcpFC", + "CSAPublicKey": "a715467dd87ea210d685b82a32f30c781031df00c2c974dc5fad9159a7ba240c", + "OCR2BundleID": "c734a4bf01aabe8152b7d0df0b18111ce9b3fe1ef1bca1d6a580967c8e4afc2d", + "OCR2OnchainPublicKey": "5192c43a68efb7a698c0459ff8591a115da128ee", + "OCR2OffchainPublicKey": "5e68d07f82ea0bf7054560775c2919bc955dd7fa73b2a36391a4dc27cbb18fdb", + "OCR2ConfigPublicKey": "ebd66285a029f443277091bc4b191b13e21a9b806ce379584411277a265c8e5c", + "AptosBundleID": "f1dfc3d44ee349b4349f33ce4c0ec3716142e9be3ae3ba9276c616556f6430bb", + "AptosOnchainPublicKey": "169008927a60e6c03e99aac6fa268dabaf4d00e117419861d87836211267361b" + }, + { + 
"AptosAccount": "bcd6fdce3fdcd060fed58fe13be522dc3fb0cff138b0f4f4460392f5e6d88728", + "EthAddress": "0x1289d00A6565Afcd6437B09548F6019EF49696d0", + "P2PPeerID": "12D3KooW9uJ981ocDxTJrPVxMEzPcS14WTJSU1YWH5otcpZSqkUd", + "CSAPublicKey": "9db8641f2067bfdf476e375060a0bd97c21da46d9f54c6ff4f990c6aef882478", + "OCR2BundleID": "129377e1aea4f628b2a3274e528a131175ace13e7cc062b048a34f5b4cf7b512", + "OCR2OnchainPublicKey": "ed613636925af2df6ed8332d95028eabcbe95a3f", + "OCR2OffchainPublicKey": "4eb3e2f1d324804d0adf5169bc187425d3e665c29cddf13bd57ec40ee207ce75", + "OCR2ConfigPublicKey": "effd7d3535e1b6596068085b3e19f9577a536aeacbdeea318cbd870ec678334d", + "AptosBundleID": "2e39d555ec0d1e8795167d72d2a53faa5c537762c144f8a569c601f6bcc95d1d", + "AptosOnchainPublicKey": "ce86b34de67249f92058f69e47961907ebbf8a71c12123f1d2a7cab4874f6365" + }, + { + "AptosAccount": "6549063d427778024fc4230154753c1a30eac88a7a8eab1d36014a3db48c39b3", + "EthAddress": "0x4b92B0aaC39932B7302676F48e78FA91852DC0EE", + "P2PPeerID": "12D3KooWJJC2KgoP1oih7cky9B1wL12d5CBqWFKpdfQgfujmHGyz", + "CSAPublicKey": "8c8b473cc37664a21d548477cd268013256d1d70cd9a137bdfd99da7612a93e0", + "OCR2BundleID": "053f21bfd2bbdb65261308af2d0be48593229d644d8b9e3e5dbe36f85399ae6c", + "OCR2OnchainPublicKey": "53b5bbc0efa2e2d2770029bab5d5a647a260a72b", + "OCR2OffchainPublicKey": "eac02c66802acd9cd998b9b45c52b5b36837bfb829b2838cade040e0155c774a", + "OCR2ConfigPublicKey": "43b9d0c7cace05fd17426dad4386857025a71eb08205690dff5f76224e9c7f5c", + "AptosBundleID": "3de7ab03a5b6b7fcfd196c6101d9302c5e6a5221ebd82b1fd9afa9a6bc9b0445", + "AptosOnchainPublicKey": "f2cb4932d3ce8c10bf67c60d35372a5ff1578255e25c2a119c2dea70e919567a" + } + ] + }, + "StreamsTrigger": { + "Name": "streams-trigger", + "Prefix": "ks-str-trig-", + "Nodes": [ + { + "URL": { + "scheme": "https", + "host": "crib-local-1-ks-str-trig-bt-node1.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-1-ks-str-trig-bt-node1.local", + "path": "" + }, + "ServiceName": "app-1-ks-str-trig-bt-node1", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + }, + { + "URL": { + "scheme": "https", + "host": "crib-local-1-ks-str-trig-node2.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-1-ks-str-trig-node2.local", + "path": "" + }, + "ServiceName": "app-1-ks-str-trig-node2", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + }, + { + "URL": { + "scheme": "https", + "host": "crib-local-1-ks-str-trig-node3.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-1-ks-str-trig-node3.local", + "path": "" + }, + "ServiceName": "app-1-ks-str-trig-node3", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + }, + { + "URL": { + "scheme": "https", + "host": "crib-local-1-ks-str-trig-node4.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-1-ks-str-trig-node4.local", + "path": "" + }, + "ServiceName": "app-1-ks-str-trig-node4", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + }, + { + "URL": { + "scheme": "https", + "host": 
"crib-local-1-ks-str-trig-node5.local", + "path": "" + }, + "RemoteURL": { + "scheme": "https", + "host": "crib-local-1-ks-str-trig-node5.local", + "path": "" + }, + "ServiceName": "app-1-ks-str-trig-node5", + "APILogin": "notreal@fakeemail.ch", + "APIPassword": "fj293fbBnlQ!f9vNs", + "KeystorePassword": "T.tLHkcmwePT/p,]sYuntjwHKAsrhm#4eRs4LuKHwvHejWYAC2JP4M8HimwgmbaZ" + } + ], + "NodeKeys": [ + { + "AptosAccount": "", + "EthAddress": "0xCE59C33dc0807F194ba2566e093398dc5e459840", + "P2PPeerID": "12D3KooWGQkR76gPL7Qt1aYYHtPdU65zG4h36efLAERMwudHqGK3", + "CSAPublicKey": "b3dcb60fcf807453c95628a1e7970b077b5e2bbefb0b159841c28dc1776574de", + "OCR2BundleID": "ca2e13222b556f34ae5bc9cea72e6e7ce8221bf76e2d8b3d91505facdbbd71d3", + "OCR2OnchainPublicKey": "a4938b0552e830b45d481cca9893f319259d365c", + "OCR2OffchainPublicKey": "221fe1f972f01da727dbd6842daf4d322f7cab5a7e93816688be7a52f4088b86", + "OCR2ConfigPublicKey": "1af18519dfc5a22db640f1f8095bafaaeb987ab4e3e7ec366dfaa92df9a6ee7b", + "AptosBundleID": "", + "AptosOnchainPublicKey": "" + }, + { + "AptosAccount": "", + "EthAddress": "0x898D0206d3b3156b92bD499eDFBAfc476543A21F", + "P2PPeerID": "12D3KooWDN2jTmtrpZMpjFFuQdzxHyUecBE3zPLG4goaWG7H2iDa", + "CSAPublicKey": "bdf13ff3944d59a3e1ea5888f86c0bbfe5eb33e2140188516592bf245d080320", + "OCR2BundleID": "3b5d75124ef0f02efd46c08da4b67d36154eed680d7dafd360d976430fe11a7b", + "OCR2OnchainPublicKey": "3815f48818db64aa8d7b225e229a328826f3d1de", + "OCR2OffchainPublicKey": "ca6f4c20c00fb7af411060cfc226d61d11ce5e3532ebbd15786fe53c32244de3", + "OCR2ConfigPublicKey": "7b4e462c24d9076a8822bd4e2bdbd834fcc7898aabd9862dbcdb7df6686d2b41", + "AptosBundleID": "", + "AptosOnchainPublicKey": "" + }, + { + "AptosAccount": "", + "EthAddress": "0xb26dD9CD1Fc4Df2F84170960E2a36ed4a5ac6bB7", + "P2PPeerID": "12D3KooWJTedkdgDmkAms4pEKDnXX7CXshkxwEcK6hWki519YEqF", + "CSAPublicKey": "e95ded4fc733eac43292dc24d8630101cf0c3f40c3764233a6321077eacd0b90", + "OCR2BundleID": "742b2a8a90e59aeb8bb35313d4078ef3f950a9e42a157b7ee9e9abd8c7d97d94", + "OCR2OnchainPublicKey": "57b41043e9a2b21329be48ccf72943af20b322ff", + "OCR2OffchainPublicKey": "0d90fc04c4c0439c184a06478ec1fed7cedfb799b303a6d68c046d90f077b5bd", + "OCR2ConfigPublicKey": "a73c070b60c9a175ac34cfd5c6c7884e73b5c8d43417be3f00bc43ac0fb67f39", + "AptosBundleID": "", + "AptosOnchainPublicKey": "" + }, + { + "AptosAccount": "", + "EthAddress": "0x50b1bB407F0Ecd71416BfA8a1d703F6115112676", + "P2PPeerID": "12D3KooWS1i3x2r34vYCfYrz2ddWUVYtFGNaZvGNNxqzL4Rysd3V", + "CSAPublicKey": "46b50be4d72b03f1601ade056bc754f194d6418283065970d470f6f3243f0705", + "OCR2BundleID": "1232eb7cdb4145ec8b543b76f17fe59c69aa6df31c827a7553aea3a3d340c637", + "OCR2OnchainPublicKey": "dad1e5d6824d7b64df57e9ca3342e4caf66b2c91", + "OCR2OffchainPublicKey": "8a7e9833bf8a55435c82866dbe5f9a9bac63b9a93c8c55664dffe653ab4145a2", + "OCR2ConfigPublicKey": "48ce76ee5ddd8003ebbd10485a092f8bd237f0f855aca8aba5ccac78b593e62d", + "AptosBundleID": "", + "AptosOnchainPublicKey": "" + }, + { + "AptosAccount": "", + "EthAddress": "0xa2340108BE2c563bB89462b464aCF3f88cCd1584", + "P2PPeerID": "12D3KooWLZFWAhTejyR7WwwQndgNGGiW3XcGKK6nNtWbhdgCG1rC", + "CSAPublicKey": "0837cd5a8544664eaf04f68347bdba4cb7ac6af34488f0a26c65b03fe223d5af", + "OCR2BundleID": "2fcbac5dd48e995772d85c47d2744b0df7b74b71d17001f283318cae43b96add", + "OCR2OnchainPublicKey": "469d3c0c484c6846be1176920f1cbdc8abb6f638", + "OCR2OffchainPublicKey": "21aa97506b74e3bfcbe6eb87f2a6add07898fecbddbcec2447832dc343395499", + "OCR2ConfigPublicKey": 
"a6b7e8ca4faf6122165928d82354de3f9334cdb47af058f6a983d11473c21b5f", + "AptosBundleID": "", + "AptosOnchainPublicKey": "" + } + ] + } +} diff --git a/core/scripts/keystone/templates/bootstrap.toml b/core/scripts/keystone/templates/bootstrap.toml deleted file mode 100644 index cdd9065caba..00000000000 --- a/core/scripts/keystone/templates/bootstrap.toml +++ /dev/null @@ -1,9 +0,0 @@ -type = "bootstrap" -schemaVersion = 1 -name = "Keystone boot" -contractID = "{{ ocr_config_contract_address }}" -relay = "evm" - -[relayConfig] -chainID = "{{ chain_id }}" -providerType = "ocr3-capability" diff --git a/core/scripts/keystone/templates/crib-overrides.yaml b/core/scripts/keystone/templates/crib-overrides.yaml deleted file mode 100644 index baeaa5fa1d9..00000000000 --- a/core/scripts/keystone/templates/crib-overrides.yaml +++ /dev/null @@ -1,41 +0,0 @@ -helm: - values: - chainlink: - nodes: - node1: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - node2: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - [EVM.Workflow] - FromAddress = '{{ node_2_address }}' - ForwarderAddress = '{{ forwarder_address }}' - node3: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - [EVM.Workflow] - FromAddress = '{{ node_3_address }}' - ForwarderAddress = '{{ forwarder_address }}' - node4: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - [EVM.Workflow] - FromAddress = '{{ node_4_address }}' - ForwarderAddress = '{{ forwarder_address }}' - node5: - image: ${runtime.images.app} - overridesToml: |- - [[EVM]] - ChainID = '11155111' - [EVM.Workflow] - FromAddress = '{{ node_5_address }}' - ForwarderAddress = '{{ forwarder_address }}' diff --git a/core/scripts/keystone/templates/oracle.toml b/core/scripts/keystone/templates/oracle.toml deleted file mode 100644 index 053baa2223b..00000000000 --- a/core/scripts/keystone/templates/oracle.toml +++ /dev/null @@ -1,27 +0,0 @@ -type = "offchainreporting2" -schemaVersion = 1 -name = "Keystone" -contractID = "{{ ocr_config_contract_address }}" -ocrKeyBundleID = "{{ ocr_key_bundle_id }}" -p2pv2Bootstrappers = [ - "{{ bootstrapper_p2p_id }}", -] -relay = "evm" -pluginType = "plugin" -transmitterID = "{{ transmitter_id }}" - -[relayConfig] -chainID = "{{ chain_id }}" - -[pluginConfig] -command = "chainlink-ocr3-capability" -ocrVersion = 3 -pluginName = "ocr-capability" -providerType = "ocr3-capability" -telemetryType = "plugin" - -[onchainSigningStrategy] -strategyName = 'multi-chain' -[onchainSigningStrategy.config] -evm = "{{ ocr_key_bundle_id }}" -aptos = "{{ aptos_key_bundle_id }}" diff --git a/core/services/job/models.go b/core/services/job/models.go index 26d563c7ac8..63e521c5b3b 100644 --- a/core/services/job/models.go +++ b/core/services/job/models.go @@ -935,7 +935,7 @@ func (w *WorkflowSpec) SDKSpec(ctx context.Context) (sdk.WorkflowSpec, error) { } spec, rawSpec, cid, err := workflowSpecFactory.Spec(ctx, w.Workflow, w.Config) if err != nil { - return sdk.WorkflowSpec{}, err + return sdk.WorkflowSpec{}, fmt.Errorf("spec factory failed: %w", err) } w.sdkWorkflow = &spec w.rawSpec = rawSpec diff --git a/deployment/go.mod b/deployment/go.mod index 2daefd78f11..872c3bd7815 100644 --- a/deployment/go.mod +++ b/deployment/go.mod @@ -9,7 +9,7 @@ replace github.com/smartcontractkit/chainlink/v2 => ../ // Using a separate inline `require` here to avoid surrounding line changes // creating potential merge conflicts. 
-require github.com/smartcontractkit/chainlink/v2 v2.0.0-20241206210521-125d98cdaf66 +require github.com/smartcontractkit/chainlink/v2 v2.0.0-20241212011003-de1a8f5e5b42 require ( github.com/Khan/genqlient v0.7.0 diff --git a/integration-tests/go.mod b/integration-tests/go.mod index cc161d6b92a..f79a1e66a0d 100644 --- a/integration-tests/go.mod +++ b/integration-tests/go.mod @@ -12,8 +12,8 @@ replace github.com/smartcontractkit/chainlink/deployment => ../deployment // Using a separate `require` here to avoid surrounding line changes // creating potential merge conflicts. require ( - github.com/smartcontractkit/chainlink/deployment v0.0.0-20241206210521-125d98cdaf66 - github.com/smartcontractkit/chainlink/v2 v2.0.0-20241206210521-125d98cdaf66 + github.com/smartcontractkit/chainlink/deployment v0.0.0-20241212011003-de1a8f5e5b42 + github.com/smartcontractkit/chainlink/v2 v2.0.0-20241212011003-de1a8f5e5b42 ) require ( diff --git a/integration-tests/load/go.mod b/integration-tests/load/go.mod index fbf204e160d..ab7a58df9e9 100644 --- a/integration-tests/load/go.mod +++ b/integration-tests/load/go.mod @@ -14,9 +14,9 @@ replace github.com/smartcontractkit/chainlink/integration-tests => ../ // Using a separate `require` here to avoid surrounding line changes // creating potential merge conflicts. require ( - github.com/smartcontractkit/chainlink/deployment v0.0.0-20241206210521-125d98cdaf66 - github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20241206210521-125d98cdaf66 - github.com/smartcontractkit/chainlink/v2 v2.0.0-20241206210521-125d98cdaf66 + github.com/smartcontractkit/chainlink/deployment v0.0.0-20241212011003-de1a8f5e5b42 + github.com/smartcontractkit/chainlink/integration-tests v0.0.0-20241212011003-de1a8f5e5b42 + github.com/smartcontractkit/chainlink/v2 v2.0.0-20241212011003-de1a8f5e5b42 ) require ( diff --git a/shell.nix b/shell.nix index 456bbd8a9c1..9860ae78cc5 100644 --- a/shell.nix +++ b/shell.nix @@ -111,11 +111,10 @@ in echo "GORELEASER_KEY must be set in CRIB environments. You can find it in our 1p vault under 'goreleaser-pro-license'." exit 1 fi - ${if stdenv.isDarwin then "source ./nix-darwin-shell-hook.sh" else ""} + ${if stdenv.isDarwin then "source $(git rev-parse --show-toplevel)/nix-darwin-shell-hook.sh" else ""} ''} ''; - GOROOT = "${go}/share/go"; PGDATA = "db"; CL_DATABASE_URL = "postgresql://chainlink:chainlink@localhost:5432/chainlink_test?sslmode=disable"; } diff --git a/tools/goreleaser-config/go.mod b/tools/goreleaser-config/go.mod index f46423b660d..d0e66514869 100644 --- a/tools/goreleaser-config/go.mod +++ b/tools/goreleaser-config/go.mod @@ -1,6 +1,6 @@ module github.com/smartcontractkit/chainlink/tools/goreleaser-config -go 1.23.0 +go 1.22.8 require ( github.com/goreleaser/goreleaser-pro/v2 v2.3.2-pro