From 3d2f239a4199cedcc04aa0e4b02d3a6ab36c6a92 Mon Sep 17 00:00:00 2001 From: krehermann Date: Wed, 26 Jun 2024 17:09:06 -0600 Subject: [PATCH] refactor for multiple go migrations and add test fixtures --- .../plugins/relayer/evm/0002_initial.go | 103 +++--- .../plugins/relayer/evm/0002_initial_test.go | 68 ++-- .../relayer/evm/forwardersDown.tmpl.sql | 21 ++ .../plugins/relayer/evm/forwardersUp.tmpl.sql | 301 ++++++++++++++++++ .../plugins/relayer/evm/headsDown.tmpl.sql | 5 + .../plugins/relayer/evm/headsUp.tmpl.sql | 25 ++ .../plugins/relayer/evm/initDown.tmpl.sql | 5 +- .../plugins/relayer/evm/initUp.tmpl.sql | 8 +- .../relayer/evm/key_statesDown.tmpl.sql | 4 + .../plugins/relayer/evm/key_statesUp.tmpl.sql | 17 + .../migrate/plugins/relayer/evm/migrate.go | 37 +-- .../plugins/relayer/evm/migrate_test.go | 209 +++++++++++- .../migrate/plugins/relayer/evm/resolver.go | 15 +- .../evm/testdata/forwarders/initial.sql | 4 + .../relayer/evm/testdata/heads/initial.sql | 4 + .../evm/testdata/key_states/initial.sql | 4 + 16 files changed, 719 insertions(+), 111 deletions(-) create mode 100644 core/store/migrate/plugins/relayer/evm/forwardersDown.tmpl.sql create mode 100644 core/store/migrate/plugins/relayer/evm/forwardersUp.tmpl.sql create mode 100644 core/store/migrate/plugins/relayer/evm/headsDown.tmpl.sql create mode 100644 core/store/migrate/plugins/relayer/evm/headsUp.tmpl.sql create mode 100644 core/store/migrate/plugins/relayer/evm/key_statesDown.tmpl.sql create mode 100644 core/store/migrate/plugins/relayer/evm/key_statesUp.tmpl.sql create mode 100644 core/store/migrate/plugins/relayer/evm/testdata/forwarders/initial.sql create mode 100644 core/store/migrate/plugins/relayer/evm/testdata/heads/initial.sql create mode 100644 core/store/migrate/plugins/relayer/evm/testdata/key_states/initial.sql diff --git a/core/store/migrate/plugins/relayer/evm/0002_initial.go b/core/store/migrate/plugins/relayer/evm/0002_initial.go index a6c9eaa52b2..7ed7fd60b7f 100644 --- a/core/store/migrate/plugins/relayer/evm/0002_initial.go +++ b/core/store/migrate/plugins/relayer/evm/0002_initial.go @@ -6,60 +6,58 @@ import ( "database/sql" _ "embed" "fmt" - "io" "github.com/pressly/goose/v3" ) -//go:embed initUp.tmpl.sql -var upTmpl string +//go:embed forwardersUp.tmpl.sql +var forwardersUpTmpl string -func resolveUp(out io.Writer, val Cfg) error { - if upTmpl == "" { - return fmt.Errorf("upTmpl is empty") - } - return resolve(out, upTmpl, val) -} +//go:embed forwardersDown.tmpl.sql +var forwardersDownTmpl string -//go:embed initDown.tmpl.sql -var downTmpl string +//go:embed headsUp.tmpl.sql +var headsUpTmpl string -func resolveDown(out io.Writer, val Cfg) error { - return resolve(out, downTmpl, val) -} +//go:embed headsDown.tmpl.sql +var headsDownTmpl string -// Register0002 registers the migration with goose -/* -func Register0002(val Cfg) error { - upSQL := &bytes.Buffer{} - err := resolveUp(upSQL, val) - if err != nil { - return fmt.Errorf("failed to resolve up sql: %w", err) - } - upFunc := func(ctx context.Context, tx *sql.Tx) error { - _, err := tx.ExecContext(ctx, upSQL.String()) - return err - } +//go:embed key_statesUp.tmpl.sql +var keyStatesUpTmpl string - downSQL := &bytes.Buffer{} - err = resolveDown(downSQL, val) - if err != nil { - return fmt.Errorf("failed to resolve down sql: %w", err) - } - downFunc := func(ctx context.Context, tx *sql.Tx) error { - _, err := tx.ExecContext(ctx, downSQL.String()) - return err - } - goose.AddMigrationContext(upFunc, downFunc) - return nil +//go:embed 
key_statesDown.tmpl.sql +var keyStatesDownTmpl string + +type initialMigration struct { + upTmpl string + downTmpl string + version int64 } -*/ -func generate0002(val Cfg) (up *goose.GoFunc, down *goose.GoFunc, err error) { +var ( + forwarderMigration = initialMigration{ + upTmpl: forwardersUpTmpl, + downTmpl: forwardersDownTmpl, + version: 2} + + headsMigration = initialMigration{ + upTmpl: headsUpTmpl, + downTmpl: headsDownTmpl, + version: 3} + + keyStatesMigration = initialMigration{ + upTmpl: keyStatesUpTmpl, + downTmpl: keyStatesDownTmpl, + version: 4} + + initialMigrations = []initialMigration{forwarderMigration, headsMigration, keyStatesMigration} +) + +func generateGoMigration(val Cfg, m initialMigration) (*goose.Migration, error) { upSQL := &bytes.Buffer{} - err = resolveUp(upSQL, val) + err := resolve(upSQL, m.upTmpl, val) if err != nil { - return nil, nil, fmt.Errorf("failed to resolve up sql: %w", err) + return nil, fmt.Errorf("failed to resolve up sql: %w", err) } upFunc := func(ctx context.Context, tx *sql.Tx) error { _, terr := tx.ExecContext(ctx, upSQL.String()) @@ -67,16 +65,27 @@ func generate0002(val Cfg) (up *goose.GoFunc, down *goose.GoFunc, err error) { } downSQL := &bytes.Buffer{} - err = resolveDown(downSQL, val) + err = resolve(downSQL, m.downTmpl, val) if err != nil { - return nil, nil, fmt.Errorf("failed to resolve down sql: %w", err) + return nil, fmt.Errorf("failed to resolve down sql: %w", err) } downFunc := func(ctx context.Context, tx *sql.Tx) error { _, terr := tx.ExecContext(ctx, downSQL.String()) return terr } - up = &goose.GoFunc{RunTx: upFunc} - down = &goose.GoFunc{RunTx: downFunc} - //P goose.AddMigrationContext(upFunc, downFunc) - return up, down, nil + up := &goose.GoFunc{RunTx: upFunc} + down := &goose.GoFunc{RunTx: downFunc} + return goose.NewGoMigration(m.version, up, down), nil +} + +func generateInitialMigrations(val Cfg) ([]*goose.Migration, error) { + migrations := []*goose.Migration{} + for _, m := range initialMigrations { + mig, err := generateGoMigration(val, m) + if err != nil { + return nil, fmt.Errorf("failed to generate migration: %w", err) + } + migrations = append(migrations, mig) + } + return migrations, nil } diff --git a/core/store/migrate/plugins/relayer/evm/0002_initial_test.go b/core/store/migrate/plugins/relayer/evm/0002_initial_test.go index 795574de045..4f4b0993c76 100644 --- a/core/store/migrate/plugins/relayer/evm/0002_initial_test.go +++ b/core/store/migrate/plugins/relayer/evm/0002_initial_test.go @@ -3,6 +3,7 @@ package evm import ( "bytes" _ "embed" + "strings" "testing" "github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big" @@ -12,30 +13,47 @@ import ( ) func Test_resolveup(t *testing.T) { - type args struct { - val Cfg - } - tests := []struct { - name string - args args - wantErr bool - }{ - { - name: "evm template", - args: args{ - val: Cfg{ - Schema: "evm", - ChainID: big.NewI(int64(3266)), - }, + t.Run("resolve up migrations", func(t *testing.T) { + type test struct { + name string + upTmpl string + } + cases := []test{ + { + name: "forwarders", + upTmpl: forwardersUpTmpl, }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - out := &bytes.Buffer{} - err := resolveUp(out, tt.args.val) - require.NoError(t, err) - assert.NotEmpty(t, out.String()) - }) - } + } + for _, tt := range cases { + t.Run("do nothing for evm schema", func(t *testing.T) { + out := &bytes.Buffer{} + err := resolve(out, tt.upTmpl, Cfg{Schema: "evm", ChainID: big.NewI(int64(3266))}) + require.NoError(t, 
err) + assert.Equal(t, "-- Do nothing for `evm` schema for backward compatibility\n", out.String()) + }) + + t.Run("err no chain id", func(t *testing.T) { + out := &bytes.Buffer{} + err := resolve(out, tt.upTmpl, Cfg{Schema: "evm_213"}) + require.Error(t, err) + assert.Empty(t, out.String()) + }) + + t.Run("err no schema", func(t *testing.T) { + out := &bytes.Buffer{} + err := resolve(out, tt.upTmpl, Cfg{ChainID: big.NewI(int64(3266))}) + require.Error(t, err) + assert.Empty(t, out.String()) + }) + + t.Run("ok", func(t *testing.T) { + out := &bytes.Buffer{} + err := resolve(out, tt.upTmpl, Cfg{Schema: "evm_3266", ChainID: big.NewI(int64(3266))}) + require.NoError(t, err) + lines := strings.Split(out.String(), "\n") + assert.Greater(t, len(lines), 2) + assert.Contains(t, out.String(), "CREATE TABLE evm_3266") + }) + } + }) } diff --git a/core/store/migrate/plugins/relayer/evm/forwardersDown.tmpl.sql b/core/store/migrate/plugins/relayer/evm/forwardersDown.tmpl.sql new file mode 100644 index 00000000000..c99df8e79cf --- /dev/null +++ b/core/store/migrate/plugins/relayer/evm/forwardersDown.tmpl.sql @@ -0,0 +1,21 @@ +-- Do nothing for `evm` schema for backward compatibility +{{ if ne .Schema "evm"}} +/* +DROP TABLE {{ .Schema }}.receipts +DROP TABLE {{ .Schema }}.tx_attempts +DROP TABLE {{ .Schema }}.upkeep_states +DROP TABLE {{ .Schema }}.txes +DROP TABLE {{ .Schema }}.logs +DROP TABLE {{ .Schema }}.log_poller_filters +DROP TABLE {{ .Schema }}.log_poller_blocks +DROP TABLE {{ .Schema }}.key_states; +DROP TABLE {{ .Schema }}.heads; +*/ + +-- Copy data from old table to new table +INSERT INTO evm.forwarders (address, created_at, updated_at, evm_chain_id) SELECT address, created_at, updated_at, '{{ .ChainID }}' +FROM {{ .Schema }}.forwarders; + +DROP TABLE {{ .Schema }}.forwarders; +{{ end}} \ No newline at end of file diff --git a/core/store/migrate/plugins/relayer/evm/forwardersUp.tmpl.sql b/core/store/migrate/plugins/relayer/evm/forwardersUp.tmpl.sql new file mode 100644 index 00000000000..cc6aa0f6aeb --- /dev/null +++ b/core/store/migrate/plugins/relayer/evm/forwardersUp.tmpl.sql @@ -0,0 +1,301 @@ +-- Do nothing for `evm` schema for backward compatibility +{{ if ne .Schema "evm"}} + +-- {{ .Schema }}.forwarders definition + +-- Drop table + +-- DROP TABLE {{ .Schema }}.forwarders; + +-- TODO: make idempotent everywhere as needed with IF NOT EXISTS + + +CREATE TABLE {{ .Schema }}.forwarders ( + id bigserial NOT NULL, + address bytea NOT NULL, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, +-- evm_chain_id numeric(78) NOT NULL, + CONSTRAINT chk_address_length CHECK ((octet_length(address) = 20)), + CONSTRAINT evm_forwarders_address_key UNIQUE (address), + CONSTRAINT evm_forwarders_pkey PRIMARY KEY (id) +); +CREATE INDEX idx_forwarders_created_at ON {{ .Schema }}.forwarders USING brin (created_at); +CREATE INDEX idx_forwarders_evm_address ON {{ .Schema }}.forwarders USING btree (address); +--CREATE INDEX idx_forwarders_evm_chain_id ON {{ .Schema }}.forwarders USING btree (evm_chain_id); +CREATE INDEX idx_forwarders_updated_at ON {{ .Schema }}.forwarders USING brin (updated_at); + + +-- Copy data from old table to new table +INSERT INTO {{ .Schema }}.forwarders (address, created_at, updated_at) +SELECT address, created_at, updated_at +FROM evm.forwarders WHERE evm_chain_id = {{ .ChainID }}; + +DELETE FROM evm.forwarders WHERE evm_chain_id = {{ .ChainID }}; + +/* +-- {{ .Schema }}.heads definition + +-- Drop table + +-- DROP TABLE {{ .Schema }}.heads; + +CREATE TABLE {{ 
.Schema }}.heads ( + id bigserial NOT NULL, + hash bytea NOT NULL, + "number" int8 NOT NULL, + parent_hash bytea NOT NULL, + created_at timestamptz NOT NULL, + "timestamp" timestamptz NOT NULL, + l1_block_number int8 NULL, + evm_chain_id numeric(78) NOT NULL, + base_fee_per_gas numeric(78) NULL, + CONSTRAINT chk_hash_size CHECK ((octet_length(hash) = 32)), + CONSTRAINT chk_parent_hash_size CHECK ((octet_length(parent_hash) = 32)), + CONSTRAINT heads_pkey1 PRIMARY KEY (id) +); +CREATE UNIQUE INDEX idx_heads_evm_chain_id_hash ON {{ .Schema }}.heads USING btree (evm_chain_id, hash); +CREATE INDEX idx_heads_evm_chain_id_number ON {{ .Schema }}.heads USING btree (evm_chain_id, number); + + +-- {{ .Schema }}.key_states definition + +-- Drop table + +-- DROP TABLE {{ .Schema }}.key_states; + +CREATE TABLE {{ .Schema }}.key_states ( + id serial4 NOT NULL, + address bytea NOT NULL, + disabled bool DEFAULT false NOT NULL, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, + evm_chain_id numeric(78) NOT NULL, + CONSTRAINT chk_address_length CHECK ((octet_length(address) = 20)), + CONSTRAINT eth_key_states_pkey PRIMARY KEY (id) +); +CREATE INDEX idx_evm_key_states_address ON {{ .Schema }}.key_states USING btree (address); +CREATE UNIQUE INDEX idx_evm_key_states_evm_chain_id_address ON {{ .Schema }}.key_states USING btree (evm_chain_id, address); + + + + + +-- {{ .Schema }}.log_poller_blocks definition + +-- Drop table + +-- DROP TABLE {{ .Schema }}.log_poller_blocks; + +CREATE TABLE {{ .Schema }}.log_poller_blocks ( + evm_chain_id numeric(78) NOT NULL, + block_hash bytea NOT NULL, + block_number int8 NOT NULL, + created_at timestamptz NOT NULL, + block_timestamp timestamptz NOT NULL, + finalized_block_number int8 DEFAULT 0 NOT NULL, + CONSTRAINT block_hash_uniq UNIQUE (evm_chain_id, block_hash), + CONSTRAINT log_poller_blocks_block_number_check CHECK ((block_number > 0)), + CONSTRAINT log_poller_blocks_finalized_block_number_check CHECK ((finalized_block_number >= 0)), + CONSTRAINT log_poller_blocks_pkey PRIMARY KEY (block_number, evm_chain_id) +); +CREATE INDEX idx_evm_log_poller_blocks_order_by_block ON {{ .Schema }}.log_poller_blocks USING btree (evm_chain_id, block_number DESC); + + +-- {{ .Schema }}.log_poller_filters definition + +-- Drop table + +-- DROP TABLE {{ .Schema }}.log_poller_filters; + +CREATE TABLE {{ .Schema }}.log_poller_filters ( + id bigserial NOT NULL, + "name" text NOT NULL, + address bytea NOT NULL, + "event" bytea NOT NULL, + evm_chain_id numeric(78) NULL, + created_at timestamptz NOT NULL, + retention int8 DEFAULT 0 NULL, + topic2 bytea NULL, + topic3 bytea NULL, + topic4 bytea NULL, + max_logs_kept int8 DEFAULT 0 NOT NULL, + logs_per_block int8 DEFAULT 0 NOT NULL, + CONSTRAINT evm_log_poller_filters_address_check CHECK ((octet_length(address) = 20)), + CONSTRAINT evm_log_poller_filters_event_check CHECK ((octet_length(event) = 32)), + CONSTRAINT evm_log_poller_filters_name_check CHECK ((length(name) > 0)), + CONSTRAINT evm_log_poller_filters_pkey PRIMARY KEY (id), + CONSTRAINT log_poller_filters_topic2_check CHECK ((octet_length(topic2) = 32)), + CONSTRAINT log_poller_filters_topic3_check CHECK ((octet_length(topic3) = 32)), + CONSTRAINT log_poller_filters_topic4_check CHECK ((octet_length(topic4) = 32)) +); +CREATE UNIQUE INDEX log_poller_filters_hash_key ON {{ .Schema }}.log_poller_filters USING btree ({{ .Schema }}.f_log_poller_filter_hash(name, evm_chain_id, address, event, topic2, topic3, topic4)); + + +-- {{ .Schema }}.logs definition + +-- Drop 
table + +-- DROP TABLE {{ .Schema }}.logs; + +CREATE TABLE {{ .Schema }}.logs ( + evm_chain_id numeric(78) NOT NULL, + log_index int8 NOT NULL, + block_hash bytea NOT NULL, + block_number int8 NOT NULL, + address bytea NOT NULL, + event_sig bytea NOT NULL, + topics _bytea NOT NULL, + tx_hash bytea NOT NULL, + "data" bytea NOT NULL, + created_at timestamptz NOT NULL, + block_timestamp timestamptz NOT NULL, + CONSTRAINT logs_block_number_check CHECK ((block_number > 0)), + CONSTRAINT logs_pkey PRIMARY KEY (block_hash, log_index, evm_chain_id) +); +CREATE INDEX evm_logs_by_timestamp ON {{ .Schema }}.logs USING btree (evm_chain_id, address, event_sig, block_timestamp, block_number); +CREATE INDEX evm_logs_idx ON {{ .Schema }}.logs USING btree (evm_chain_id, block_number, address, event_sig); +CREATE INDEX evm_logs_idx_data_word_five ON {{ .Schema }}.logs USING btree (address, event_sig, evm_chain_id, "substring"(data, 129, 32)); +CREATE INDEX evm_logs_idx_data_word_four ON {{ .Schema }}.logs USING btree (SUBSTRING(data FROM 97 FOR 32)); +CREATE INDEX evm_logs_idx_data_word_one ON {{ .Schema }}.logs USING btree (SUBSTRING(data FROM 1 FOR 32)); +CREATE INDEX evm_logs_idx_data_word_three ON {{ .Schema }}.logs USING btree (SUBSTRING(data FROM 65 FOR 32)); +CREATE INDEX evm_logs_idx_data_word_two ON {{ .Schema }}.logs USING btree (SUBSTRING(data FROM 33 FOR 32)); +CREATE INDEX evm_logs_idx_topic_four ON {{ .Schema }}.logs USING btree ((topics[4])); +CREATE INDEX evm_logs_idx_topic_three ON {{ .Schema }}.logs USING btree ((topics[3])); +CREATE INDEX evm_logs_idx_topic_two ON {{ .Schema }}.logs USING btree ((topics[2])); +CREATE INDEX evm_logs_idx_tx_hash ON {{ .Schema }}.logs USING btree (tx_hash); +CREATE INDEX idx_evm_logs_ordered_by_block_and_created_at ON {{ .Schema }}.logs USING btree (evm_chain_id, address, event_sig, block_number, created_at); + + +-- {{ .Schema }}.txes definition + +-- Drop table + +-- DROP TABLE {{ .Schema }}.txes; + +CREATE TABLE {{ .Schema }}.txes ( + id bigserial NOT NULL, + nonce int8 NULL, + from_address bytea NOT NULL, + to_address bytea NOT NULL, + encoded_payload bytea NOT NULL, + value numeric(78) NOT NULL, + gas_limit int8 NOT NULL, + error text NULL, + broadcast_at timestamptz NULL, + created_at timestamptz NOT NULL, + state public."eth_txes_state" DEFAULT 'unstarted'::eth_txes_state NOT NULL, + meta jsonb NULL, + subject uuid NULL, + pipeline_task_run_id uuid NULL, + min_confirmations int4 NULL, + evm_chain_id numeric(78) NOT NULL, + transmit_checker jsonb NULL, + initial_broadcast_at timestamptz NULL, + idempotency_key varchar(2000) NULL, + signal_callback bool DEFAULT false NULL, + callback_completed bool DEFAULT false NULL, + CONSTRAINT chk_broadcast_at_is_sane CHECK ((broadcast_at > '2018-12-31 17:00:00-07'::timestamp with time zone)), + CONSTRAINT chk_error_cannot_be_empty CHECK (((error IS NULL) OR (length(error) > 0))), + CONSTRAINT chk_eth_txes_fsm CHECK ((((state = 'unstarted'::eth_txes_state) AND (nonce IS NULL) AND (error IS NULL) AND (broadcast_at IS NULL) AND (initial_broadcast_at IS NULL)) OR ((state = 'in_progress'::eth_txes_state) AND (nonce IS NOT NULL) AND (error IS NULL) AND (broadcast_at IS NULL) AND (initial_broadcast_at IS NULL)) OR ((state = 'fatal_error'::eth_txes_state) AND (error IS NOT NULL)) OR ((state = 'unconfirmed'::eth_txes_state) AND (nonce IS NOT NULL) AND (error IS NULL) AND (broadcast_at IS NOT NULL) AND (initial_broadcast_at IS NOT NULL)) OR ((state = 'confirmed'::eth_txes_state) AND (nonce IS NOT NULL) AND (error IS NULL) 
AND (broadcast_at IS NOT NULL) AND (initial_broadcast_at IS NOT NULL)) OR ((state = 'confirmed_missing_receipt'::eth_txes_state) AND (nonce IS NOT NULL) AND (error IS NULL) AND (broadcast_at IS NOT NULL) AND (initial_broadcast_at IS NOT NULL)))) NOT VALID, + CONSTRAINT chk_from_address_length CHECK ((octet_length(from_address) = 20)), + CONSTRAINT chk_to_address_length CHECK ((octet_length(to_address) = 20)), + CONSTRAINT eth_txes_idempotency_key_key UNIQUE (idempotency_key), + CONSTRAINT eth_txes_pkey PRIMARY KEY (id) +); +CREATE INDEX idx_eth_txes_broadcast_at ON {{ .Schema }}.txes USING brin (broadcast_at); +CREATE INDEX idx_eth_txes_created_at ON {{ .Schema }}.txes USING brin (created_at); +CREATE INDEX idx_eth_txes_from_address ON {{ .Schema }}.txes USING btree (from_address); +CREATE INDEX idx_eth_txes_initial_broadcast_at ON {{ .Schema }}.txes USING brin (initial_broadcast_at); +CREATE INDEX idx_eth_txes_min_unconfirmed_nonce_for_key_evm_chain_id ON {{ .Schema }}.txes USING btree (evm_chain_id, from_address, nonce) WHERE (state = 'unconfirmed'::eth_txes_state); +CREATE UNIQUE INDEX idx_eth_txes_nonce_from_address_per_evm_chain_id ON {{ .Schema }}.txes USING btree (evm_chain_id, from_address, nonce); +CREATE UNIQUE INDEX idx_eth_txes_pipeline_run_task_id ON {{ .Schema }}.txes USING btree (pipeline_task_run_id) WHERE (pipeline_task_run_id IS NOT NULL); +CREATE INDEX idx_eth_txes_state_from_address_evm_chain_id ON {{ .Schema }}.txes USING btree (evm_chain_id, from_address, state) WHERE (state <> 'confirmed'::eth_txes_state); +CREATE INDEX idx_eth_txes_unstarted_subject_id_evm_chain_id ON {{ .Schema }}.txes USING btree (evm_chain_id, subject, id) WHERE ((subject IS NOT NULL) AND (state = 'unstarted'::eth_txes_state)); +CREATE UNIQUE INDEX idx_only_one_in_progress_tx_per_account_id_per_evm_chain_id ON {{ .Schema }}.txes USING btree (evm_chain_id, from_address) WHERE (state = 'in_progress'::eth_txes_state); + + +-- {{ .Schema }}.upkeep_states definition + +-- Drop table + +-- DROP TABLE {{ .Schema }}.upkeep_states; + +CREATE TABLE {{ .Schema }}.upkeep_states ( + id serial4 NOT NULL, + work_id text NOT NULL, + evm_chain_id numeric(20) NOT NULL, + upkeep_id numeric(78) NOT NULL, + completion_state int2 NOT NULL, + ineligibility_reason int2 NOT NULL, + block_number int8 NOT NULL, + inserted_at timestamptz DEFAULT CURRENT_TIMESTAMP NOT NULL, + CONSTRAINT evm_upkeep_states_pkey PRIMARY KEY (id), + CONSTRAINT work_id_len_chk CHECK (((length(work_id) > 0) AND (length(work_id) < 255))) +); +CREATE INDEX idx_evm_upkeep_state_added_at_chain_id ON {{ .Schema }}.upkeep_states USING btree (evm_chain_id, inserted_at); +CREATE UNIQUE INDEX idx_evm_upkeep_state_chainid_workid ON {{ .Schema }}.upkeep_states USING btree (evm_chain_id, work_id); + + +-- {{ .Schema }}.tx_attempts definition + +-- Drop table + +-- DROP TABLE {{ .Schema }}.tx_attempts; + +CREATE TABLE {{ .Schema }}.tx_attempts ( + id bigserial NOT NULL, + eth_tx_id int8 NOT NULL, + gas_price numeric(78) NULL, + signed_raw_tx bytea NOT NULL, + hash bytea NOT NULL, + broadcast_before_block_num int8 NULL, + state public."eth_tx_attempts_state" NOT NULL, + created_at timestamptz NOT NULL, + chain_specific_gas_limit int8 NOT NULL, + tx_type int2 DEFAULT 0 NOT NULL, + gas_tip_cap numeric(78) NULL, + gas_fee_cap numeric(78) NULL, + is_purge_attempt bool DEFAULT false NOT NULL, + CONSTRAINT chk_cannot_broadcast_before_block_zero CHECK (((broadcast_before_block_num IS NULL) OR (broadcast_before_block_num > 0))), + CONSTRAINT 
chk_chain_specific_gas_limit_not_zero CHECK ((chain_specific_gas_limit > 0)), + CONSTRAINT chk_eth_tx_attempts_fsm CHECK ((((state = ANY (ARRAY['in_progress'::eth_tx_attempts_state, 'insufficient_eth'::eth_tx_attempts_state])) AND (broadcast_before_block_num IS NULL)) OR (state = 'broadcast'::eth_tx_attempts_state))), + CONSTRAINT chk_hash_length CHECK ((octet_length(hash) = 32)), + CONSTRAINT chk_legacy_or_dynamic CHECK ((((tx_type = 0) AND (gas_price IS NOT NULL) AND (gas_tip_cap IS NULL) AND (gas_fee_cap IS NULL)) OR ((tx_type = 2) AND (gas_price IS NULL) AND (gas_tip_cap IS NOT NULL) AND (gas_fee_cap IS NOT NULL)))), + CONSTRAINT chk_sanity_fee_cap_tip_cap CHECK (((gas_tip_cap IS NULL) OR (gas_fee_cap IS NULL) OR (gas_tip_cap <= gas_fee_cap))), + CONSTRAINT chk_signed_raw_tx_present CHECK ((octet_length(signed_raw_tx) > 0)), + CONSTRAINT chk_tx_type_is_byte CHECK (((tx_type >= 0) AND (tx_type <= 255))), + CONSTRAINT eth_tx_attempts_pkey PRIMARY KEY (id), + CONSTRAINT eth_tx_attempts_eth_tx_id_fkey FOREIGN KEY (eth_tx_id) REFERENCES {{ .Schema }}.txes(id) ON DELETE CASCADE +); +CREATE INDEX idx_eth_tx_attempts_broadcast_before_block_num ON {{ .Schema }}.tx_attempts USING btree (broadcast_before_block_num); +CREATE INDEX idx_eth_tx_attempts_created_at ON {{ .Schema }}.tx_attempts USING brin (created_at); +CREATE UNIQUE INDEX idx_eth_tx_attempts_hash ON {{ .Schema }}.tx_attempts USING btree (hash); +CREATE INDEX idx_eth_tx_attempts_unbroadcast ON {{ .Schema }}.tx_attempts USING btree (state) WHERE (state <> 'broadcast'::eth_tx_attempts_state); +CREATE UNIQUE INDEX idx_eth_tx_attempts_unique_gas_prices ON {{ .Schema }}.tx_attempts USING btree (eth_tx_id, gas_price); +CREATE UNIQUE INDEX idx_only_one_unbroadcast_attempt_per_eth_tx ON {{ .Schema }}.tx_attempts USING btree (eth_tx_id) WHERE (state <> 'broadcast'::eth_tx_attempts_state); + + +-- {{ .Schema }}.receipts definition + +-- Drop table + +-- DROP TABLE {{ .Schema }}.receipts; + +CREATE TABLE {{ .Schema }}.receipts ( + id bigserial NOT NULL, + tx_hash bytea NOT NULL, + block_hash bytea NOT NULL, + block_number int8 NOT NULL, + transaction_index int8 NOT NULL, + receipt jsonb NOT NULL, + created_at timestamptz NOT NULL, + CONSTRAINT chk_hash_length CHECK (((octet_length(tx_hash) = 32) AND (octet_length(block_hash) = 32))), + CONSTRAINT eth_receipts_pkey PRIMARY KEY (id), + CONSTRAINT eth_receipts_tx_hash_fkey FOREIGN KEY (tx_hash) REFERENCES {{ .Schema }}.tx_attempts(hash) ON DELETE CASCADE +); +CREATE INDEX idx_eth_receipts_block_number ON {{ .Schema }}.receipts USING btree (block_number); +CREATE INDEX idx_eth_receipts_created_at ON {{ .Schema }}.receipts USING brin (created_at); +CREATE UNIQUE INDEX idx_eth_receipts_unique ON {{ .Schema }}.receipts USING btree (tx_hash, block_hash); +*/ +{{ end }} \ No newline at end of file diff --git a/core/store/migrate/plugins/relayer/evm/headsDown.tmpl.sql b/core/store/migrate/plugins/relayer/evm/headsDown.tmpl.sql new file mode 100644 index 00000000000..5d45793cc35 --- /dev/null +++ b/core/store/migrate/plugins/relayer/evm/headsDown.tmpl.sql @@ -0,0 +1,8 @@ +-- Do nothing for `evm` schema for backward compatibility +{{ if ne .Schema "evm"}} +INSERT INTO evm.heads (hash, "number", parent_hash, created_at, "timestamp", l1_block_number, evm_chain_id, base_fee_per_gas) +SELECT hash, "number", parent_hash, created_at, "timestamp", l1_block_number, '{{ .ChainID }}', base_fee_per_gas +FROM {{ .Schema }}.heads; + +DROP TABLE {{ .Schema }}.heads; +{{ end}} diff --git a/core/store/migrate/plugins/relayer/evm/headsUp.tmpl.sql b/core/store/migrate/plugins/relayer/evm/headsUp.tmpl.sql new file mode 
100644 index 00000000000..ac575af7352 --- /dev/null +++ b/core/store/migrate/plugins/relayer/evm/headsUp.tmpl.sql @@ -0,0 +1,25 @@ +-- Do nothing for `evm` schema for backward compatibility +{{ if ne .Schema "evm"}} +CREATE TABLE {{ .Schema }}.heads ( + id bigserial NOT NULL, + hash bytea NOT NULL, + "number" int8 NOT NULL, + parent_hash bytea NOT NULL, + created_at timestamptz NOT NULL, + "timestamp" timestamptz NOT NULL, + l1_block_number int8 NULL, +-- evm_chain_id numeric(78) NOT NULL, + base_fee_per_gas numeric(78) NULL, + CONSTRAINT chk_hash_size CHECK ((octet_length(hash) = 32)), + CONSTRAINT chk_parent_hash_size CHECK ((octet_length(parent_hash) = 32)), + CONSTRAINT heads_pkey1 PRIMARY KEY (id) +); +CREATE UNIQUE INDEX idx_heads_hash ON {{ .Schema }}.heads USING btree (hash); +CREATE INDEX idx_heads_number ON {{ .Schema }}.heads USING btree ("number"); + +INSERT INTO {{ .Schema }}.heads (hash, "number", parent_hash, created_at, "timestamp", l1_block_number, base_fee_per_gas) +SELECT hash, "number", parent_hash, created_at, "timestamp", l1_block_number, base_fee_per_gas +FROM evm.heads WHERE evm_chain_id = '{{ .ChainID }}'; + +DELETE FROM evm.heads WHERE evm_chain_id = '{{ .ChainID }}'; +{{ end}} \ No newline at end of file diff --git a/core/store/migrate/plugins/relayer/evm/initDown.tmpl.sql b/core/store/migrate/plugins/relayer/evm/initDown.tmpl.sql index 0f42928160d..c99df8e79cf 100644 --- a/core/store/migrate/plugins/relayer/evm/initDown.tmpl.sql +++ b/core/store/migrate/plugins/relayer/evm/initDown.tmpl.sql @@ -1,3 +1,5 @@ +-- Do nothing for `evm` schema for backward compatibility +{{ if ne .Schema "evm"}} /* DROP TABLE {{ .Schema }}.receipts DROP TABLE {{ .Schema }}.tx_attempts @@ -15,4 +17,5 @@ INSERT INTO evm.forwarders (address, created_at, updated_at, evm_chain_id) SELECT address, created_at, updated_at, '{{ .ChainID }}' FROM {{ .Schema }}.forwarders; -DROP TABLE {{ .Schema }}.forwarders; \ No newline at end of file +DROP TABLE {{ .Schema }}.forwarders; +{{ end}} \ No newline at end of file diff --git a/core/store/migrate/plugins/relayer/evm/initUp.tmpl.sql b/core/store/migrate/plugins/relayer/evm/initUp.tmpl.sql index 292f6e42b0a..cc6aa0f6aeb 100644 --- a/core/store/migrate/plugins/relayer/evm/initUp.tmpl.sql +++ b/core/store/migrate/plugins/relayer/evm/initUp.tmpl.sql @@ -1,3 +1,6 @@ +-- Do nothing for `evm` schema for backward compatibility +{{ if ne .Schema "evm"}} + -- {{ .Schema }}.forwarders definition -- Drop table @@ -6,6 +9,7 @@ -- TODO: make idompotent and everywhere as needed with IF NOT EXISTS + CREATE TABLE {{ .Schema }}.forwarders ( id bigserial NOT NULL, address bytea NOT NULL, @@ -27,6 +31,7 @@ INSERT INTO {{ .Schema }}.forwarders (address, created_at, updated_at) SELECT address, created_at, updated_at FROM evm.forwarders WHERE evm_chain_id = {{ .ChainID }}; +DELETE FROM evm.forwarders WHERE evm_chain_id = {{ .ChainID }}; /* -- {{ .Schema }}.heads definition @@ -292,4 +297,5 @@ CREATE TABLE {{ .Schema }}.receipts ( CREATE INDEX idx_eth_receipts_block_number ON {{ .Schema }}.receipts USING btree (block_number); CREATE INDEX idx_eth_receipts_created_at ON {{ .Schema }}.receipts USING brin (created_at); CREATE UNIQUE INDEX idx_eth_receipts_unique ON {{ .Schema }}.receipts USING btree (tx_hash, block_hash); -*/ \ No newline at end of file +*/ +{{ end }} \ No newline at end of file diff --git a/core/store/migrate/plugins/relayer/evm/key_statesDown.tmpl.sql b/core/store/migrate/plugins/relayer/evm/key_statesDown.tmpl.sql new file mode 100644 index 
00000000000..653e4f750df --- /dev/null +++ b/core/store/migrate/plugins/relayer/evm/key_statesDown.tmpl.sql @@ -0,0 +1,7 @@ +-- Do nothing for `evm` schema for backward compatibility +{{ if ne .Schema "evm"}} +INSERT INTO evm.key_states (address,disabled,created_at,updated_at,evm_chain_id) +SELECT address,disabled,created_at,updated_at,'{{ .ChainID }}' FROM {{ .Schema }}.key_states; + +DROP TABLE {{ .Schema }}.key_states; +{{ end}} \ No newline at end of file diff --git a/core/store/migrate/plugins/relayer/evm/key_statesUp.tmpl.sql b/core/store/migrate/plugins/relayer/evm/key_statesUp.tmpl.sql new file mode 100644 index 00000000000..aeb0c3d549b --- /dev/null +++ b/core/store/migrate/plugins/relayer/evm/key_statesUp.tmpl.sql @@ -0,0 +1,20 @@ +-- Do nothing for `evm` schema for backward compatibility +{{ if ne .Schema "evm"}} +CREATE TABLE {{ .Schema }}.key_states ( + id serial4 NOT NULL, + address bytea NOT NULL, + disabled bool DEFAULT false NOT NULL, + created_at timestamptz NOT NULL, + updated_at timestamptz NOT NULL, +-- evm_chain_id numeric(78) NOT NULL, + CONSTRAINT chk_address_length CHECK ((octet_length(address) = 20)), + CONSTRAINT eth_key_states_pkey PRIMARY KEY (id) +); +CREATE INDEX idx_evm_key_states_address ON {{ .Schema }}.key_states USING btree (address); +--CREATE UNIQUE INDEX idx_evm_key_states_evm_chain_id_address ON {{ .Schema }}.key_states USING btree (evm_chain_id, address); + +INSERT INTO {{ .Schema }}.key_states (address, disabled, created_at, updated_at) +SELECT address, disabled, created_at, updated_at FROM evm.key_states WHERE evm_chain_id = '{{ .ChainID }}'; + +DELETE FROM evm.key_states WHERE evm_chain_id = '{{ .ChainID }}'; +{{ end}} diff --git a/core/store/migrate/plugins/relayer/evm/migrate.go b/core/store/migrate/plugins/relayer/evm/migrate.go index 86d8f6114aa..3ed2a0b664e 100644 --- a/core/store/migrate/plugins/relayer/evm/migrate.go +++ b/core/store/migrate/plugins/relayer/evm/migrate.go @@ -44,12 +44,10 @@ func newProvider(db *sqlx.DB, cfg Cfg) (*goose.Provider, error) { return nil, fmt.Errorf("failed to create goose store for table %s: %w", mTable, err) } - goMigrations := make([]*goose.Migration, 0) - up0002, down0002, err := generate0002(cfg) + goMigrations, err := generateInitialMigrations(cfg) if err != nil { - return nil, fmt.Errorf("failed to generate migration 0002 for cfg %v: %w", cfg, err) + return nil, fmt.Errorf("failed to generate initial migrations for cfg %v: %w", cfg, err) } - goMigrations = append(goMigrations, goose.NewGoMigration(2, up0002, down0002)) // note we are leaking here, but can't delete the temp dir until the migrations are actually executed // maybe update the cache to store the temp dir and delete it when cache is deleted @@ -87,19 +85,6 @@ func newProvider(db *sqlx.DB, cfg Cfg) (*goose.Provider, error) { return p, nil } -/* -func setupPluginMigrations(cfg Cfg) error { - // reset the base fs and the global migrations - goose.SetBaseFS(nil) // we don't want to use the base fs for plugin migrations because the embedded fs contains templates, not sql files - goose.ResetGlobalMigrations() - goose.SetTableName(fmt.Sprintf("goose_migration_relayer_%s_%s", cfg.Schema, cfg.ChainID.String())) - err := Register0002(cfg) - if err != nil { - return fmt.Errorf("failed to register migration 0002: %w", err) - } - return nil -} -*/ // Migrate migrates a subsystem of the chainlink database. // It generates migrations based on the template for the subsystem and applies them to the database. 
func Migrate(ctx context.Context, db *sqlx.DB, cfg Cfg) error { @@ -110,24 +95,6 @@ func Migrate(ctx context.Context, db *sqlx.DB, cfg Cfg) error { if todo, _ := p.HasPending(ctx); !todo { return nil } - /* - tmpDir, err := os.MkdirTemp("", cfg.Schema) - if err != nil { - return fmt.Errorf("failed to create temp dir: %w", err) - } - defer os.RemoveAll(tmpDir) - - d := filepath.Join(tmpDir, cfg.Schema, cfg.ChainID.String()) - err = os.MkdirAll(d, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory %s: %w", d, err) - } - migrations, err := generateMigrations(embeddedTmplFS, MigrationRootDir, d, cfg) - if err != nil { - return fmt.Errorf("failed to generate migrations for opt %v: %w", cfg, err) - } - fmt.Printf("Generated migrations: %v\n", migrations) - */ // seems to be upside about global go migrations? //goose.ResetGlobalMigrations() r, err := p.Up(ctx) diff --git a/core/store/migrate/plugins/relayer/evm/migrate_test.go b/core/store/migrate/plugins/relayer/evm/migrate_test.go index 61e8209ff3d..c8d45fe55c9 100644 --- a/core/store/migrate/plugins/relayer/evm/migrate_test.go +++ b/core/store/migrate/plugins/relayer/evm/migrate_test.go @@ -1,8 +1,12 @@ package evm_test import ( + "context" + "fmt" + "os" "testing" + "github.com/jmoiron/sqlx" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v4" @@ -35,13 +39,216 @@ func TestMigrate(t *testing.T) { v2, err := evm.Current(ctx, db, cfg) require.NoError(t, err) - assert.GreaterOrEqual(t, int64(2), v2) + assert.GreaterOrEqual(t, v2, int64(2)) err = evm.Rollback(ctx, db, null.IntFrom(0), cfg) require.NoError(t, err) v2, err = evm.Current(ctx, db, cfg) require.NoError(t, err) + assert.Equal(t, int64(0), v2) }) } + +func TestGoDataMigration(t *testing.T) { + var totalRecords = 3 // by convention, each legacy table is loaded with 3 records; 2 for chain 0 and 1 for chain 1 + type test struct { + table string + } + var cases = []test{ + { + table: "forwarders", + }, + { + table: "heads", + }, + { + table: "key_states", + }, + } + for _, tt := range cases { + t.Run(tt.table+" data migration", func(t *testing.T) { + ctx := testutils.Context(t) + db := loadLegacyDatabase(t, ctx) + + type dataTest struct { + name string + cfg evm.Cfg + wantMigratedRecordCount int + } + var dataCases = []dataTest{ + { + name: "chain 731", + cfg: evm.Cfg{ + Schema: "evm_731", + ChainID: big.NewI(int64(731)), + }, + wantMigratedRecordCount: 0, + }, + { + name: "chain 0", + cfg: evm.Cfg{ + Schema: "evm_0", + ChainID: big.NewI(int64(0)), + }, + wantMigratedRecordCount: 2, + }, + { + name: "chain 1", + cfg: evm.Cfg{ + Schema: "evm_1", + ChainID: big.NewI(int64(1)), + }, + wantMigratedRecordCount: 1, + }, + } + for _, dataCase := range dataCases { + t.Run(dataCase.name, func(t *testing.T) { + err := evm.Migrate(ctx, db, dataCase.cfg) + require.NoError(t, err) + var moved int + err = db.Get(&moved, fmt.Sprintf("SELECT COUNT(*) FROM %s.%s", dataCase.cfg.Schema, tt.table)) + require.NoError(t, err) + require.Equal(t, dataCase.wantMigratedRecordCount, moved) + + var remaining int + err = db.Get(&remaining, fmt.Sprintf("SELECT COUNT(*) FROM evm.%s", tt.table)) + require.NoError(t, err) + require.Equal(t, totalRecords-dataCase.wantMigratedRecordCount, remaining) + + err = evm.Rollback(ctx, db, null.IntFrom(0), dataCase.cfg) + require.NoError(t, err) + var rollbackTotal int + err = db.Get(&rollbackTotal, fmt.Sprintf("SELECT COUNT(*) FROM evm.%s", tt.table)) + require.NoError(t, err) + require.Equal(t, 
totalRecords, rollbackTotal) + + // cfg schema should be gone + var schemaCount int + err = db.Get(&schemaCount, fmt.Sprintf("SELECT COUNT(*) FROM information_schema.schemata WHERE schema_name = '%s'", dataCase.cfg.Schema)) + require.NoError(t, err) + require.Equal(t, 0, schemaCount) + + }) + }
+ }) + } +} + +// loadLegacyDatabase loads the legacy forwarders, heads, and key_states data into the database +// as a matter of convenience and convention, each legacy table is loaded with 3 records: 2 for chain 0 and 1 for chain 1 +func loadLegacyDatabase(t *testing.T, ctx context.Context) *sqlx.DB { + t.Helper() + + _, db := heavyweight.FullTestDBEmptyV2(t, nil) + err := migrate.Migrate(ctx, db.DB) + require.NoError(t, err) + // load the legacy forwarder data + forwarderMigration, err := os.ReadFile("testdata/forwarders/initial.sql") + require.NoError(t, err) + _, err = db.Exec(string(forwarderMigration)) + require.NoError(t, err) + var cnt int + err = db.Get(&cnt, "SELECT COUNT(*) FROM evm.forwarders") + require.NoError(t, err) + require.Equal(t, 3, cnt) + + // load the legacy heads data + headsMigration, err := os.ReadFile("testdata/heads/initial.sql") + require.NoError(t, err) + _, err = db.Exec(string(headsMigration)) + require.NoError(t, err) + err = db.Get(&cnt, "SELECT COUNT(*) FROM evm.heads") + require.NoError(t, err) + require.Equal(t, 3, cnt) + + // load the legacy key_states data + keyStatesMigration, err := os.ReadFile("testdata/key_states/initial.sql") + require.NoError(t, err) + _, err = db.Exec(string(keyStatesMigration)) + require.NoError(t, err) + err = db.Get(&cnt, "SELECT COUNT(*) FROM evm.key_states") + require.NoError(t, err) + require.Equal(t, 3, cnt) + return db +} diff --git a/core/store/migrate/plugins/relayer/evm/resolver.go b/core/store/migrate/plugins/relayer/evm/resolver.go index 43f8627de28..7c6baadd76d 100644 --- a/core/store/migrate/plugins/relayer/evm/resolver.go +++ b/core/store/migrate/plugins/relayer/evm/resolver.go @@ -25,11 +25,24 @@ type Cfg struct { ChainID *big.Big } +func (c Cfg) Validate() error { + if c.Schema == "" { + return fmt.Errorf("schema is required") + } + if c.ChainID == nil { + return fmt.Errorf("chain id is required") + } + return nil +} + var migrationSuffix = ".tmpl.sql" func resolve(out io.Writer, in string, val Cfg) error { + if err := val.Validate(); err != nil { + return err + } id := fmt.Sprintf("init_%s_%s", val.Schema, val.ChainID) - tmpl, err := template.New(id).Parse(in) + tmpl, err := template.New(id).Option("missingkey=error").Parse(in) if err != nil { return fmt.Errorf("failed to parse template %s: %w", in, err) } diff --git a/core/store/migrate/plugins/relayer/evm/testdata/forwarders/initial.sql b/core/store/migrate/plugins/relayer/evm/testdata/forwarders/initial.sql new file mode 100644 index 00000000000..e265bf9ab3d --- /dev/null +++ b/core/store/migrate/plugins/relayer/evm/testdata/forwarders/initial.sql @@ -0,0 +1,4 @@ +INSERT INTO evm.forwarders (address,created_at,updated_at,evm_chain_id) VALUES + (decode('3031323334353637383930313233343536373839','hex'),'2024-06-26 
15:09:46.75491-06','2024-06-26 15:09:46.75491-06',0), + (decode('3131313131313131313131313131313131313131','hex'),'2024-06-26 15:10:49.646974-06','2024-06-26 15:10:49.646974-06',1), + (decode('3030303030303030303030303030303030303030','hex'),'2024-06-26 15:11:48.808834-06','2024-06-26 15:11:48.808834-06',0); \ No newline at end of file diff --git a/core/store/migrate/plugins/relayer/evm/testdata/heads/initial.sql b/core/store/migrate/plugins/relayer/evm/testdata/heads/initial.sql new file mode 100644 index 00000000000..f069bdf657b --- /dev/null +++ b/core/store/migrate/plugins/relayer/evm/testdata/heads/initial.sql @@ -0,0 +1,4 @@ +INSERT INTO evm.heads (hash,"number",parent_hash,created_at,"timestamp",l1_block_number,evm_chain_id,base_fee_per_gas) VALUES + (decode('3031323334353637383930313233343536373839303132333435363738393031','hex'),0,decode('3030303334353637383930313233343536373839303132333435363738393031','hex'),'2024-06-26 15:45:44.209895-06','2024-06-26 15:45:44.209895-06',0,0,0), + (decode('3031323334353637383930313233343536373839303132333435363738393030','hex'),0,decode('3030303334353637383930313233343536373839303132333435363738393031','hex'),'2024-06-26 15:46:51.140604-06','2024-06-26 15:46:51.140604-06',0,1,0), + (decode('3939393934353637383930313233343536373839303132333435363738393031','hex'),0,decode('3031323334353637383930313233343536373839303132333435363738393031','hex'),'2024-06-26 15:47:32.70539-06','2024-06-26 15:47:32.70539-06',0,0,0); diff --git a/core/store/migrate/plugins/relayer/evm/testdata/key_states/initial.sql b/core/store/migrate/plugins/relayer/evm/testdata/key_states/initial.sql new file mode 100644 index 00000000000..3a1ba69307f --- /dev/null +++ b/core/store/migrate/plugins/relayer/evm/testdata/key_states/initial.sql @@ -0,0 +1,4 @@ +INSERT INTO evm.key_states (address,disabled,created_at,updated_at,evm_chain_id) VALUES + (decode('3031323334353637383930313233343536373031','hex'),false,'2024-06-26 16:57:15.040764-06','2024-06-26 16:57:15.040764-06',1), + (decode('3031323334353637383930313233343536373030','hex'),false,'2024-06-26 16:57:39.22825-06','2024-06-26 16:57:39.22825-06',0), + (decode('3031323334353637383930313233343536373839','hex'),false,'2024-06-26 16:58:04.775458-06','2024-06-26 16:58:04.775458-06',0);
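Usage sketch: a minimal standalone driver for the new per-chain go-migrations, using only the API exercised above (evm.Cfg, evm.Migrate, evm.Rollback, and the forwarders/heads/key_states migrations registered as versions 2-4). The schema name and chain id here are hypothetical examples; a connected *sqlx.DB is assumed.

package main

import (
	"context"

	"github.com/jmoiron/sqlx"
	"gopkg.in/guregu/null.v4"

	"github.com/smartcontractkit/chainlink/v2/core/chains/evm/utils/big"
	"github.com/smartcontractkit/chainlink/v2/core/store/migrate/plugins/relayer/evm"
)

// migrateAndRollback applies the forwarders (v2), heads (v3), and key_states (v4)
// go-migrations for a single chain, then rolls back to version 0, which copies
// the rows back into the legacy evm.* tables and removes the per-chain tables.
func migrateAndRollback(ctx context.Context, db *sqlx.DB) error {
	// hypothetical schema/chain pair; real callers derive these from node config
	cfg := evm.Cfg{Schema: "evm_3266", ChainID: big.NewI(int64(3266))}
	if err := evm.Migrate(ctx, db, cfg); err != nil {
		return err
	}
	return evm.Rollback(ctx, db, null.IntFrom(0), cfg)
}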