From 785136bfa1a298e20cc0a9ace0cf889bda28bc28 Mon Sep 17 00:00:00 2001 From: avichalp Date: Sat, 8 Apr 2023 13:22:35 +0530 Subject: [PATCH 01/14] add sepolia config and remove goerli healthbot Signed-off-by: avichalp --- .../testnet/api/.env_validator.example | 2 ++ docker/deployed/testnet/api/config.json | 27 +++++++++++++++++++ .../testnet/healthbot/.env_healthbot.example | 3 +++ docker/deployed/testnet/healthbot/config.json | 14 ++++++++++ pkg/client/chains.go | 25 +++++++++++------ 5 files changed, 63 insertions(+), 8 deletions(-) diff --git a/docker/deployed/testnet/api/.env_validator.example b/docker/deployed/testnet/api/.env_validator.example index 11c50379..1d8590f5 100644 --- a/docker/deployed/testnet/api/.env_validator.example +++ b/docker/deployed/testnet/api/.env_validator.example @@ -4,6 +4,8 @@ VALIDATOR_ALCHEMY_POLYGON_MUMBAI_API_KEY= VALIDATOR_POLYGON_MUMBAI_SIGNER_PRIVATE_KEY= VALIDATOR_ALCHEMY_ETHEREUM_GOERLI_API_KEY= VALIDATOR_ETHEREUM_GOERLI_SIGNER_PRIVATE_KEY= +VALIDATOR_ALCHEMY_ETHEREUM_SEPOLIA_API_KEY= +VALIDATOR_ETHEREUM_SEPOLIA_SIGNER_PRIVATE_KEY= VALIDATOR_ALCHEMY_ETHEREUM_MAINNET_API_KEY= VALIDATOR_ETHEREUM_MAINNET_SIGNER_PRIVATE_KEY= VALIDATOR_ALCHEMY_POLYGON_MAINNET_API_KEY= diff --git a/docker/deployed/testnet/api/config.json b/docker/deployed/testnet/api/config.json index 47805a0b..fc07ebd8 100644 --- a/docker/deployed/testnet/api/config.json +++ b/docker/deployed/testnet/api/config.json @@ -78,6 +78,33 @@ }, "HashCalculationStep": 150 }, + { + "Name": "Ethereum Sepolia", + "ChainID": 11155111, + "Registry": { + "EthEndpoint": "wss://eth-sepolia.g.alchemy.com/v2//${VALIDATOR_ALCHEMY_ETHEREUM_SEPOLIA_API_KEY}", + "ContractAddress": "0xTODO" + }, + "Signer": { + "PrivateKey": "${VALIDATOR_ETHEREUM_SEPOLIA_SIGNER_PRIVATE_KEY}" + }, + "EventFeed": { + "ChainAPIBackoff": "15s", + "NewBlockPollFreq": "10s", + "MinBlockDepth": 1, + "PersistEvents": true + }, + "EventProcessor": { + "BlockFailedExecutionBackoff": "10s", + "DedupExecutedTxns": true + }, + "NonceTracker": { + "CheckInterval": "15s", + "StuckInterval": "10m", + "MinBlockDepth": 1 + }, + "HashCalculationStep": 150 + }, { "Name": "Polygon Mumbai", "ChainID": 80001, diff --git a/docker/deployed/testnet/healthbot/.env_healthbot.example b/docker/deployed/testnet/healthbot/.env_healthbot.example index 2a41ed4d..083e5c42 100644 --- a/docker/deployed/testnet/healthbot/.env_healthbot.example +++ b/docker/deployed/testnet/healthbot/.env_healthbot.example @@ -1,3 +1,6 @@ +HEALTHBOT_ETHEREUM_SEPOLIA_PRIVATE_KEY= +HEALTHBOT_ETHEREUM_SEPOLIA_API_KEY= +HEALTHBOT_ETHEREUM_SEPOLIA_TABLE= HEALTHBOT_POLYGON_MUMBAI_PRIVATE_KEY= HEALTHBOT_POLYGON_MUMBAI_API_KEY= HEALTHBOT_POLYGON_MUMBAI_TABLE= diff --git a/docker/deployed/testnet/healthbot/config.json b/docker/deployed/testnet/healthbot/config.json index 49fc947e..bea4137b 100644 --- a/docker/deployed/testnet/healthbot/config.json +++ b/docker/deployed/testnet/healthbot/config.json @@ -17,6 +17,20 @@ "Tablename": "${HEALTHBOT_ARBITRUM_GOERLI_TABLE}" } }, + { + "ChainID": 11155111, + "WalletPrivateKey": "${HEALTHBOT_ETHEREUM_SEPOLIA_PRIVATE_KEY}", + "AlchemyAPIKey": "${HEALTHBOT_ALCHEMY_ETHEREUM_SEPOLIA_API_KEY}", + "Probe": { + "CheckInterval": "1.5h", + "ReceiptTimeout": "90s", + "Tablename": "${HEALTHBOT_ETHEREUM_SEPOLIA_TABLE}" + }, + "OverrideClient": { + "SuggestedGasPriceMultiplier": 1.2, + "EstimatedGasLimitMultiplier": 1.1 + } + }, { "ChainID": 80001, "WalletPrivateKey": "${HEALTHBOT_POLYGON_MUMBAI_PRIVATE_KEY}", diff --git a/pkg/client/chains.go 
b/pkg/client/chains.go index 50e8f353..6836fec3 100644 --- a/pkg/client/chains.go +++ b/pkg/client/chains.go @@ -22,6 +22,7 @@ var ChainIDs = struct { ArbitrumNova ChainID Filecoin ChainID EthereumGoerli ChainID + EthereumSepolia ChainID OptimismGoerli ChainID ArbitrumGoerli ChainID FilecoinHyperspace ChainID @@ -35,6 +36,7 @@ var ChainIDs = struct { ArbitrumNova: 42170, Filecoin: 314, EthereumGoerli: 5, + EthereumSepolia: 11155111, OptimismGoerli: 420, ArbitrumGoerli: 421613, FilecoinHyperspace: 3141, @@ -94,6 +96,12 @@ var Chains = map[ChainID]Chain{ Name: "Ethereum Goerli", ContractAddr: common.HexToAddress("0xDA8EA22d092307874f30A1F277D1388dca0BA97a"), }, + ChainIDs.EthereumSepolia: { + Endpoint: testnetURL, + ID: ChainIDs.EthereumSepolia, + Name: "Ethereum Sepolia", + ContractAddr: common.HexToAddress("0xDA8EA22d092307874f30A1F277D1388dca0BA97a"), + }, ChainIDs.OptimismGoerli: { Endpoint: testnetURL, ID: ChainIDs.OptimismGoerli, @@ -140,14 +148,15 @@ var InfuraURLs = map[ChainID]string{ // AlchemyURLs contains the URLs for supported chains for Alchemy. var AlchemyURLs = map[ChainID]string{ - ChainIDs.EthereumGoerli: "https://eth-goerli.g.alchemy.com/v2/%s", - ChainIDs.Ethereum: "https://eth-mainnet.g.alchemy.com/v2/%s", - ChainIDs.OptimismGoerli: "https://opt-goerli.g.alchemy.com/v2/%s", - ChainIDs.Optimism: "https://opt-mainnet.g.alchemy.com/v2/%s", - ChainIDs.ArbitrumGoerli: "https://arb-goerli.g.alchemy.com/v2/%s", - ChainIDs.Arbitrum: "https://arb-mainnet.g.alchemy.com/v2/%s", - ChainIDs.PolygonMumbai: "https://polygon-mumbai.g.alchemy.com/v2/%s", - ChainIDs.Polygon: "https://polygon-mainnet.g.alchemy.com/v2/%s", + ChainIDs.EthereumGoerli: "https://eth-goerli.g.alchemy.com/v2/%s", + ChainIDs.EthereumSepolia: "https://eth-sepolia.g.alchemy.com/v2/%s", + ChainIDs.Ethereum: "https://eth-mainnet.g.alchemy.com/v2/%s", + ChainIDs.OptimismGoerli: "https://opt-goerli.g.alchemy.com/v2/%s", + ChainIDs.Optimism: "https://opt-mainnet.g.alchemy.com/v2/%s", + ChainIDs.ArbitrumGoerli: "https://arb-goerli.g.alchemy.com/v2/%s", + ChainIDs.Arbitrum: "https://arb-mainnet.g.alchemy.com/v2/%s", + ChainIDs.PolygonMumbai: "https://polygon-mumbai.g.alchemy.com/v2/%s", + ChainIDs.Polygon: "https://polygon-mainnet.g.alchemy.com/v2/%s", } // QuickNodeURLs contains the URLs for supported chains for QuickNode. 
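A minimal usage sketch (not part of the patch) of the Sepolia entries that patch 01 adds to pkg/client/chains.go. It assumes the package in pkg/client is named "client" and uses only identifiers visible in the hunk above (ChainIDs, Chains, AlchemyURLs, the Chain fields); the API key string is a placeholder.

// sepolia_example.go — illustrative only; assumes package name "client".
package main

import (
	"fmt"

	"github.com/textileio/go-tableland/pkg/client"
)

func main() {
	// Look up the chain metadata registered for Ethereum Sepolia (chain ID 11155111).
	sepolia, ok := client.Chains[client.ChainIDs.EthereumSepolia]
	if !ok {
		panic("Ethereum Sepolia is not registered in client.Chains")
	}
	fmt.Println(sepolia.Name, sepolia.ContractAddr.Hex())

	// AlchemyURLs maps a chain ID to a URL format string with a %s placeholder
	// for the Alchemy API key, matching the entry added in this patch.
	rpcURL := fmt.Sprintf(client.AlchemyURLs[client.ChainIDs.EthereumSepolia], "YOUR_ALCHEMY_API_KEY")
	fmt.Println(rpcURL)
}

Note that at this point in the series the Sepolia ContractAddr is still the Goerli address; patch 02 replaces it with a placeholder and patch 03 sets the real Sepolia contract address.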
From 7f926d85eb8b7b0a52392b69ce91478d930da36c Mon Sep 17 00:00:00 2001 From: avichalp Date: Mon, 10 Apr 2023 14:31:48 +0900 Subject: [PATCH 02/14] add placeholder contract addr for sepolia Signed-off-by: avichalp --- pkg/client/chains.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/client/chains.go b/pkg/client/chains.go index 6836fec3..aef1a7b0 100644 --- a/pkg/client/chains.go +++ b/pkg/client/chains.go @@ -100,7 +100,7 @@ var Chains = map[ChainID]Chain{ Endpoint: testnetURL, ID: ChainIDs.EthereumSepolia, Name: "Ethereum Sepolia", - ContractAddr: common.HexToAddress("0xDA8EA22d092307874f30A1F277D1388dca0BA97a"), + ContractAddr: common.HexToAddress("0xTODO"), }, ChainIDs.OptimismGoerli: { Endpoint: testnetURL, From e3bd7bc0f93bf2faf8148cf1f02b12860af71668 Mon Sep 17 00:00:00 2001 From: avichalp Date: Mon, 24 Apr 2023 11:18:37 +0800 Subject: [PATCH 03/14] Add sepolia contract addr in the config Signed-off-by: avichalp --- docker/deployed/testnet/api/config.json | 4 ++-- docker/deployed/testnet/healthbot/config.json | 2 +- pkg/client/chains.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/deployed/testnet/api/config.json b/docker/deployed/testnet/api/config.json index fc07ebd8..2b611fba 100644 --- a/docker/deployed/testnet/api/config.json +++ b/docker/deployed/testnet/api/config.json @@ -82,8 +82,8 @@ "Name": "Ethereum Sepolia", "ChainID": 11155111, "Registry": { - "EthEndpoint": "wss://eth-sepolia.g.alchemy.com/v2//${VALIDATOR_ALCHEMY_ETHEREUM_SEPOLIA_API_KEY}", - "ContractAddress": "0xTODO" + "EthEndpoint": "wss://eth-sepolia.g.alchemy.com/v2/${VALIDATOR_ALCHEMY_ETHEREUM_SEPOLIA_API_KEY}", + "ContractAddress": "0xc50C62498448ACc8dBdE43DA77f8D5D2E2c7597D" }, "Signer": { "PrivateKey": "${VALIDATOR_ETHEREUM_SEPOLIA_SIGNER_PRIVATE_KEY}" diff --git a/docker/deployed/testnet/healthbot/config.json b/docker/deployed/testnet/healthbot/config.json index bea4137b..05656312 100644 --- a/docker/deployed/testnet/healthbot/config.json +++ b/docker/deployed/testnet/healthbot/config.json @@ -22,7 +22,7 @@ "WalletPrivateKey": "${HEALTHBOT_ETHEREUM_SEPOLIA_PRIVATE_KEY}", "AlchemyAPIKey": "${HEALTHBOT_ALCHEMY_ETHEREUM_SEPOLIA_API_KEY}", "Probe": { - "CheckInterval": "1.5h", + "CheckInterval": "1h", "ReceiptTimeout": "90s", "Tablename": "${HEALTHBOT_ETHEREUM_SEPOLIA_TABLE}" }, diff --git a/pkg/client/chains.go b/pkg/client/chains.go index aef1a7b0..f9280d85 100644 --- a/pkg/client/chains.go +++ b/pkg/client/chains.go @@ -100,7 +100,7 @@ var Chains = map[ChainID]Chain{ Endpoint: testnetURL, ID: ChainIDs.EthereumSepolia, Name: "Ethereum Sepolia", - ContractAddr: common.HexToAddress("0xTODO"), + ContractAddr: common.HexToAddress("0xc50C62498448ACc8dBdE43DA77f8D5D2E2c7597D"), }, ChainIDs.OptimismGoerli: { Endpoint: testnetURL, From d75fb2ca4627056d39bec155e4a027b690afa00a Mon Sep 17 00:00:00 2001 From: Bruno Calza Date: Fri, 31 Mar 2023 14:42:17 -0300 Subject: [PATCH 04/14] refactors how database is accessed [stagingdeploy] Signed-off-by: Bruno Calza --- .bingo/.gitignore | 12 - .bingo/README.md | 14 - .bingo/Variables.mk | 31 - .bingo/bingo.mod | 5 - .bingo/go.mod | 1 - .bingo/kustomize.mod | 9 - .bingo/variables.env | 14 - .golangci.yml | 2 +- Makefile | 8 +- cmd/api/main.go | 135 ++-- .../dashboards/validator-dashboard.json | 317 +++++++-- internal/chains/chains.go | 2 - internal/formatter/formatter.go | 6 +- internal/formatter/formatter_test.go | 34 +- internal/gateway/gateway.go | 232 +++++-- internal/gateway/gateway_instrumented.go | 29 +- 
internal/gateway/impl/gateway_store.go | 173 +++++ .../gateway_store_test.go} | 240 +++++-- .../gateway/impl}/rowstotabledata.go | 22 +- internal/router/controllers/controller.go | 14 +- .../router/controllers/controller_test.go | 70 +- internal/tableland/acl.go | 3 +- internal/tableland/impl/acl.go | 71 +- internal/tableland/impl/tableland_test.go | 111 ++- internal/tableland/tableland.go | 155 ----- mocks/Gateway.go | 80 +-- .../internal => database}/db/acl.sql.go | 0 .../system/internal => database}/db/db.go | 20 - .../db/evm_events.sql.go | 0 .../system/internal => database}/db/id.sql.go | 0 .../system/internal => database}/db/models.go | 0 .../internal => database}/db/nonce.sql.go | 0 .../internal => database}/db/receipt.sql.go | 0 pkg/database/db/registry.sql.go | 33 + .../internal => database}/db/schema.sql.go | 0 .../migrations/001_init.down.sql | 0 .../migrations/001_init.up.sql | 0 .../migrations/002_receipterroridx.down.sql | 0 .../migrations/002_receipterroridx.up.sql | 0 .../migrations/003_evm_events.down.sql | 0 .../migrations/003_evm_events.up.sql | 0 .../migrations/004_system_id.down.sql | 0 .../migrations/004_system_id.up.sql | 0 .../migrations/migrations.go | 16 +- .../impl/system => database}/queries/acl.sql | 0 .../queries/evm_events.sql | 0 .../impl/system => database}/queries/id.sql | 0 .../system => database}/queries/nonce.sql | 0 .../system => database}/queries/receipt.sql | 0 pkg/database/queries/registry.sql | 2 + .../system => database}/queries/schema.sql | 0 .../system => database}/schemas/schemas.sql | 0 .../impl/system => database}/sqlc.yaml | 2 +- pkg/database/sqlite_db.go | 109 +++ pkg/eventprocessor/eventfeed/eventfeed.go | 39 ++ .../eventfeed/impl/analytics.go | 4 +- .../eventfeed/impl/eventfeed.go | 23 +- .../eventfeed/impl/eventfeed_store.go | 355 ++++++++++ .../eventfeed/impl/eventfeed_store_test.go | 89 +++ .../eventfeed/impl/eventfeed_test.go | 51 +- .../impl/eventprocessor_replayhistory_test.go | 30 +- .../impl/eventprocessor_test.go | 46 +- .../impl/executor/impl/executor.go | 9 +- .../impl/executor/impl/executor_test.go | 27 +- .../impl/executor/impl/txnscope.go | 42 ++ .../impl/txnscope_createtable_test.go | 8 +- .../impl/executor/impl/txnscope_runsql.go | 3 +- .../executor/impl/txnscope_runsql_test.go | 148 +++- pkg/nonce/impl/store.go | 69 +- pkg/nonce/impl/tracker.go | 7 +- pkg/nonce/impl/tracker_test.go | 58 +- pkg/nonce/tracker.go | 9 +- .../impl/system/internal/db/registry.sql.go | 111 --- pkg/sqlstore/impl/system/queries/registry.sql | 8 - pkg/sqlstore/impl/system/store.go | 641 ------------------ .../impl/system/store_instrumented.go | 437 ------------ pkg/sqlstore/impl/system/store_test.go | 218 ------ pkg/sqlstore/store.go | 50 -- pkg/sqlstore/table.go | 77 --- pkg/tables/impl/ethereum/client_test.go | 6 +- tests/fullstack/fullstack.go | 62 +- tests/sqlite3.go | 3 +- 82 files changed, 2078 insertions(+), 2524 deletions(-) delete mode 100644 .bingo/.gitignore delete mode 100644 .bingo/README.md delete mode 100644 .bingo/Variables.mk delete mode 100644 .bingo/bingo.mod delete mode 100644 .bingo/go.mod delete mode 100644 .bingo/kustomize.mod delete mode 100644 .bingo/variables.env create mode 100644 internal/gateway/impl/gateway_store.go rename internal/gateway/{gateway_test.go => impl/gateway_store_test.go} (55%) rename {pkg/sqlstore/impl/system => internal/gateway/impl}/rowstotabledata.go (58%) rename pkg/{sqlstore/impl/system/internal => database}/db/acl.sql.go (100%) rename pkg/{sqlstore/impl/system/internal => database}/db/db.go (91%) 
rename pkg/{sqlstore/impl/system/internal => database}/db/evm_events.sql.go (100%) rename pkg/{sqlstore/impl/system/internal => database}/db/id.sql.go (100%) rename pkg/{sqlstore/impl/system/internal => database}/db/models.go (100%) rename pkg/{sqlstore/impl/system/internal => database}/db/nonce.sql.go (100%) rename pkg/{sqlstore/impl/system/internal => database}/db/receipt.sql.go (100%) create mode 100644 pkg/database/db/registry.sql.go rename pkg/{sqlstore/impl/system/internal => database}/db/schema.sql.go (100%) rename pkg/{sqlstore/impl/system => database}/migrations/001_init.down.sql (100%) rename pkg/{sqlstore/impl/system => database}/migrations/001_init.up.sql (100%) rename pkg/{sqlstore/impl/system => database}/migrations/002_receipterroridx.down.sql (100%) rename pkg/{sqlstore/impl/system => database}/migrations/002_receipterroridx.up.sql (100%) rename pkg/{sqlstore/impl/system => database}/migrations/003_evm_events.down.sql (100%) rename pkg/{sqlstore/impl/system => database}/migrations/003_evm_events.up.sql (100%) rename pkg/{sqlstore/impl/system => database}/migrations/004_system_id.down.sql (100%) rename pkg/{sqlstore/impl/system => database}/migrations/004_system_id.up.sql (100%) rename pkg/{sqlstore/impl/system => database}/migrations/migrations.go (96%) rename pkg/{sqlstore/impl/system => database}/queries/acl.sql (100%) rename pkg/{sqlstore/impl/system => database}/queries/evm_events.sql (100%) rename pkg/{sqlstore/impl/system => database}/queries/id.sql (100%) rename pkg/{sqlstore/impl/system => database}/queries/nonce.sql (100%) rename pkg/{sqlstore/impl/system => database}/queries/receipt.sql (100%) create mode 100644 pkg/database/queries/registry.sql rename pkg/{sqlstore/impl/system => database}/queries/schema.sql (100%) rename pkg/{sqlstore/impl/system => database}/schemas/schemas.sql (100%) rename pkg/{sqlstore/impl/system => database}/sqlc.yaml (96%) create mode 100644 pkg/database/sqlite_db.go create mode 100644 pkg/eventprocessor/eventfeed/impl/eventfeed_store.go create mode 100644 pkg/eventprocessor/eventfeed/impl/eventfeed_store_test.go delete mode 100644 pkg/sqlstore/impl/system/internal/db/registry.sql.go delete mode 100644 pkg/sqlstore/impl/system/queries/registry.sql delete mode 100644 pkg/sqlstore/impl/system/store.go delete mode 100644 pkg/sqlstore/impl/system/store_instrumented.go delete mode 100644 pkg/sqlstore/impl/system/store_test.go delete mode 100644 pkg/sqlstore/store.go delete mode 100644 pkg/sqlstore/table.go diff --git a/.bingo/.gitignore b/.bingo/.gitignore deleted file mode 100644 index 4f2055b6..00000000 --- a/.bingo/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ - -# Ignore everything -* - -# But not these files: -!.gitignore -!*.mod -!README.md -!Variables.mk -!variables.env - -*tmp.mod diff --git a/.bingo/README.md b/.bingo/README.md deleted file mode 100644 index 7a5c2d4f..00000000 --- a/.bingo/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Project Development Dependencies. - -This is directory which stores Go modules with pinned buildable package that is used within this repository, managed by https://github.com/bwplotka/bingo. - -* Run `bingo get` to install all tools having each own module file in this directory. -* Run `bingo get ` to install that have own module file in this directory. -* For Makefile: Make sure to put `include .bingo/Variables.mk` in your Makefile, then use $() variable where is the .bingo/.mod. -* For shell: Run `source .bingo/variables.env` to source all environment variable for each tool. 
-* For go: Import `.bingo/variables.go` to for variable names. -* See https://github.com/bwplotka/bingo or -h on how to add, remove or change binaries dependencies. - -## Requirements - -* Go 1.14+ diff --git a/.bingo/Variables.mk b/.bingo/Variables.mk deleted file mode 100644 index 08588dba..00000000 --- a/.bingo/Variables.mk +++ /dev/null @@ -1,31 +0,0 @@ -# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.5.1. DO NOT EDIT. -# All tools are designed to be build inside $GOBIN. -BINGO_DIR := $(dir $(lastword $(MAKEFILE_LIST))) -GOPATH ?= $(shell go env GOPATH) -GOBIN ?= $(firstword $(subst :, ,${GOPATH}))/bin -GO ?= $(shell which go) - -# Below generated variables ensure that every time a tool under each variable is invoked, the correct version -# will be used; reinstalling only if needed. -# For example for bingo variable: -# -# In your main Makefile (for non array binaries): -# -#include .bingo/Variables.mk # Assuming -dir was set to .bingo . -# -#command: $(BINGO) -# @echo "Running bingo" -# @$(BINGO) -# -BINGO := $(GOBIN)/bingo-v0.5.1 -$(BINGO): $(BINGO_DIR)/bingo.mod - @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. - @echo "(re)installing $(GOBIN)/bingo-v0.5.1" - @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=bingo.mod -o=$(GOBIN)/bingo-v0.5.1 "github.com/bwplotka/bingo" - -KUSTOMIZE := $(GOBIN)/kustomize-v4.4.1 -$(KUSTOMIZE): $(BINGO_DIR)/kustomize.mod - @# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies. - @echo "(re)installing $(GOBIN)/kustomize-v4.4.1" - @cd $(BINGO_DIR) && $(GO) build -mod=mod -modfile=kustomize.mod -o=$(GOBIN)/kustomize-v4.4.1 "sigs.k8s.io/kustomize/kustomize/v4" - diff --git a/.bingo/bingo.mod b/.bingo/bingo.mod deleted file mode 100644 index 1bb8dad2..00000000 --- a/.bingo/bingo.mod +++ /dev/null @@ -1,5 +0,0 @@ -module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT - -go 1.17 - -require github.com/bwplotka/bingo v0.5.1 diff --git a/.bingo/go.mod b/.bingo/go.mod deleted file mode 100644 index 610249af..00000000 --- a/.bingo/go.mod +++ /dev/null @@ -1 +0,0 @@ -module _ // Fake go.mod auto-created by 'bingo' for go -moddir compatibility with non-Go projects. Commit this file, together with other .mod files. \ No newline at end of file diff --git a/.bingo/kustomize.mod b/.bingo/kustomize.mod deleted file mode 100644 index 12379686..00000000 --- a/.bingo/kustomize.mod +++ /dev/null @@ -1,9 +0,0 @@ -module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT - -go 1.17 - -exclude sigs.k8s.io/kustomize/api v0.2.0 - -exclude sigs.k8s.io/kustomize/cmd/config v0.2.0 - -require sigs.k8s.io/kustomize/kustomize/v4 v4.4.1 diff --git a/.bingo/variables.env b/.bingo/variables.env deleted file mode 100644 index 643a840c..00000000 --- a/.bingo/variables.env +++ /dev/null @@ -1,14 +0,0 @@ -# Auto generated binary variables helper managed by https://github.com/bwplotka/bingo v0.5.1. DO NOT EDIT. -# All tools are designed to be build inside $GOBIN. -# Those variables will work only until 'bingo get' was invoked, or if tools were installed via Makefile's Variables.mk. 
-GOBIN=${GOBIN:=$(go env GOBIN)} - -if [ -z "$GOBIN" ]; then - GOBIN="$(go env GOPATH)/bin" -fi - - -BINGO="${GOBIN}/bingo-v0.5.1" - -KUSTOMIZE="${GOBIN}/kustomize-v4.4.1" - diff --git a/.golangci.yml b/.golangci.yml index d7331330..46567683 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -59,5 +59,5 @@ run: timeout: 30m skip-dirs: - - "pkg/sqlstore/impl/system/internal/db" + - "pkg/database/db" - "internal/router/controllers/apiv1" diff --git a/Makefile b/Makefile index 4b39124e..0d833cfa 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,3 @@ -include .bingo/Variables.mk - HEAD_SHORT ?= $(shell git rev-parse --short HEAD) PLATFORM ?= $(shell uname -m) @@ -31,9 +29,9 @@ ethereum-testerc721a: $(ABIGEN) --abi ./pkg/tables/impl/ethereum/test/erc721aQueryable/abi.json --pkg erc721aQueryable --type Contract --out pkg/tables/impl/ethereum/test/erc721aQueryable/erc721aQueryable.go --bin pkg/tables/impl/ethereum/test/erc721aQueryable/bytecode.bin .PHONY: ethereum-testerc721a -system-sql-assets: - cd pkg/sqlstore/impl/system && $(GO_BINDATA) -pkg migrations -prefix migrations/ -o migrations/migrations.go -ignore=migrations.go migrations && $(SQLC) generate; cd -; -.PHONY: system-sql-assets +database-assets: + cd pkg/database && $(GO_BINDATA) -pkg migrations -prefix migrations/ -o migrations/migrations.go -ignore=migrations.go migrations && $(SQLC) generate; cd -; +.PHONY: database-assets mocks: clean-mocks go run github.com/vektra/mockery/v2@v2.14.0 --name='\b(?:Gateway)\b' --recursive --with-expecter diff --git a/cmd/api/main.go b/cmd/api/main.go index 6a4cd049..aa198f75 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -8,25 +8,31 @@ import ( "fmt" "net/http" "path" + "strings" "sync" "time" - "github.com/XSAM/otelsql" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" + "github.com/google/uuid" _ "github.com/mattn/go-sqlite3" "github.com/rs/zerolog/log" "github.com/textileio/cli" "github.com/textileio/go-tableland/buildinfo" "github.com/textileio/go-tableland/internal/chains" "github.com/textileio/go-tableland/internal/gateway" + gatewayimpl "github.com/textileio/go-tableland/internal/gateway/impl" "github.com/textileio/go-tableland/internal/router" "github.com/textileio/go-tableland/internal/tableland" "github.com/textileio/go-tableland/internal/tableland/impl" "github.com/textileio/go-tableland/pkg/backup" "github.com/textileio/go-tableland/pkg/backup/restorer" + "github.com/textileio/go-tableland/pkg/database" "github.com/textileio/go-tableland/pkg/eventprocessor" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" + + "go.opentelemetry.io/otel/attribute" + efimpl "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed/impl" epimpl "github.com/textileio/go-tableland/pkg/eventprocessor/impl" executor "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor/impl" @@ -35,16 +41,14 @@ import ( nonceimpl "github.com/textileio/go-tableland/pkg/nonce/impl" "github.com/textileio/go-tableland/pkg/parsing" parserimpl "github.com/textileio/go-tableland/pkg/parsing/impl" + "github.com/textileio/go-tableland/pkg/sharedmemory" - "github.com/textileio/go-tableland/pkg/sqlstore" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/telemetry" "github.com/textileio/go-tableland/pkg/telemetry/chainscollector" "github.com/textileio/go-tableland/pkg/telemetry/publisher" "github.com/textileio/go-tableland/pkg/telemetry/storage" "github.com/textileio/go-tableland/pkg/wallet" - 
"go.opentelemetry.io/otel/attribute" ) type moduleCloser func(ctx context.Context) error @@ -68,6 +72,16 @@ func main() { path.Join(dirPath, "database.db"), ) + serializableDB, err := database.OpenSerializable(databaseURL, attribute.String("database", "main")) + if err != nil { + log.Fatal().Err(err).Msg("opening the database") + } + + concurrentDB, err := database.OpenConcurrent(databaseURL, attribute.String("database", "main")) + if err != nil { + log.Fatal().Err(err).Msg("opening the read database") + } + // Restore provided backup (if configured). if config.BootstrapBackupURL != "" { if err := restoreBackup(databaseURL, config.BootstrapBackupURL); err != nil { @@ -85,7 +99,7 @@ func main() { // Chain stacks. chainStacks, closeChainStacks, err := createChainStacks( - databaseURL, + serializableDB, parser, sm, config.Chains, @@ -95,17 +109,8 @@ func main() { log.Fatal().Err(err).Msg("creating chains stack") } - eps := make(map[tableland.ChainID]eventprocessor.EventProcessor, len(chainStacks)) - for chainID, stack := range chainStacks { - eps[chainID] = stack.EventProcessor - } - - for _, stack := range chainStacks { - stack.Store.SetReadResolver(parsing.NewReadStatementResolver(sm)) - } - // HTTP API server. - closeHTTPServer, err := createAPIServer(config.HTTP, config.Gateway, parser, chainStacks) + closeHTTPServer, err := createAPIServer(config.HTTP, config.Gateway, parser, concurrentDB, sm, chainStacks) if err != nil { log.Fatal().Err(err).Msg("creating HTTP server") } @@ -120,7 +125,7 @@ func main() { } // Telemetry - closeTelemetryModule, err := configureTelemetry(dirPath, chainStacks, config.TelemetryPublisher) + closeTelemetryModule, err := configureTelemetry(dirPath, concurrentDB, chainStacks, config.TelemetryPublisher) if err != nil { log.Fatal().Err(err).Msg("configuring telemetry") } @@ -147,6 +152,16 @@ func main() { log.Error().Err(err).Msg("closing backuper") } + // Close serializable database + if err := serializableDB.Close(); err != nil { + log.Error().Err(err).Msg("closing serializable db backuper") + } + + // Close concurrent database + if err := concurrentDB.Close(); err != nil { + log.Error().Err(err).Msg("closing concurrent db backuper") + } + // Close telemetry. 
if err := closeTelemetryModule(ctx); err != nil { log.Error().Err(err).Msg("closing telemetry module") @@ -156,23 +171,12 @@ func main() { func createChainIDStack( config ChainConfig, - dbURI string, - executorsDB *sql.DB, + db *database.SQLiteDB, parser parsing.SQLValidator, sm *sharedmemory.SharedMemory, tableConstraints TableConstraints, fetchExtraBlockInfo bool, ) (chains.ChainStack, error) { - store, err := system.New(dbURI, config.ChainID) - if err != nil { - return chains.ChainStack{}, fmt.Errorf("failed initialize sqlstore: %s", err) - } - - systemStore, err := system.NewInstrumentedSystemStore(config.ChainID, store) - if err != nil { - return chains.ChainStack{}, fmt.Errorf("instrumenting system store: %s", err) - } - conn, err := ethclient.Dial(config.Registry.EthEndpoint) if err != nil { return chains.ChainStack{}, fmt.Errorf("failed to connect to ethereum endpoint: %s", err) @@ -200,7 +204,7 @@ func createChainIDStack( tracker, err := nonceimpl.NewLocalTracker( ctxLocalTracker, wallet, - nonceimpl.NewNonceStore(systemStore), + nonceimpl.NewNonceStore(db), config.ChainID, conn, checkInterval, @@ -211,11 +215,7 @@ func createChainIDStack( return chains.ChainStack{}, fmt.Errorf("failed to create new tracker: %s", err) } - scAddress := common.HexToAddress(config.Registry.ContractAddress) - - acl := impl.NewACL(systemStore) - - ex, err := executor.NewExecutor(config.ChainID, executorsDB, parser, tableConstraints.MaxRowCount, acl) + ex, err := executor.NewExecutor(config.ChainID, db, parser, tableConstraints.MaxRowCount, impl.NewACL(db)) if err != nil { return chains.ChainStack{}, fmt.Errorf("creating txn processor: %s", err) } @@ -234,7 +234,20 @@ func createChainIDStack( eventfeed.WithEventPersistence(config.EventFeed.PersistEvents), eventfeed.WithFetchExtraBlockInformation(fetchExtraBlockInfo), } - ef, err := efimpl.New(systemStore, config.ChainID, conn, scAddress, sm, efOpts...) 
+ + eventFeedStore, err := efimpl.NewInstrumentedEventFeedStore(db) + if err != nil { + return chains.ChainStack{}, fmt.Errorf("creating event feed store: %s", err) + } + + ef, err := efimpl.New( + eventFeedStore, + config.ChainID, + conn, + common.HexToAddress(config.Registry.ContractAddress), + sm, + efOpts..., + ) if err != nil { return chains.ChainStack{}, fmt.Errorf("creating event feed: %s", err) } @@ -255,7 +268,6 @@ func createChainIDStack( return chains.ChainStack{}, fmt.Errorf("starting event processor: %s", err) } return chains.ChainStack{ - Store: systemStore, EventProcessor: ep, Close: func(ctx context.Context) error { log.Info().Int64("chain_id", int64(config.ChainID)).Msg("closing stack...") @@ -264,9 +276,6 @@ func createChainIDStack( ep.Stop() tracker.Close() conn.Close() - if err := systemStore.Close(); err != nil { - return fmt.Errorf("closing system store for chain_id %d: %s", config.ChainID, err) - } return nil }, }, nil @@ -274,20 +283,22 @@ func createChainIDStack( func configureTelemetry( dirPath string, + db *database.SQLiteDB, chainStacks map[tableland.ChainID]chains.ChainStack, config TelemetryPublisherConfig, ) (moduleCloser, error) { - var nodeID string - var err error - for chainID := range chainStacks { - nodeID, err = chainStacks[chainID].Store.GetID(context.Background()) - if err != nil { - return nil, fmt.Errorf("get node ID: %s", err) + nodeID, err := db.Queries.GetId(context.Background()) + if err == sql.ErrNoRows { + nodeID = strings.Replace(uuid.NewString(), "-", "", -1) + if err := db.Queries.InsertId(context.Background(), nodeID); err != nil { + log.Fatal().Err(err).Msg("failed to insert id") } - log.Info().Str("node_id", nodeID).Msg("node info") - break + } else if err != nil { + log.Fatal().Err(err).Msg("failed to get id") } + log.Info().Str("node_id", nodeID).Msg("node info") + // Wiring metricsDatabaseURL := fmt.Sprintf( "file://%s?_busy_timeout=5000&_foreign_keys=on&_journal_mode=WAL", @@ -395,25 +406,13 @@ func createParser(queryConstraints QueryConstraints) (parsing.SQLValidator, erro } func createChainStacks( - databaseURL string, + db *database.SQLiteDB, parser parsing.SQLValidator, sm *sharedmemory.SharedMemory, chainsConfig []ChainConfig, tableConstraintsConfig TableConstraints, fetchExtraBlockInfo bool, ) (map[tableland.ChainID]chains.ChainStack, moduleCloser, error) { - executorsDB, err := otelsql.Open("sqlite3", databaseURL) - if err != nil { - return nil, nil, fmt.Errorf("opening database: %s", err) - } - executorsDB.SetMaxOpenConns(1) - attrs := append([]attribute.KeyValue{attribute.String("name", "executors")}, metrics.BaseAttrs...) - if err := otelsql.RegisterDBStatsMetrics( - executorsDB, - otelsql.WithAttributes(attrs...)); err != nil { - return nil, nil, fmt.Errorf("registering executors db stats: %s", err) - } - chainStacks := map[tableland.ChainID]chains.ChainStack{} for _, chainCfg := range chainsConfig { if _, ok := chainStacks[chainCfg.ChainID]; ok { @@ -421,8 +420,7 @@ func createChainStacks( } chainStack, err := createChainIDStack( chainCfg, - databaseURL, - executorsDB, + db, parser, sm, tableConstraintsConfig, @@ -450,10 +448,6 @@ func createChainStacks( } wg.Wait() - // Close Executor DB. 
- if err := executorsDB.Close(); err != nil { - return fmt.Errorf("closing executors db: %s", err) - } return nil } @@ -464,17 +458,20 @@ func createAPIServer( httpConfig HTTPConfig, gatewayConfig GatewayConfig, parser parsing.SQLValidator, + db *database.SQLiteDB, + sm *sharedmemory.SharedMemory, chainStacks map[tableland.ChainID]chains.ChainStack, ) (moduleCloser, error) { supportedChainIDs := make([]tableland.ChainID, 0, len(chainStacks)) - stores := make(map[tableland.ChainID]sqlstore.SystemStore, len(chainStacks)) + eps := make(map[tableland.ChainID]eventprocessor.EventProcessor, len(chainStacks)) for chainID, stack := range chainStacks { - stores[chainID] = stack.Store + eps[chainID] = stack.EventProcessor supportedChainIDs = append(supportedChainIDs, chainID) } + g, err := gateway.NewGateway( parser, - stores, + gatewayimpl.NewGatewayStore(db, parsing.NewReadStatementResolver(sm)), gatewayConfig.ExternalURIPrefix, gatewayConfig.MetadataRendererURI, gatewayConfig.AnimationRendererURI) diff --git a/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json b/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json index 94895f11..3ee09a1a 100644 --- a/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json +++ b/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json @@ -1300,7 +1300,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (http_status_code, http_server_name) (rate(http_server_request_count_total{service_name=\"tableland:api\"}[10m]))", + "expr": "sum by (http_status_code) (rate(http_server_request_count_total{service_name=\"tableland:api\"}[10m]))", "interval": "", "legendFormat": "{{http_status_code}}-{{http_server_name}}", "range": true, @@ -1411,7 +1411,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (http_server_name) (\n rate(http_server_request_count_total{service_name=\"tableland:api\"}[5m])\n)", + "expr": "sum by (service_name) (rate(http_server_request_count_total{service_name=\"tableland:api\"}[5m]))", "interval": "", "legendFormat": "__auto", "range": true, @@ -1477,7 +1477,32 @@ }, "unit": "ms" }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{}[5m])) by (le))" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { "h": 8, @@ -1506,7 +1531,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{}[5m])) by (http_server_name, le))", + "expr": "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{}[5m])) by (service_name, le))", "interval": "", "legendFormat": "__auto", "range": true, @@ -1617,7 +1642,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (http_server_name) ( rate(http_server_response_content_length_total[$__rate_interval]))", + "expr": "rate(http_server_response_content_length_total[$__rate_interval])", "interval": "", "legendFormat": "{{http_status_code}}", "range": true, @@ -1629,28 +1654,15 @@ }, { "collapsed": false, - "datasource": { - "type": "datasource", - "uid": "grafana" - }, "gridPos": { "h": 1, "w": 24, "x": 0, "y": 41 }, - "id": 12, + "id": 87, "panels": [], - "targets": [ - { - "datasource": { - "type": 
"datasource", - "uid": "grafana" - }, - "refId": "A" - } - ], - "title": "SQL Database", + "title": "Gateway", "type": "row" }, { @@ -1737,7 +1749,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (method) (\n rate(tableland_sqlstore_call_latency_count{service_name=\"tableland:api\"}[5m])\n)", + "expr": "sum by (method) (\n rate(tableland_gateway_call_latency_count{service_name=\"tableland:api\"}[5m])\n)", "interval": "", "legendFormat": "{{method}}", "range": true, @@ -1833,8 +1845,9 @@ "type": "prometheus", "uid": "P1809F7CD0C75ACF3" }, + "editorMode": "code", "exemplar": false, - "expr": "histogram_quantile(0.95, sum(rate(tableland_sqlstore_call_latency_bucket{service_name=\"tableland:api\"}[5m])) by (le, method))", + "expr": "histogram_quantile(0.95, sum(rate(tableland_gateway_call_latency_bucket{service_name=\"tableland:api\"}[5m])) by (le, method))", "format": "heatmap", "instant": false, "interval": "", @@ -1845,6 +1858,216 @@ "title": "Methods 95-tile latency", "type": "timeseries" }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 50 + }, + "id": 12, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "SQL Database", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P1809F7CD0C75ACF3" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 51 + }, + "id": 89, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P1809F7CD0C75ACF3" + }, + "editorMode": "code", + "expr": "sum by (chainID, method) (\n rate(tableland_eventfeed_store_call_count_total{service_name=\"tableland:api\"}[5m])\n)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Event Feed Store Calls", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P1809F7CD0C75ACF3" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + 
}, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 51 + }, + "id": 91, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P1809F7CD0C75ACF3" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.95, sum(rate(tableland_eventfeed_store_latency_bucket{service_name=\"tableland:api\"}[5m])) by (le, chainID, method))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Event Feed Store Latency", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -1908,8 +2131,8 @@ "gridPos": { "h": 8, "w": 6, - "x": 12, - "y": 42 + "x": 0, + "y": 59 }, "id": 74, "maxDataPoints": 25, @@ -1933,24 +2156,12 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum by (chain_id, name, status) (db_sql_connection_open{name=~\"processor|systemstore\"})", + "expr": "sum by ( name, status) (db_sql_connection_open{database=\"main\"})", "format": "time_series", "instant": false, "interval": "", "legendFormat": "{{chain_id}}-{{name}}-{{status}}", "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P1809F7CD0C75ACF3" - }, - "editorMode": "code", - "expr": "sum by (name, status) (db_sql_connection_open{name=~\"userstore\"})", - "hide": false, - "legendFormat": "{{name}}-{{status}}", - "range": true, - "refId": "B" } ], "title": "Open connections", @@ -2019,8 +2230,8 @@ "gridPos": { "h": 8, "w": 6, - "x": 18, - "y": 42 + "x": 6, + "y": 59 }, "id": 75, "maxDataPoints": 25, @@ -2044,24 +2255,12 @@ }, "editorMode": "code", "exemplar": false, - "expr": "histogram_quantile(0.95, sum(rate(db_sql_latency_milliseconds_bucket{name=~\"processor|systemstore\"}[$__rate_interval])) by (chain_id, name, le))", + "expr": "histogram_quantile(0.95, sum(rate(db_sql_latency_milliseconds_bucket{database=\"main\"}[$__rate_interval])) by ( name, le))", "format": "heatmap", "instant": false, "interval": "", "legendFormat": "{{chain_id}}-{{name}}", "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P1809F7CD0C75ACF3" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.95, sum(rate(db_sql_latency_milliseconds_bucket{name=~\"userstore\"}[$__rate_interval])) by (chain_id, name, le))", - "hide": false, - "legendFormat": "{{name}}", - "range": true, - "refId": "B" } ], "title": "Overall SQL query 95-th latency", @@ -2077,7 +2276,7 @@ "h": 1, "w": 24, "x": 0, - "y": 50 + "y": 67 }, "id": 4, "panels": [], @@ -2156,7 +2355,7 @@ "h": 8, "w": 6, "x": 0, - "y": 51 + "y": 68 }, "id": 72, "options": { @@ -2252,7 +2451,7 @@ "h": 8, "w": 6, "x": 6, - "y": 51 + "y": 68 }, "id": 76, "options": { @@ -2348,7 +2547,7 @@ "h": 8, "w": 6, "x": 12, - "y": 51 + "y": 68 }, "id": 77, "options": { @@ -2444,7 +2643,7 @@ "h": 8, "w": 6, "x": 18, - "y": 51 + "y": 68 }, "id": 78, "options": { @@ -2486,13 +2685,13 @@ "list": [] }, "time": { - "from": "now-15m", + "from": "now-30m", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Validator", "uid": "2Le7qt_7z", - "version": 8, + "version": 7, "weekStart": "" } \ No newline at end of file diff --git a/internal/chains/chains.go b/internal/chains/chains.go index 
c753f28f..604b6d26 100644 --- a/internal/chains/chains.go +++ b/internal/chains/chains.go @@ -4,12 +4,10 @@ import ( "context" "github.com/textileio/go-tableland/pkg/eventprocessor" - "github.com/textileio/go-tableland/pkg/sqlstore" ) // ChainStack contains components running for a specific ChainID. type ChainStack struct { - Store sqlstore.SystemStore EventProcessor eventprocessor.EventProcessor // close gracefully closes all the chain stack components. Close func(ctx context.Context) error diff --git a/internal/formatter/formatter.go b/internal/formatter/formatter.go index 55cf8899..3da40520 100644 --- a/internal/formatter/formatter.go +++ b/internal/formatter/formatter.go @@ -5,7 +5,7 @@ import ( "encoding/json" "fmt" - "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/internal/gateway" ) // Output is used to control the output format of a query specified with the "output" query param. @@ -64,7 +64,7 @@ func WithExtract(extract bool) FormatOption { } // Format transforms the user rows according to the provided configuration, retuning raw json or jsonl bytes. -func Format(userRows *tableland.TableData, opts ...FormatOption) ([]byte, FormatConfig, error) { +func Format(userRows *gateway.TableData, opts ...FormatOption) ([]byte, FormatConfig, error) { c := FormatConfig{ Output: Objects, } @@ -105,7 +105,7 @@ func Format(userRows *tableland.TableData, opts ...FormatOption) ([]byte, Format return unwrapped, c, nil } -func toObjects(in *tableland.TableData) []interface{} { +func toObjects(in *gateway.TableData) []interface{} { objects := make([]interface{}, len(in.Rows)) for i, row := range in.Rows { object := make(map[string]interface{}, len(row)) diff --git a/internal/formatter/formatter_test.go b/internal/formatter/formatter_test.go index f0360a8c..8a32b332 100644 --- a/internal/formatter/formatter_test.go +++ b/internal/formatter/formatter_test.go @@ -5,46 +5,46 @@ import ( "testing" "github.com/stretchr/testify/require" - "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/internal/gateway" ) var rawJSON = []byte("{\"city\":\"dallas\"}") -var input = &tableland.TableData{ - Columns: []tableland.Column{ +var input = &gateway.TableData{ + Columns: []gateway.Column{ {Name: "name"}, {Name: "age"}, {Name: "location"}, }, - Rows: [][]*tableland.ColumnValue{ - {tableland.OtherColValue("bob"), tableland.OtherColValue(40), tableland.JSONColValue(rawJSON)}, - {tableland.OtherColValue("jane"), tableland.OtherColValue(30), tableland.JSONColValue(rawJSON)}, + Rows: [][]*gateway.ColumnValue{ + {gateway.OtherColValue("bob"), gateway.OtherColValue(40), gateway.JSONColValue(rawJSON)}, + {gateway.OtherColValue("jane"), gateway.OtherColValue(30), gateway.JSONColValue(rawJSON)}, }, } -var inputExtractable = &tableland.TableData{ - Columns: []tableland.Column{ +var inputExtractable = &gateway.TableData{ + Columns: []gateway.Column{ {Name: "name"}, }, - Rows: [][]*tableland.ColumnValue{ - {tableland.OtherColValue("bob")}, - {tableland.OtherColValue("jane")}, + Rows: [][]*gateway.ColumnValue{ + {gateway.OtherColValue("bob")}, + {gateway.OtherColValue("jane")}, }, } -var inputExtractable2 = &tableland.TableData{ - Columns: []tableland.Column{ +var inputExtractable2 = &gateway.TableData{ + Columns: []gateway.Column{ {Name: "location"}, }, - Rows: [][]*tableland.ColumnValue{ - {tableland.JSONColValue(rawJSON)}, - {tableland.JSONColValue(rawJSON)}, + Rows: [][]*gateway.ColumnValue{ + {gateway.JSONColValue(rawJSON)}, + 
{gateway.JSONColValue(rawJSON)}, }, } func TestFormat(t *testing.T) { type args struct { - userRows *tableland.TableData + userRows *gateway.TableData output Output unwrap bool extract bool diff --git a/internal/gateway/gateway.go b/internal/gateway/gateway.go index 2dd822da..33e226c2 100644 --- a/internal/gateway/gateway.go +++ b/internal/gateway/gateway.go @@ -4,30 +4,23 @@ import ( "context" "database/sql" "encoding/base64" + "encoding/json" "errors" "fmt" "net/url" "strings" + "time" "github.com/ethereum/go-ethereum/common" logger "github.com/rs/zerolog/log" - "github.com/textileio/go-tableland/internal/router/middlewares" "github.com/textileio/go-tableland/internal/tableland" "github.com/textileio/go-tableland/pkg/parsing" - "github.com/textileio/go-tableland/pkg/sqlstore" "github.com/textileio/go-tableland/pkg/tables" ) // ErrTableNotFound indicates that the table doesn't exist. var ErrTableNotFound = errors.New("table not found") -// Gateway defines the gateway operations. -type Gateway interface { - RunReadQuery(ctx context.Context, stmt string) (*tableland.TableData, error) - GetTableMetadata(context.Context, tables.TableID) (sqlstore.TableMetadata, error) - GetReceiptByTransactionHash(context.Context, common.Hash) (sqlstore.Receipt, bool, error) -} - var log = logger.With().Str("component", "gateway").Logger() const ( @@ -38,13 +31,28 @@ const ( DefaultAnimationURL = "" ) +// Gateway defines the gateway operations. +type Gateway interface { + RunReadQuery(ctx context.Context, stmt string) (*TableData, error) + GetTableMetadata(context.Context, tableland.ChainID, tables.TableID) (TableMetadata, error) + GetReceiptByTransactionHash(context.Context, tableland.ChainID, common.Hash) (Receipt, bool, error) +} + +// GatewayStore is the storage layer of the Gateway. +type GatewayStore interface { + Read(context.Context, parsing.ReadStmt) (*TableData, error) + GetTable(context.Context, tableland.ChainID, tables.TableID) (Table, error) + GetSchemaByTableName(context.Context, string) (TableSchema, error) + GetReceipt(context.Context, tableland.ChainID, string) (Receipt, bool, error) +} + // GatewayService implements the Gateway interface using SQLStore. type GatewayService struct { parser parsing.SQLValidator extURLPrefix string metadataRendererURI string animationRendererURI string - stores map[tableland.ChainID]sqlstore.SystemStore + store GatewayStore } var _ (Gateway) = (*GatewayService)(nil) @@ -52,7 +60,7 @@ var _ (Gateway) = (*GatewayService)(nil) // NewGateway creates a new gateway service. func NewGateway( parser parsing.SQLValidator, - stores map[tableland.ChainID]sqlstore.SystemStore, + store GatewayStore, extURLPrefix string, metadataRendererURI string, animationRendererURI string, @@ -80,49 +88,43 @@ func NewGateway( extURLPrefix: extURLPrefix, metadataRendererURI: metadataRendererURI, animationRendererURI: animationRendererURI, - stores: stores, + store: store, }, nil } // GetTableMetadata returns table's metadata fetched from SQLStore. 
-func (g *GatewayService) GetTableMetadata(ctx context.Context, id tables.TableID) (sqlstore.TableMetadata, error) { - chainID, store, err := g.getStore(ctx) - if err != nil { - return sqlstore.TableMetadata{ - ExternalURL: fmt.Sprintf("%s/api/v1/tables/%d/%s", g.extURLPrefix, chainID, id), - Image: g.emptyMetadataImage(), - Message: "Chain isn't supported", - }, nil - } - table, err := store.GetTable(ctx, id) +func (g *GatewayService) GetTableMetadata( + ctx context.Context, chainID tableland.ChainID, id tables.TableID, +) (TableMetadata, error) { + table, err := g.store.GetTable(ctx, chainID, id) if err != nil { if !errors.Is(err, sql.ErrNoRows) { log.Error().Err(err).Msg("error fetching the table") - return sqlstore.TableMetadata{ + return TableMetadata{ ExternalURL: fmt.Sprintf("%s/api/v1/tables/%d/%s", g.extURLPrefix, chainID, id), Image: g.emptyMetadataImage(), Message: "Failed to fetch the table", }, nil } - return sqlstore.TableMetadata{ + return TableMetadata{ ExternalURL: fmt.Sprintf("%s/api/v1/tables/%d/%s", g.extURLPrefix, chainID, id), Image: g.emptyMetadataImage(), Message: "Table not found", }, ErrTableNotFound } tableName := fmt.Sprintf("%s_%d_%s", table.Prefix, table.ChainID, table.ID) - schema, err := store.GetSchemaByTableName(ctx, tableName) + schema, err := g.store.GetSchemaByTableName(ctx, tableName) if err != nil { - return sqlstore.TableMetadata{}, fmt.Errorf("get table schema information: %s", err) + return TableMetadata{}, fmt.Errorf("get table schema information: %s", err) } - return sqlstore.TableMetadata{ + return TableMetadata{ Name: tableName, ExternalURL: fmt.Sprintf("%s/api/v1/tables/%d/%s", g.extURLPrefix, table.ChainID, table.ID), Image: g.getMetadataImage(table.ChainID, table.ID), AnimationURL: g.getAnimationURL(table.ChainID, table.ID), - Attributes: []sqlstore.TableMetadataAttribute{ + Attributes: []TableMetadataAttribute{ { DisplayType: "date", TraitType: "created", @@ -135,22 +137,16 @@ func (g *GatewayService) GetTableMetadata(ctx context.Context, id tables.TableID // GetReceiptByTransactionHash returns a receipt by transaction hash. func (g *GatewayService) GetReceiptByTransactionHash( - ctx context.Context, - txnHash common.Hash, -) (sqlstore.Receipt, bool, error) { - _, store, err := g.getStore(ctx) - if err != nil { - return sqlstore.Receipt{}, false, fmt.Errorf("chain not found: %s", err) - } - - receipt, exists, err := store.GetReceipt(ctx, txnHash.Hex()) + ctx context.Context, chainID tableland.ChainID, txnHash common.Hash, +) (Receipt, bool, error) { + receipt, exists, err := g.store.GetReceipt(ctx, chainID, txnHash.Hex()) if err != nil { - return sqlstore.Receipt{}, false, fmt.Errorf("transaction receipt lookup: %s", err) + return Receipt{}, false, fmt.Errorf("transaction receipt lookup: %s", err) } if !exists { - return sqlstore.Receipt{}, false, nil + return Receipt{}, false, nil } - return sqlstore.Receipt{ + return Receipt{ ChainID: receipt.ChainID, BlockNumber: receipt.BlockNumber, IndexInBlock: receipt.IndexInBlock, @@ -162,46 +158,19 @@ func (g *GatewayService) GetReceiptByTransactionHash( } // RunReadQuery allows the user to run SQL. 
-func (g *GatewayService) RunReadQuery(ctx context.Context, statement string) (*tableland.TableData, error) { +func (g *GatewayService) RunReadQuery(ctx context.Context, statement string) (*TableData, error) { readStmt, err := g.parser.ValidateReadQuery(statement) if err != nil { - return nil, fmt.Errorf("validating query: %s", err) + return nil, fmt.Errorf("validating read query: %s", err) } - queryResult, err := g.runSelect(ctx, readStmt) + queryResult, err := g.store.Read(ctx, readStmt) if err != nil { return nil, fmt.Errorf("running read statement: %s", err) } return queryResult, nil } -func (g *GatewayService) runSelect(ctx context.Context, stmt parsing.ReadStmt) (*tableland.TableData, error) { - var store sqlstore.SystemStore - for _, store = range g.stores { - break - } - - queryResult, err := store.Read(ctx, stmt) - if err != nil { - return nil, fmt.Errorf("executing read-query: %s", err) - } - - return queryResult, nil -} - -func (g *GatewayService) getStore(ctx context.Context) (tableland.ChainID, sqlstore.SystemStore, error) { - ctxChainID := ctx.Value(middlewares.ContextKeyChainID) - chainID, ok := ctxChainID.(tableland.ChainID) - if !ok { - return 0, nil, errors.New("no chain id found in context") - } - store, ok := g.stores[chainID] - if !ok { - return 0, nil, fmt.Errorf("chain id %d isn't supported in the validator", chainID) - } - return chainID, store, nil -} - func (g *GatewayService) getMetadataImage(chainID tableland.ChainID, tableID tables.TableID) string { if g.metadataRendererURI == "" { return DefaultMetadataImage @@ -223,3 +192,126 @@ func (g *GatewayService) emptyMetadataImage() string { svgEncoded := base64.StdEncoding.EncodeToString([]byte(svg)) return fmt.Sprintf("data:image/svg+xml;base64,%s", svgEncoded) } + +// Receipt represents a Tableland receipt. +type Receipt struct { + ChainID tableland.ChainID + BlockNumber int64 + IndexInBlock int64 + TxnHash string + + TableID *tables.TableID + Error *string + ErrorEventIdx *int +} + +// Table represents a system-wide table stored in Tableland. +type Table struct { + ID tables.TableID `json:"id"` // table id + ChainID tableland.ChainID `json:"chain_id"` + Controller string `json:"controller"` // controller address + Prefix string `json:"prefix"` + Structure string `json:"structure"` + CreatedAt time.Time `json:"created_at"` +} + +// Name returns table's full name. +func (t Table) Name() string { + return fmt.Sprintf("%s_%d_%s", t.Prefix, t.ChainID, t.ID) +} + +// TableSchema represents the schema of a table. +type TableSchema struct { + Columns []ColumnSchema + TableConstraints []string +} + +// ColumnSchema represents the schema of a column. +type ColumnSchema struct { + Name string + Type string + Constraints []string +} + +// TableMetadata represents table metadata (OpenSea standard). +type TableMetadata struct { + Name string `json:"name,omitempty"` + ExternalURL string `json:"external_url"` + Image string `json:"image"` + Message string `json:"message,omitempty"` + AnimationURL string `json:"animation_url,omitempty"` + Attributes []TableMetadataAttribute `json:"attributes,omitempty"` + Schema TableSchema `json:"schema"` +} + +// TableMetadataAttribute represents the table metadata attribute. +type TableMetadataAttribute struct { + DisplayType string `json:"display_type"` + TraitType string `json:"trait_type"` + Value interface{} `json:"value"` +} + +// Column defines a column in table data. +type Column struct { + Name string `json:"name"` +} + +// TableData defines a tabular representation of query results. 
+type TableData struct { + Columns []Column `json:"columns"` + Rows [][]*ColumnValue `json:"rows"` +} + +// ColumnValue wraps data from the db that may be raw json or any other value. +type ColumnValue struct { + jsonValue json.RawMessage + otherValue interface{} +} + +// Value returns the underlying value. +func (cv *ColumnValue) Value() interface{} { + if cv.jsonValue != nil { + return cv.jsonValue + } + return cv.otherValue +} + +// Scan implements Scan. +func (cv *ColumnValue) Scan(src interface{}) error { + cv.jsonValue = nil + cv.otherValue = nil + switch src := src.(type) { + case string: + trimmed := strings.TrimLeft(src, " ") + if (strings.HasPrefix(trimmed, "{") || strings.HasPrefix(trimmed, "[")) && json.Valid([]byte(src)) { + cv.jsonValue = []byte(src) + } else { + cv.otherValue = src + } + case []byte: + tmp := make([]byte, len(src)) + copy(tmp, src) + cv.otherValue = tmp + default: + cv.otherValue = src + } + return nil +} + +// MarshalJSON implements MarshalJSON. +func (cv *ColumnValue) MarshalJSON() ([]byte, error) { + if cv.jsonValue != nil { + return cv.jsonValue, nil + } + return json.Marshal(cv.otherValue) +} + +// JSONColValue creates a UserValue with the provided json. +func JSONColValue(v json.RawMessage) *ColumnValue { + return &ColumnValue{jsonValue: v} +} + +// OtherColValue creates a UserValue with the provided other value. +func OtherColValue(v interface{}) *ColumnValue { + return &ColumnValue{otherValue: v} +} diff --git a/internal/gateway/gateway_instrumented.go b/internal/gateway/gateway_instrumented.go index edf4a3eb..9bb05ed8 100644 --- a/internal/gateway/gateway_instrumented.go +++ b/internal/gateway/gateway_instrumented.go @@ -6,17 +6,15 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/textileio/go-tableland/internal/router/middlewares" "github.com/textileio/go-tableland/internal/tableland" "github.com/textileio/go-tableland/pkg/metrics" - "github.com/textileio/go-tableland/pkg/sqlstore" "github.com/textileio/go-tableland/pkg/tables" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric/global" "go.opentelemetry.io/otel/metric/instrument" ) -// InstrumentedGateway implements the Gateway interface using SQLStore. +// InstrumentedGateway implements an instrumented Gateway. type InstrumentedGateway struct { gateway Gateway callCount instrument.Int64Counter @@ -28,11 +26,11 @@ var _ (Gateway) = (*InstrumentedGateway)(nil) // NewInstrumentedGateway creates a new InstrumentedGateway. func NewInstrumentedGateway(gateway Gateway) (Gateway, error) { meter := global.MeterProvider().Meter("tableland") - callCount, err := meter.Int64Counter("tableland.system.call.count") + callCount, err := meter.Int64Counter("tableland.gateway.call.count") if err != nil { return &InstrumentedGateway{}, fmt.Errorf("registering call counter: %s", err) } - latencyHistogram, err := meter.Int64Histogram("tableland.system.call.latency") + latencyHistogram, err := meter.Int64Histogram("tableland.gateway.call.latency") if err != nil { return &InstrumentedGateway{}, fmt.Errorf("registering latency histogram: %s", err) } @@ -40,15 +38,13 @@ func NewInstrumentedGateway(gateway Gateway) (Gateway, error) { return &InstrumentedGateway{gateway, callCount, latencyHistogram}, nil } -// GetReceiptByTransactionHash implements system.SystemService. +// GetReceiptByTransactionHash implements gateway.Gateway. 
func (g *InstrumentedGateway) GetReceiptByTransactionHash( - ctx context.Context, - hash common.Hash, -) (sqlstore.Receipt, bool, error) { + ctx context.Context, chainID tableland.ChainID, hash common.Hash, +) (Receipt, bool, error) { start := time.Now() - receipt, exists, err := g.gateway.GetReceiptByTransactionHash(ctx, hash) + receipt, exists, err := g.gateway.GetReceiptByTransactionHash(ctx, chainID, hash) latency := time.Since(start).Milliseconds() - chainID, _ := ctx.Value(middlewares.ContextKeyChainID).(tableland.ChainID) attributes := append([]attribute.KeyValue{ {Key: "method", Value: attribute.StringValue("GetReceiptByTransactionHash")}, @@ -65,14 +61,13 @@ func (g *InstrumentedGateway) GetReceiptByTransactionHash( // GetTableMetadata returns table's metadata fetched from SQLStore. func (g *InstrumentedGateway) GetTableMetadata( ctx context.Context, + chainID tableland.ChainID, id tables.TableID, -) (sqlstore.TableMetadata, error) { +) (TableMetadata, error) { start := time.Now() - metadata, err := g.gateway.GetTableMetadata(ctx, id) + metadata, err := g.gateway.GetTableMetadata(ctx, chainID, id) latency := time.Since(start).Milliseconds() - chainID, _ := ctx.Value(middlewares.ContextKeyChainID).(tableland.ChainID) - // NOTE: we may face a risk of high-cardilatity in the future. This should be revised. attributes := append([]attribute.KeyValue{ {Key: "method", Value: attribute.StringValue("GetTableMetadata")}, {Key: "success", Value: attribute.BoolValue(err == nil)}, @@ -86,16 +81,14 @@ func (g *InstrumentedGateway) GetTableMetadata( } // RunReadQuery allows the user to run SQL. -func (g *InstrumentedGateway) RunReadQuery(ctx context.Context, statement string) (*tableland.TableData, error) { +func (g *InstrumentedGateway) RunReadQuery(ctx context.Context, statement string) (*TableData, error) { start := time.Now() data, err := g.gateway.RunReadQuery(ctx, statement) latency := time.Since(start).Milliseconds() - chainID, _ := ctx.Value(middlewares.ContextKeyChainID).(tableland.ChainID) attributes := append([]attribute.KeyValue{ {Key: "method", Value: attribute.StringValue("RunReadQuery")}, {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(chainID))}, }, metrics.BaseAttrs...) g.callCount.Add(ctx, 1, attributes...) diff --git a/internal/gateway/impl/gateway_store.go b/internal/gateway/impl/gateway_store.go new file mode 100644 index 00000000..389b2cc2 --- /dev/null +++ b/internal/gateway/impl/gateway_store.go @@ -0,0 +1,173 @@ +package impl + +import ( + "context" + "database/sql" + "fmt" + "strings" + + "github.com/tablelandnetwork/sqlparser" + "github.com/textileio/go-tableland/internal/gateway" + "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" + "github.com/textileio/go-tableland/pkg/database/db" + "github.com/textileio/go-tableland/pkg/parsing" + "github.com/textileio/go-tableland/pkg/tables" +) + +// GatewayStore is the storage layer of the gateway. +type GatewayStore struct { + db *database.SQLiteDB + resolver sqlparser.ReadStatementResolver +} + +// NewGatewayStore creates a new GatewayStore. +func NewGatewayStore(db *database.SQLiteDB, resolver sqlparser.ReadStatementResolver) *GatewayStore { + return &GatewayStore{ + db: db, + resolver: resolver, + } +} + +// Read executes a parsed read statement. 
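+// It resolves the parsed statement into a SQL query using the configured
+// resolver, executes it against the underlying SQLite database, and returns
+// the result as a gateway.TableData.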
+func (s *GatewayStore) Read(ctx context.Context, stmt parsing.ReadStmt) (*gateway.TableData, error) { + query, err := stmt.GetQuery(s.resolver) + if err != nil { + return nil, fmt.Errorf("get query: %s", err) + } + ret, err := s.execReadQuery(ctx, query) + if err != nil { + return nil, fmt.Errorf("parsing result to json: %s", err) + } + + return ret, nil +} + +// GetTable returns a table information. +func (s *GatewayStore) GetTable( + ctx context.Context, chainID tableland.ChainID, tableID tables.TableID, +) (gateway.Table, error) { + table, err := s.db.Queries.GetTable(ctx, db.GetTableParams{ + ChainID: int64(chainID), + ID: tableID.ToBigInt().Int64(), + }) + if err == sql.ErrNoRows { + return gateway.Table{}, fmt.Errorf("not found: %w", err) + } + if err != nil { + return gateway.Table{}, fmt.Errorf("getting table: %s", err) + } + + tableID, err = tables.NewTableIDFromInt64(table.ID) + if err != nil { + return gateway.Table{}, fmt.Errorf("table id from int64: %s", err) + } + + return gateway.Table{ + ID: tableID, + ChainID: tableland.ChainID(table.ChainID), + Controller: table.Controller, + Prefix: table.Prefix, + Structure: table.Structure, + }, nil +} + +// GetSchemaByTableName returns the table schema given its name. +func (s *GatewayStore) GetSchemaByTableName(ctx context.Context, tblName string) (gateway.TableSchema, error) { + createStmt, err := s.db.Queries.GetSchemaByTableName(ctx, tblName) + if err != nil { + return gateway.TableSchema{}, fmt.Errorf("failed to get the table: %s", err) + } + + if strings.Contains(strings.ToLower(createStmt), "autoincrement") { + createStmt = strings.Replace(createStmt, "autoincrement", "", -1) + } + + index := strings.LastIndex(strings.ToLower(createStmt), "strict") + ast, err := sqlparser.Parse(createStmt[:index]) + if err != nil { + return gateway.TableSchema{}, fmt.Errorf("failed to parse create stmt: %s", err) + } + + if ast.Errors[0] != nil { + return gateway.TableSchema{}, fmt.Errorf("non-syntax error: %s", ast.Errors[0]) + } + + createTableNode := ast.Statements[0].(*sqlparser.CreateTable) + columns := make([]gateway.ColumnSchema, len(createTableNode.ColumnsDef)) + for i, col := range createTableNode.ColumnsDef { + colConstraints := []string{} + for _, colConstraint := range col.Constraints { + colConstraints = append(colConstraints, colConstraint.String()) + } + + columns[i] = gateway.ColumnSchema{ + Name: col.Column.String(), + Type: strings.ToLower(col.Type), + Constraints: colConstraints, + } + } + + tableConstraints := make([]string, len(createTableNode.Constraints)) + for i, tableConstraint := range createTableNode.Constraints { + tableConstraints[i] = tableConstraint.String() + } + + return gateway.TableSchema{ + Columns: columns, + TableConstraints: tableConstraints, + }, nil +} + +// GetReceipt gets the receipt of a given transaction hash. 
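+// The boolean return value reports whether a receipt exists for the given
+// chain ID and transaction hash.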
+func (s *GatewayStore) GetReceipt( + ctx context.Context, chainID tableland.ChainID, txnHash string, +) (gateway.Receipt, bool, error) { + params := db.GetReceiptParams{ + ChainID: int64(chainID), + TxnHash: txnHash, + } + + res, err := s.db.Queries.GetReceipt(ctx, params) + if err == sql.ErrNoRows { + return gateway.Receipt{}, false, nil + } + if err != nil { + return gateway.Receipt{}, false, fmt.Errorf("get receipt: %s", err) + } + + receipt := gateway.Receipt{ + ChainID: chainID, + BlockNumber: res.BlockNumber, + IndexInBlock: res.IndexInBlock, + TxnHash: txnHash, + } + if res.Error.Valid { + receipt.Error = &res.Error.String + + errorEventIdx := int(res.ErrorEventIdx.Int64) + receipt.ErrorEventIdx = &errorEventIdx + } + if res.TableID.Valid { + id, err := tables.NewTableIDFromInt64(res.TableID.Int64) + if err != nil { + return gateway.Receipt{}, false, fmt.Errorf("parsing id to string: %s", err) + } + receipt.TableID = &id + } + + return receipt, true, nil +} + +func (s *GatewayStore) execReadQuery(ctx context.Context, q string) (*gateway.TableData, error) { + rows, err := s.db.DB.QueryContext(ctx, q) + if err != nil { + return nil, fmt.Errorf("executing query: %s", err) + } + defer func() { + if err = rows.Close(); err != nil { + s.db.Log.Warn().Err(err).Msg("closing rows") + } + }() + return rowsToTableData(rows) +} diff --git a/internal/gateway/gateway_test.go b/internal/gateway/impl/gateway_store_test.go similarity index 55% rename from internal/gateway/gateway_test.go rename to internal/gateway/impl/gateway_store_test.go index 9504ffc0..579ba2cc 100644 --- a/internal/gateway/gateway_test.go +++ b/internal/gateway/impl/gateway_store_test.go @@ -1,8 +1,8 @@ -package gateway +package impl import ( "context" - "database/sql" + "encoding/json" "fmt" "math/big" "testing" @@ -10,14 +10,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" + "github.com/textileio/go-tableland/internal/gateway" "github.com/textileio/go-tableland/internal/router/middlewares" "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" executor "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor/impl" "github.com/textileio/go-tableland/pkg/parsing" parserimpl "github.com/textileio/go-tableland/pkg/parsing/impl" - "github.com/textileio/go-tableland/pkg/sqlstore" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" "github.com/textileio/go-tableland/tests" @@ -31,7 +31,7 @@ func TestGatewayInitialization(t *testing.T) { t.Run("invalid external uri", func(t *testing.T) { t.Parallel() - _, err := NewGateway(nil, nil, "invalid uri", "", "") + _, err := gateway.NewGateway(nil, nil, "invalid uri", "", "") require.Error(t, err) require.ErrorContains(t, err, "invalid external url prefix") }) @@ -39,7 +39,7 @@ func TestGatewayInitialization(t *testing.T) { t.Run("invalid metadata uri", func(t *testing.T) { t.Parallel() - _, err := NewGateway(nil, nil, "https://tableland.network", "invalid uri", "") + _, err := gateway.NewGateway(nil, nil, "https://tableland.network", "invalid uri", "") require.Error(t, err) require.ErrorContains(t, err, "metadata renderer uri could not be parsed") }) @@ -47,7 +47,7 @@ func TestGatewayInitialization(t *testing.T) { t.Run("invalid animation uri", func(t *testing.T) { t.Parallel() - _, err := 
NewGateway(nil, nil, "https://tableland.network", "https://tables.tableland.xyz", "invalid uri") + _, err := gateway.NewGateway(nil, nil, "https://tableland.network", "https://tables.tableland.xyz", "invalid uri") require.Error(t, err) require.ErrorContains(t, err, "animation renderer uri could not be parsed") }) @@ -56,19 +56,15 @@ func TestGatewayInitialization(t *testing.T) { func TestGateway(t *testing.T) { dbURI := tests.Sqlite3URI(t) - ctx := context.WithValue(context.Background(), middlewares.ContextKeyChainID, tableland.ChainID(1337)) - store, err := system.New(dbURI, chainID) - require.NoError(t, err) + ctx := context.WithValue(context.Background(), middlewares.ContextKeyChainID, chainID) parser, err := parserimpl.New([]string{"system_", "registry"}) require.NoError(t, err) - db, err := sql.Open("sqlite3", dbURI) + db, err := database.Open(dbURI, 1) require.NoError(t, err) - db.SetMaxOpenConns(1) - // populate the registry with a table - ex, err := executor.NewExecutor(1337, db, parser, 0, nil) + ex, err := executor.NewExecutor(chainID, db, parser, 0, nil) require.NoError(t, err) bs, err := ex.NewBlockScope(ctx, 0) require.NoError(t, err) @@ -96,20 +92,18 @@ func TestGateway(t *testing.T) { parser, err = parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - stack := map[tableland.ChainID]sqlstore.SystemStore{1337: store} - svc, err := NewGateway(parser, stack, "https://tableland.network", "https://tables.tableland.xyz", "") + svc, err := gateway.NewGateway( + parser, NewGatewayStore(db, nil), "https://tableland.network", "https://tables.tableland.xyz", "", + ) require.NoError(t, err) - metadata, err := svc.GetTableMetadata(ctx, id) + metadata, err := svc.GetTableMetadata(ctx, chainID, id) require.NoError(t, err) require.Equal(t, "foo_1337_42", metadata.Name) - require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", 1337, id), metadata.ExternalURL) + require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", chainID, id), metadata.ExternalURL) require.Equal(t, "https://tables.tableland.xyz/1337/42.svg", metadata.Image) //nolint require.Equal(t, "date", metadata.Attributes[0].DisplayType) require.Equal(t, "created", metadata.Attributes[0].TraitType) - - // this is hard to test because the created_at comes from the database. 
just testing is not the 1970 value - require.NotEqual(t, new(time.Time).Unix(), metadata.Attributes[0].Value) } func TestGetMetadata(t *testing.T) { @@ -117,27 +111,22 @@ func TestGetMetadata(t *testing.T) { dbURI := tests.Sqlite3URI(t) - ctx := context.WithValue(context.Background(), middlewares.ContextKeyChainID, tableland.ChainID(1337)) - store, err := system.New(dbURI, chainID) - require.NoError(t, err) - parser, err := parserimpl.New([]string{"system_", "registry"}) require.NoError(t, err) - db, err := sql.Open("sqlite3", dbURI) + db, err := database.Open(dbURI, 1) require.NoError(t, err) - db.SetMaxOpenConns(1) // populate the registry with a table - ex, err := executor.NewExecutor(1337, db, parser, 0, nil) + ex, err := executor.NewExecutor(chainID, db, parser, 0, nil) require.NoError(t, err) - bs, err := ex.NewBlockScope(ctx, 0) + bs, err := ex.NewBlockScope(context.Background(), 0) require.NoError(t, err) id, _ := tables.NewTableID("42") require.NoError(t, err) - res, err := bs.ExecuteTxnEvents(ctx, eventfeed.TxnEvents{ + res, err := bs.ExecuteTxnEvents(context.Background(), eventfeed.TxnEvents{ TxnHash: common.HexToHash("0x0"), Events: []interface{}{ ðereum.ContractCreateTable{ @@ -153,23 +142,21 @@ func TestGetMetadata(t *testing.T) { require.NoError(t, bs.Commit()) require.NoError(t, bs.Close()) - stack := map[tableland.ChainID]sqlstore.SystemStore{1337: store} - t.Run("empty metadata uri", func(t *testing.T) { t.Parallel() parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - svc, err := NewGateway(parser, stack, "https://tableland.network", "", "") + svc, err := gateway.NewGateway(parser, NewGatewayStore(db, nil), "https://tableland.network", "", "") require.NoError(t, err) - metadata, err := svc.GetTableMetadata(ctx, id) + metadata, err := svc.GetTableMetadata(context.Background(), chainID, id) require.NoError(t, err) require.Equal(t, "foo_1337_42", metadata.Name) - require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", 1337, id), metadata.ExternalURL) - require.Equal(t, DefaultMetadataImage, metadata.Image) + require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", chainID, id), metadata.ExternalURL) + require.Equal(t, gateway.DefaultMetadataImage, metadata.Image) require.Equal(t, "date", metadata.Attributes[0].DisplayType) require.Equal(t, "created", metadata.Attributes[0].TraitType) }) @@ -180,14 +167,16 @@ func TestGetMetadata(t *testing.T) { parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - svc, err := NewGateway(parser, stack, "https://tableland.network", "https://tables.tableland.xyz", "") + svc, err := gateway.NewGateway( + parser, NewGatewayStore(db, nil), "https://tableland.network", "https://tables.tableland.xyz", "", + ) require.NoError(t, err) - metadata, err := svc.GetTableMetadata(ctx, id) + metadata, err := svc.GetTableMetadata(context.Background(), chainID, id) require.NoError(t, err) require.Equal(t, "foo_1337_42", metadata.Name) - require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", 1337, id), metadata.ExternalURL) + require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", chainID, id), metadata.ExternalURL) require.Equal(t, "https://tables.tableland.xyz/1337/42.svg", metadata.Image) require.Equal(t, "date", metadata.Attributes[0].DisplayType) require.Equal(t, "created", metadata.Attributes[0].TraitType) @@ -199,14 +188,17 @@ func TestGetMetadata(t *testing.T) { parser, err 
:= parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - svc, err := NewGateway(parser, stack, "https://tableland.network", "https://tables.tableland.xyz/", "") + svc, err := gateway.NewGateway( + parser, NewGatewayStore(db, nil), "https://tableland.network", "https://tables.tableland.xyz/", "", + ) require.NoError(t, err) - metadata, err := svc.GetTableMetadata(ctx, id) + metadata, err := svc.GetTableMetadata(context.Background(), chainID, id) require.NoError(t, err) require.Equal(t, "foo_1337_42", metadata.Name) - require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", 1337, id), metadata.ExternalURL) + + require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", chainID, id), metadata.ExternalURL) require.Equal(t, "https://tables.tableland.xyz/1337/42.svg", metadata.Image) require.Equal(t, "date", metadata.Attributes[0].DisplayType) require.Equal(t, "created", metadata.Attributes[0].TraitType) @@ -218,7 +210,7 @@ func TestGetMetadata(t *testing.T) { parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - _, err = NewGateway(parser, stack, "https://tableland.network", "foo", "") + _, err = gateway.NewGateway(parser, NewGatewayStore(db, nil), "https://tableland.network", "foo", "") require.Error(t, err) require.ErrorContains(t, err, "metadata renderer uri could not be parsed") }) @@ -229,15 +221,17 @@ func TestGetMetadata(t *testing.T) { parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - svc, err := NewGateway(parser, stack, "https://tableland.network", "https://tables.tableland.xyz", "") + svc, err := gateway.NewGateway( + parser, NewGatewayStore(db, nil), "https://tableland.network", "https://tables.tableland.xyz", "", + ) require.NoError(t, err) id, _ := tables.NewTableID("43") require.NoError(t, err) - metadata, err := svc.GetTableMetadata(ctx, id) - require.ErrorIs(t, err, ErrTableNotFound) - require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", 1337, id), metadata.ExternalURL) + metadata, err := svc.GetTableMetadata(context.Background(), chainID, id) + require.ErrorIs(t, err, gateway.ErrTableNotFound) + require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", chainID, id), metadata.ExternalURL) require.Equal(t, "data:image/svg+xml;base64,PHN2ZyB3aWR0aD0nNTEyJyBoZWlnaHQ9JzUxMicgeG1sbnM9J2h0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnJz48cmVjdCB3aWR0aD0nNTEyJyBoZWlnaHQ9JzUxMicgZmlsbD0nIzAwMCcvPjwvc3ZnPg==", metadata.Image) // nolint require.Equal(t, "Table not found", metadata.Message) }) @@ -248,20 +242,20 @@ func TestGetMetadata(t *testing.T) { parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - svc, err := NewGateway( + svc, err := gateway.NewGateway( parser, - stack, + NewGatewayStore(db, nil), "https://tableland.network", "https://tables.tableland.xyz", "https://tables.tableland.xyz", ) require.NoError(t, err) - metadata, err := svc.GetTableMetadata(ctx, id) + metadata, err := svc.GetTableMetadata(context.Background(), chainID, id) require.NoError(t, err) require.Equal(t, "foo_1337_42", metadata.Name) - require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", 1337, id), metadata.ExternalURL) + require.Equal(t, fmt.Sprintf("https://tableland.network/api/v1/tables/%d/%s", chainID, id), metadata.ExternalURL) require.Equal(t, "https://tables.tableland.xyz/1337/42.svg", metadata.Image) require.Equal(t, 
"https://tables.tableland.xyz/1337/42.html", metadata.AnimationURL) require.Equal(t, "date", metadata.Attributes[0].DisplayType) @@ -273,11 +267,8 @@ func TestQueryConstraints(t *testing.T) { t.Parallel() dbURI := tests.Sqlite3URI(t) - - ctx := context.WithValue(context.Background(), middlewares.ContextKeyChainID, tableland.ChainID(1337)) - store, err := system.New(dbURI, chainID) + db, err := database.Open(dbURI, 1) require.NoError(t, err) - stack := map[tableland.ChainID]sqlstore.SystemStore{1337: store} parsingOpts := []parsing.Option{ parsing.WithMaxReadQuerySize(44), @@ -289,17 +280,150 @@ func TestQueryConstraints(t *testing.T) { t.Run("read-query-size-nok", func(t *testing.T) { t.Parallel() - gateway, err := NewGateway( + gateway, err := gateway.NewGateway( parser, - stack, + NewGatewayStore(db, nil), "https://tableland.network", "https://tables.tableland.xyz", "https://tables.tableland.xyz", ) require.NoError(t, err) - _, err = gateway.RunReadQuery(ctx, "SELECT * FROM foo_1337_1 WHERE bar = 'hello2'") // length of 45 bytes + _, err = gateway.RunReadQuery( + context.Background(), "SELECT * FROM foo_1337_1 WHERE bar = 'hello2'", + ) // length of 45 bytes require.Error(t, err) require.ErrorContains(t, err, "read query size is too long") }) } + +func TestUserValue(t *testing.T) { + uv := &gateway.ColumnValue{} + + var in0 int64 = 100 + require.NoError(t, uv.Scan(in0)) + val := uv.Value() + v0, ok := val.(int64) + require.True(t, ok) + require.Equal(t, in0, v0) + b, err := json.Marshal(uv) + require.NoError(t, err) + var out0 int64 + require.NoError(t, json.Unmarshal(b, &out0)) + require.Equal(t, in0, out0) + + in1 := 100.0 + require.NoError(t, uv.Scan(in1)) + val = uv.Value() + v1, ok := val.(float64) + require.True(t, ok) + require.Equal(t, in1, v1) + b, err = json.Marshal(uv) + require.NoError(t, err) + var out1 float64 + require.NoError(t, json.Unmarshal(b, &out1)) + require.Equal(t, in1, out1) + + in2 := true + require.NoError(t, uv.Scan(in2)) + val = uv.Value() + v2, ok := val.(bool) + require.True(t, ok) + require.Equal(t, in2, v2) + b, err = json.Marshal(uv) + require.NoError(t, err) + var out2 bool + require.NoError(t, json.Unmarshal(b, &out2)) + require.Equal(t, in2, out2) + + in3 := []byte("hello there") + require.NoError(t, uv.Scan(in3)) + val = uv.Value() + v3, ok := val.([]byte) + require.True(t, ok) + require.Equal(t, in3, v3) + b, err = json.Marshal(uv) + require.NoError(t, err) + var out3 []byte + require.NoError(t, json.Unmarshal(b, &out3)) + require.Equal(t, in3, out3) + + in4 := "hello" + require.NoError(t, uv.Scan(in4)) + val = uv.Value() + v4, ok := val.(string) + require.True(t, ok) + require.Equal(t, in4, v4) + b, err = json.Marshal(uv) + require.NoError(t, err) + var out4 string + require.NoError(t, json.Unmarshal(b, &out4)) + require.Equal(t, in4, out4) + + in5 := time.Now() + require.NoError(t, uv.Scan(in5)) + val = uv.Value() + v5, ok := val.(time.Time) + require.True(t, ok) + require.Equal(t, in5, v5) + b, err = json.Marshal(uv) + require.NoError(t, err) + var out5 time.Time + require.NoError(t, json.Unmarshal(b, &out5)) + require.Equal(t, in5.Unix(), out5.Unix()) + + var in6 interface{} + require.NoError(t, uv.Scan(in6)) + val = uv.Value() + require.Nil(t, val) + require.Equal(t, in6, val) + b, err = json.Marshal(uv) + require.NoError(t, err) + var out6 interface{} + require.NoError(t, json.Unmarshal(b, &out6)) + require.Equal(t, in6, out6) + + in7 := "{ \"hello" + require.NoError(t, uv.Scan(in7)) + val = uv.Value() + v7, ok := val.(string) + 
require.True(t, ok) + require.Equal(t, in7, v7) + b, err = json.Marshal(uv) + require.NoError(t, err) + var out7 string + require.NoError(t, json.Unmarshal(b, &out7)) + require.Equal(t, in7, out7) + + in8 := "[ \"hello" + require.NoError(t, uv.Scan(in8)) + val = uv.Value() + v8, ok := val.(string) + require.True(t, ok) + require.Equal(t, in8, v8) + b, err = json.Marshal(uv) + require.NoError(t, err) + var out8 string + require.NoError(t, json.Unmarshal(b, &out8)) + require.Equal(t, in8, out8) + + in9 := "{\"name\":\"aaron\"}" + require.NoError(t, uv.Scan(in9)) + val = uv.Value() + v9, ok := val.(json.RawMessage) + require.True(t, ok) + require.Greater(t, len(v9), 0) + b, err = json.Marshal(uv) + require.NoError(t, err) + require.Equal(t, in9, string(b)) + + in10 := "[\"one\",\"two\"]" + require.NoError(t, uv.Scan(in10)) + val = uv.Value() + v10, ok := val.(json.RawMessage) + require.True(t, ok) + require.Greater(t, len(v10), 0) + b, err = json.Marshal(uv) + require.NoError(t, err) + require.Equal(t, in10, string(b)) +} diff --git a/pkg/sqlstore/impl/system/rowstotabledata.go b/internal/gateway/impl/rowstotabledata.go similarity index 58% rename from pkg/sqlstore/impl/system/rowstotabledata.go rename to internal/gateway/impl/rowstotabledata.go index eed05b03..f2382a34 100644 --- a/pkg/sqlstore/impl/system/rowstotabledata.go +++ b/internal/gateway/impl/rowstotabledata.go @@ -1,13 +1,13 @@ -package system +package impl import ( "database/sql" "fmt" - "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/internal/gateway" ) -func rowsToTableData(rows *sql.Rows) (*tableland.TableData, error) { +func rowsToTableData(rows *sql.Rows) (*gateway.TableData, error) { columns, err := getColumnsData(rows) if err != nil { return nil, fmt.Errorf("get columns from rows: %s", err) @@ -17,30 +17,30 @@ func rowsToTableData(rows *sql.Rows) (*tableland.TableData, error) { return nil, err } - return &tableland.TableData{ + return &gateway.TableData{ Columns: columns, Rows: rowsData, }, nil } -func getColumnsData(rows *sql.Rows) ([]tableland.Column, error) { +func getColumnsData(rows *sql.Rows) ([]gateway.Column, error) { cols, err := rows.Columns() if err != nil { return nil, fmt.Errorf("get columns from sql.Rows: %s", err) } - columns := make([]tableland.Column, len(cols)) + columns := make([]gateway.Column, len(cols)) for i := range cols { - columns[i] = tableland.Column{Name: cols[i]} + columns[i] = gateway.Column{Name: cols[i]} } return columns, nil } -func getRowsData(rows *sql.Rows, numColumns int) ([][]*tableland.ColumnValue, error) { - rowsData := make([][]*tableland.ColumnValue, 0) +func getRowsData(rows *sql.Rows, numColumns int) ([][]*gateway.ColumnValue, error) { + rowsData := make([][]*gateway.ColumnValue, 0) for rows.Next() { - vals := make([]*tableland.ColumnValue, numColumns) + vals := make([]*gateway.ColumnValue, numColumns) for i := range vals { - val := &tableland.ColumnValue{} + val := &gateway.ColumnValue{} vals[i] = val } scanArgs := make([]interface{}, len(vals)) diff --git a/internal/router/controllers/controller.go b/internal/router/controllers/controller.go index 1c152c3b..7356118b 100644 --- a/internal/router/controllers/controller.go +++ b/internal/router/controllers/controller.go @@ -22,11 +22,6 @@ import ( "github.com/textileio/go-tableland/pkg/telemetry" ) -// SQLRunner defines the run SQL interface of Tableland. 
-type SQLRunner interface { - RunReadQuery(ctx context.Context, stmt string) (*tableland.TableData, error) -} - // Controller defines the HTTP handlers for interacting with user tables. type Controller struct { gateway gateway.Gateway @@ -115,7 +110,9 @@ func (c *Controller) GetReceiptByTransactionHash(rw http.ResponseWriter, r *http } txnHash := common.HexToHash(paramTxnHash) - receipt, exists, err := c.gateway.GetReceiptByTransactionHash(ctx, txnHash) + receipt, exists, err := c.gateway.GetReceiptByTransactionHash( + ctx, ctx.Value(middlewares.ContextKeyChainID).(tableland.ChainID), txnHash, + ) if err != nil { rw.Header().Set("Content-Type", "application/json") rw.WriteHeader(http.StatusBadRequest) @@ -163,8 +160,7 @@ func (c *Controller) GetTable(rw http.ResponseWriter, r *http.Request) { _ = json.NewEncoder(rw).Encode(errors.ServiceError{Message: "Invalid id format"}) return } - - metadata, err := c.gateway.GetTableMetadata(ctx, id) + metadata, err := c.gateway.GetTableMetadata(ctx, ctx.Value(middlewares.ContextKeyChainID).(tableland.ChainID), id) if err == gateway.ErrTableNotFound { rw.WriteHeader(http.StatusNotFound) return @@ -266,7 +262,7 @@ func (c *Controller) runReadRequest( ctx context.Context, stm string, rw http.ResponseWriter, -) (*tableland.TableData, bool) { +) (*gateway.TableData, bool) { res, err := c.gateway.RunReadQuery(ctx, stm) if err != nil { rw.WriteHeader(http.StatusBadRequest) diff --git a/internal/router/controllers/controller_test.go b/internal/router/controllers/controller_test.go index 30283c04..d1b06cc1 100644 --- a/internal/router/controllers/controller_test.go +++ b/internal/router/controllers/controller_test.go @@ -13,36 +13,36 @@ import ( "github.com/gorilla/mux" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/textileio/go-tableland/internal/gateway" "github.com/textileio/go-tableland/internal/router/middlewares" "github.com/textileio/go-tableland/internal/tableland" "github.com/textileio/go-tableland/mocks" - "github.com/textileio/go-tableland/pkg/sqlstore" ) func TestQuery(t *testing.T) { r := mocks.NewGateway(t) r.EXPECT().RunReadQuery(mock.Anything, mock.AnythingOfType("string")).Return( - &tableland.TableData{ - Columns: []tableland.Column{ + &gateway.TableData{ + Columns: []gateway.Column{ {Name: "id"}, {Name: "eyes"}, {Name: "mouth"}, }, - Rows: [][]*tableland.ColumnValue{ + Rows: [][]*gateway.ColumnValue{ { - tableland.OtherColValue(1), - tableland.OtherColValue("Big"), - tableland.OtherColValue("Surprised"), + gateway.OtherColValue(1), + gateway.OtherColValue("Big"), + gateway.OtherColValue("Surprised"), }, { - tableland.OtherColValue(2), - tableland.OtherColValue("Medium"), - tableland.OtherColValue("Sad"), + gateway.OtherColValue(2), + gateway.OtherColValue("Medium"), + gateway.OtherColValue("Sad"), }, { - tableland.OtherColValue(3), - tableland.OtherColValue("Small"), - tableland.OtherColValue("Happy"), + gateway.OtherColValue(3), + gateway.OtherColValue("Small"), + gateway.OtherColValue("Happy"), }, }, }, @@ -91,12 +91,12 @@ func TestQuery(t *testing.T) { func TestQueryExtracted(t *testing.T) { r := mocks.NewGateway(t) r.EXPECT().RunReadQuery(mock.Anything, mock.AnythingOfType("string")).Return( - &tableland.TableData{ - Columns: []tableland.Column{{Name: "name"}}, - Rows: [][]*tableland.ColumnValue{ - {tableland.OtherColValue("bob")}, - {tableland.OtherColValue("jane")}, - {tableland.OtherColValue("alex")}, + &gateway.TableData{ + Columns: []gateway.Column{{Name: "name"}}, + Rows: 
[][]*gateway.ColumnValue{ + {gateway.OtherColValue("bob")}, + {gateway.OtherColValue("jane")}, + {gateway.OtherColValue("alex")}, }, }, nil, @@ -138,21 +138,21 @@ func TestQueryExtracted(t *testing.T) { func TestGetTablesByMocked(t *testing.T) { t.Parallel() - gateway := mocks.NewGateway(t) - gateway.EXPECT().GetTableMetadata(mock.Anything, mock.Anything).Return( - sqlstore.TableMetadata{ + g := mocks.NewGateway(t) + g.EXPECT().GetTableMetadata(mock.Anything, mock.Anything, mock.Anything).Return( + gateway.TableMetadata{ Name: "name-1", - ExternalURL: "https://tableland.network/tables/100", + ExternalURL: "https://gateway.network/tables/100", Image: "https://bafkreifhuhrjhzbj4onqgbrmhpysk2mop2jimvdvfut6taiyzt2yqzt43a.ipfs.dweb.link", - Attributes: []sqlstore.TableMetadataAttribute{ + Attributes: []gateway.TableMetadataAttribute{ { DisplayType: "date", TraitType: "created", Value: 1546360800, }, }, - Schema: sqlstore.TableSchema{ - Columns: []sqlstore.ColumnSchema{ + Schema: gateway.TableSchema{ + Columns: []gateway.ColumnSchema{ { Name: "foo", Type: "text", @@ -163,13 +163,15 @@ func TestGetTablesByMocked(t *testing.T) { nil, ) - ctrl := NewController(gateway) + ctrl := NewController(g) t.Run("get table metadata", func(t *testing.T) { t.Parallel() req, err := http.NewRequest("GET", "/api/v1/tables/1337/100", nil) require.NoError(t, err) + req = req.WithContext(context.WithValue(req.Context(), middlewares.ContextKeyChainID, tableland.ChainID(1337))) + router := mux.NewRouter() router.HandleFunc("/api/v1/tables/{chainID}/{tableId}", ctrl.GetTable) @@ -180,7 +182,7 @@ func TestGetTablesByMocked(t *testing.T) { //nolint expJSON := `{ "name":"name-1", - "external_url":"https://tableland.network/tables/100", + "external_url":"https://gateway.network/tables/100", "image":"https://bafkreifhuhrjhzbj4onqgbrmhpysk2mop2jimvdvfut6taiyzt2yqzt43a.ipfs.dweb.link", "attributes":[{"display_type":"date","trait_type":"created","value":1546360800}], "schema":{"columns":[{"name":"foo","type":"text"}]} @@ -197,6 +199,8 @@ func TestGetTableWithInvalidID(t *testing.T) { req, err := http.NewRequest("GET", path, nil) require.NoError(t, err) + req = req.WithContext(context.WithValue(req.Context(), middlewares.ContextKeyChainID, tableland.ChainID(1337))) + gateway := mocks.NewGateway(t) ctrl := NewController(gateway) @@ -218,13 +222,15 @@ func TestTableNotFoundMock(t *testing.T) { req, err := http.NewRequest("GET", "/tables/100", nil) require.NoError(t, err) - gateway := mocks.NewGateway(t) - gateway.EXPECT().GetTableMetadata(mock.Anything, mock.Anything).Return( - sqlstore.TableMetadata{}, + req = req.WithContext(context.WithValue(req.Context(), middlewares.ContextKeyChainID, tableland.ChainID(1337))) + + g := mocks.NewGateway(t) + g.EXPECT().GetTableMetadata(mock.Anything, mock.Anything, mock.Anything).Return( + gateway.TableMetadata{}, errors.New("failed"), ) - ctrl := NewController(gateway) + ctrl := NewController(g) router := mux.NewRouter() router.HandleFunc("/tables/{tableId}", ctrl.GetTable) diff --git a/internal/tableland/acl.go b/internal/tableland/acl.go index d61be100..102c7ff5 100644 --- a/internal/tableland/acl.go +++ b/internal/tableland/acl.go @@ -6,13 +6,14 @@ import ( "fmt" "github.com/ethereum/go-ethereum/common" + "github.com/textileio/go-tableland/pkg/tables" ) // ACL is the API for access control rules check. type ACL interface { // CheckPrivileges checks if an address can execute a specific operation on a table. 
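+	// The table's chain ID is part of the privilege lookup.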
- CheckPrivileges(context.Context, *sql.Tx, common.Address, tables.TableID, Operation) (bool, error) + CheckPrivileges(context.Context, *sql.Tx, ChainID, common.Address, tables.TableID, Operation) (bool, error) } // Privilege maps to SQL privilege and is the thing needed to execute an operation. diff --git a/internal/tableland/impl/acl.go b/internal/tableland/impl/acl.go index 0bd10f0a..423c1b87 100644 --- a/internal/tableland/impl/acl.go +++ b/internal/tableland/impl/acl.go @@ -7,34 +7,52 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/textileio/go-tableland/internal/tableland" - "github.com/textileio/go-tableland/pkg/sqlstore" + "github.com/textileio/go-tableland/pkg/database" + "github.com/textileio/go-tableland/pkg/database/db" "github.com/textileio/go-tableland/pkg/tables" ) -type acl struct { - store sqlstore.SystemStore +// ACLStore has access to the stored acl information. +type ACLStore struct { + db *database.SQLiteDB } // NewACL creates a new instance of the ACL. -func NewACL(store sqlstore.SystemStore) tableland.ACL { - return &acl{ - store: store, +func NewACL(db *database.SQLiteDB) *ACLStore { + return &ACLStore{ + db: db, } } +var _ tableland.ACL = (*ACLStore)(nil) + // CheckPrivileges checks if an address can execute a specific operation on a table. -func (acl *acl) CheckPrivileges( +func (acl *ACLStore) CheckPrivileges( ctx context.Context, tx *sql.Tx, + chainID tableland.ChainID, controller common.Address, id tables.TableID, op tableland.Operation, ) (bool, error) { - aclRule, err := acl.store.WithTx(tx).GetACLOnTableByController(ctx, id, controller.String()) + row, err := acl.db.Queries.WithTx(tx).GetAclByTableAndController(ctx, db.GetAclByTableAndControllerParams{ + ChainID: int64(chainID), + TableID: id.ToBigInt().Int64(), + UPPER: controller.Hex(), + }) + if err == sql.ErrNoRows { + return false, nil + } + if err != nil { return false, fmt.Errorf("privileges lookup: %s", err) } + aclRule, err := aclFromSQLtoDTO(row) + if err != nil { + return false, fmt.Errorf("transforming to dto: %s", err) + } + isAllowed, _ := aclRule.Privileges.CanExecute(op) if !isAllowed { return false, nil @@ -42,3 +60,40 @@ func (acl *acl) CheckPrivileges( return true, nil } + +func aclFromSQLtoDTO(acl db.SystemAcl) (SystemACL, error) { + id, err := tables.NewTableIDFromInt64(acl.TableID) + if err != nil { + return SystemACL{}, fmt.Errorf("parsing id to string: %s", err) + } + + var privileges tableland.Privileges + if acl.Privileges&tableland.PrivInsert.Bitfield > 0 { + privileges = append(privileges, tableland.PrivInsert) + } + if acl.Privileges&tableland.PrivUpdate.Bitfield > 0 { + privileges = append(privileges, tableland.PrivUpdate) + } + if acl.Privileges&tableland.PrivDelete.Bitfield > 0 { + privileges = append(privileges, tableland.PrivDelete) + } + + systemACL := SystemACL{ + ChainID: tableland.ChainID(acl.ChainID), + TableID: id, + Controller: acl.Controller, + Privileges: privileges, + } + + return systemACL, nil +} + +// SystemACL represents the system acl table. 
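+// It holds a controller's privileges for a table on a given chain.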
+type SystemACL struct { + Controller string + ChainID tableland.ChainID + TableID tables.TableID + Privileges tableland.Privileges + // CreatedAt time.Time + // UpdatedAt *time.Time +} diff --git a/internal/tableland/impl/tableland_test.go b/internal/tableland/impl/tableland_test.go index 1ccf84d5..dc9de7c0 100644 --- a/internal/tableland/impl/tableland_test.go +++ b/internal/tableland/impl/tableland_test.go @@ -3,7 +3,6 @@ package impl import ( "context" "crypto/ecdsa" - "database/sql" "encoding/csv" "encoding/hex" "encoding/json" @@ -22,16 +21,17 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" "github.com/textileio/go-tableland/internal/gateway" + gatewayimpl "github.com/textileio/go-tableland/internal/gateway/impl" "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" efimpl "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed/impl" epimpl "github.com/textileio/go-tableland/pkg/eventprocessor/impl" executor "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor/impl" "github.com/textileio/go-tableland/pkg/parsing" parserimpl "github.com/textileio/go-tableland/pkg/parsing/impl" + "github.com/textileio/go-tableland/pkg/sharedmemory" - "github.com/textileio/go-tableland/pkg/sqlstore" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" @@ -73,7 +73,7 @@ func TestInsertOnConflict(t *testing.T) { build(t) tablelandClient := setup.newTablelandClient(t) - ctx, backend, sc, store := setup.ctx, setup.ethClient, setup.contract, setup.systemStore + ctx, backend, sc := setup.ctx, setup.ethClient, setup.contract gateway, txOpts := tablelandClient.gateway, tablelandClient.txOpts caller := txOpts.From @@ -104,7 +104,7 @@ func TestInsertOnConflict(t *testing.T) { time.Second*5, time.Millisecond*100, ) - requireReceipts(ctx, t, store, txnHashes, true) + requireReceipts(ctx, t, gateway, txnHashes, true) } func TestMultiStatement(t *testing.T) { @@ -114,7 +114,7 @@ func TestMultiStatement(t *testing.T) { build(t) tablelandClient := setup.newTablelandClient(t) - ctx, backend, sc, store := setup.ctx, setup.ethClient, setup.contract, setup.systemStore + ctx, backend, sc := setup.ctx, setup.ethClient, setup.contract gateway, txOpts := tablelandClient.gateway, tablelandClient.txOpts caller := txOpts.From @@ -140,7 +140,7 @@ func TestMultiStatement(t *testing.T) { time.Second*5, time.Millisecond*100, ) - requireReceipts(ctx, t, store, []string{r.Hash().Hex()}, true) + requireReceipts(ctx, t, gateway, []string{r.Hash().Hex()}, true) } func TestReadSystemTable(t *testing.T) { @@ -157,7 +157,7 @@ func TestReadSystemTable(t *testing.T) { _, err := sc.CreateTable(txOpts, caller, `CREATE TABLE foo_1337 (myjson TEXT);`) require.NoError(t, err) - res, err := runReadQuery(ctx, t, gateway, "select * from registry") + res, err := gateway.RunReadQuery(ctx, "select * from registry") require.NoError(t, err) _, err = json.Marshal(res) require.NoError(t, err) @@ -211,7 +211,7 @@ func TestCheckInsertPrivileges(t *testing.T) { granterSetup := setup.newTablelandClient(t) granteeSetup := setup.newTablelandClient(t) - ctx, backend, sc, store := setup.ctx, setup.ethClient, setup.contract, setup.systemStore + ctx, backend, sc := setup.ctx, setup.ethClient, setup.contract txOptsGranter := granterSetup.txOpts gatewayGrantee, txOptsGrantee 
:= granteeSetup.gateway, granteeSetup.txOpts @@ -249,11 +249,11 @@ func TestCheckInsertPrivileges(t *testing.T) { 100*time.Millisecond, ) successfulTxnHashes = append(successfulTxnHashes, txn.Hash().Hex()) - requireReceipts(ctx, t, store, successfulTxnHashes, true) + requireReceipts(ctx, t, gatewayGrantee, successfulTxnHashes, true) } else { require.Never(t, runSQLCountEq(ctx, t, gatewayGrantee, testQuery, 1), 5*time.Second, 100*time.Millisecond) - requireReceipts(ctx, t, store, successfulTxnHashes, true) - requireReceipts(ctx, t, store, []string{txn.Hash().Hex()}, false) + requireReceipts(ctx, t, gatewayGrantee, successfulTxnHashes, true) + requireReceipts(ctx, t, gatewayGrantee, []string{txn.Hash().Hex()}, false) } } }(test)) @@ -291,7 +291,7 @@ func TestCheckUpdatePrivileges(t *testing.T) { granterSetup := setup.newTablelandClient(t) granteeSetup := setup.newTablelandClient(t) - ctx, backend, sc, store := setup.ctx, setup.ethClient, setup.contract, setup.systemStore + ctx, backend, sc := setup.ctx, setup.ethClient, setup.contract txOptsGranter := granterSetup.txOpts gatewayGrantee, txOptsGrantee := granteeSetup.gateway, granteeSetup.txOpts @@ -335,11 +335,11 @@ func TestCheckUpdatePrivileges(t *testing.T) { 100*time.Millisecond, ) successfulTxnHashes = append(successfulTxnHashes, txn.Hash().Hex()) - requireReceipts(ctx, t, store, successfulTxnHashes, true) + requireReceipts(ctx, t, gatewayGrantee, successfulTxnHashes, true) } else { require.Never(t, runSQLCountEq(ctx, t, gatewayGrantee, testQuery, 1), 5*time.Second, 100*time.Millisecond) - requireReceipts(ctx, t, store, successfulTxnHashes, true) - requireReceipts(ctx, t, store, []string{txn.Hash().Hex()}, false) + requireReceipts(ctx, t, gatewayGrantee, successfulTxnHashes, true) + requireReceipts(ctx, t, gatewayGrantee, []string{txn.Hash().Hex()}, false) } } }(test)) @@ -377,7 +377,7 @@ func TestCheckDeletePrivileges(t *testing.T) { granterSetup := setup.newTablelandClient(t) granteeSetup := setup.newTablelandClient(t) - ctx, backend, sc, store := setup.ctx, setup.ethClient, setup.contract, setup.systemStore + ctx, backend, sc := setup.ctx, setup.ethClient, setup.contract txOptsGranter := granterSetup.txOpts gatewayGrantee, txOptsGrantee := granteeSetup.gateway, granteeSetup.txOpts @@ -419,10 +419,10 @@ func TestCheckDeletePrivileges(t *testing.T) { 100*time.Millisecond, ) successfulTxnHashes = append(successfulTxnHashes, txn.Hash().Hex()) - requireReceipts(ctx, t, store, successfulTxnHashes, true) + requireReceipts(ctx, t, gatewayGrantee, successfulTxnHashes, true) } else { require.Never(t, runSQLCountEq(ctx, t, gatewayGrantee, testQuery, 0), 5*time.Second, 100*time.Millisecond) - requireReceipts(ctx, t, store, []string{txn.Hash().Hex()}, false) + requireReceipts(ctx, t, gatewayGrantee, []string{txn.Hash().Hex()}, false) } } }(test)) @@ -436,7 +436,7 @@ func TestOwnerRevokesItsPrivilegeInsideMultipleStatements(t *testing.T) { build(t) tablelandClient := setup.newTablelandClient(t) - ctx, backend, sc, store := setup.ctx, setup.ethClient, setup.contract, setup.systemStore + ctx, backend, sc := setup.ctx, setup.ethClient, setup.contract gateway, txOpts := tablelandClient.gateway, tablelandClient.txOpts caller := txOpts.From @@ -456,7 +456,7 @@ func TestOwnerRevokesItsPrivilegeInsideMultipleStatements(t *testing.T) { testQuery := "SELECT * FROM foo_1337_1;" cond := runSQLCountEq(ctx, t, gateway, testQuery, 1) require.Never(t, cond, 5*time.Second, 100*time.Millisecond) - requireReceipts(ctx, t, store, []string{txn.Hash().Hex()}, false) + 
requireReceipts(ctx, t, gateway, []string{txn.Hash().Hex()}, false) } func TestTransferTable(t *testing.T) { @@ -468,7 +468,7 @@ func TestTransferTable(t *testing.T) { owner1Setup := setup.newTablelandClient(t) owner2Setup := setup.newTablelandClient(t) - ctx, backend, sc, store := setup.ctx, setup.ethClient, setup.contract, setup.systemStore + ctx, backend, sc := setup.ctx, setup.ethClient, setup.contract gatewayOwner1, txOptsOwner1 := owner1Setup.gateway, owner1Setup.txOpts gatewayOwner2, txOptsOwner2 := owner2Setup.gateway, owner2Setup.txOpts @@ -496,7 +496,7 @@ func TestTransferTable(t *testing.T) { 5*time.Second, 100*time.Millisecond, ) - requireReceipts(ctx, t, store, []string{txn1.Hash().Hex()}, false) + requireReceipts(ctx, t, gatewayOwner1, []string{txn1.Hash().Hex()}, false) // insert from owner2 will EVENTUALLY go through require.Eventually(t, @@ -504,7 +504,7 @@ func TestTransferTable(t *testing.T) { 5*time.Second, 100*time.Millisecond, ) - requireReceipts(ctx, t, store, []string{txn2.Hash().Hex()}, true) + requireReceipts(ctx, t, gatewayOwner2, []string{txn2.Hash().Hex()}, true) // check registry table new ownership require.Eventually(t, @@ -582,12 +582,12 @@ func jsonEq( func runSQLCountEq( ctx context.Context, t *testing.T, - tbld tableland.Tableland, + gateway gateway.Gateway, sql string, expCount int, ) func() bool { return func() bool { - response, err := runReadQuery(ctx, t, tbld, sql) + response, err := gateway.RunReadQuery(ctx, sql) // if we get a table undefined error, try again if err != nil && strings.Contains(err.Error(), "table not found") { return false @@ -608,17 +608,6 @@ func runSQLCountEq( } } -func runReadQuery( - ctx context.Context, - t *testing.T, - tbld tableland.Tableland, - sql string, -) (interface{}, error) { - t.Helper() - - return tbld.RunReadQuery(ctx, sql) -} - func helpTestWriteQuery( t *testing.T, sc *ethereum.Contract, @@ -649,38 +638,17 @@ func readCsvFile(t *testing.T, filePath string) [][]string { return records } -type aclHalfMock struct { - sqlStore sqlstore.SystemStore -} - -func (acl *aclHalfMock) CheckPrivileges( - ctx context.Context, - tx *sql.Tx, - controller common.Address, - id tables.TableID, - op tableland.Operation, -) (bool, error) { - aclImpl := NewACL(acl.sqlStore) - return aclImpl.CheckPrivileges(ctx, tx, controller, id, op) -} - -func (acl *aclHalfMock) IsOwner(_ context.Context, _ common.Address, _ tables.TableID) (bool, error) { - return true, nil -} - func requireReceipts( ctx context.Context, t *testing.T, - store *system.SystemStore, + gateway gateway.Gateway, txnHashes []string, ok bool, ) { t.Helper() for _, txnHash := range txnHashes { - // TODO: GetReceipt is only used by the tests, we can use system service instead - - receipt, found, err := store.GetReceipt(ctx, txnHash) + receipt, found, err := gateway.GetReceiptByTransactionHash(ctx, 1337, common.HexToHash(txnHash)) require.NoError(t, err) require.True(t, found) require.NotNil(t, receipt) @@ -753,24 +721,22 @@ func (b *tablelandSetupBuilder) build(t *testing.T) *tablelandSetup { dbURI := tests.Sqlite3URI(t) ctx := context.Background() - store, err := system.New(dbURI, tableland.ChainID(1337)) + db, err := database.Open(dbURI, 1) require.NoError(t, err) parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}, b.parsingOpts...) 
require.NoError(t, err) - db, err := sql.Open("sqlite3", dbURI) - require.NoError(t, err) - db.SetMaxOpenConns(1) - - ex, err := executor.NewExecutor(1337, db, parser, 0, &aclHalfMock{store}) + acl := NewACL(db) + ex, err := executor.NewExecutor(1337, db, parser, 0, acl) require.NoError(t, err) backend, addr, sc, auth, sk := testutil.Setup(t) // Spin up dependencies needed for the EventProcessor. // i.e: Executor, Parser, and EventFeed (connected to the EVM chain) - ef, err := efimpl.New(store, + ef, err := efimpl.New( + efimpl.NewEventFeedStore(db), 1337, backend, addr, @@ -804,8 +770,9 @@ func (b *tablelandSetupBuilder) build(t *testing.T) *tablelandSetup { deployerTxOpts: auth, // common dependencies among mesa clients - parser: parser, - systemStore: store, + parser: parser, + + store: gatewayimpl.NewGatewayStore(db, nil), } } @@ -826,8 +793,8 @@ type tablelandSetup struct { deployerTxOpts *bind.TransactOpts // common dependencies among tableland clients - parser parsing.SQLValidator - systemStore *system.SystemStore + parser parsing.SQLValidator + store gateway.GatewayStore } func (s *tablelandSetup) newTablelandClient(t *testing.T) *tablelandClient { @@ -850,7 +817,7 @@ func (s *tablelandSetup) newTablelandClient(t *testing.T) *tablelandClient { gateway, err := gateway.NewGateway( s.parser, - map[tableland.ChainID]sqlstore.SystemStore{1337: s.systemStore}, + s.store, "https://tableland.network/tables", "https://tables.tableland.xyz", "https://tables.tableland.xyz", diff --git a/internal/tableland/tableland.go b/internal/tableland/tableland.go index 7f018e31..969ac992 100644 --- a/internal/tableland/tableland.go +++ b/internal/tableland/tableland.go @@ -1,159 +1,4 @@ package tableland -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/textileio/go-tableland/pkg/tables" -) - -// Column defines a column in table data. -type Column struct { - Name string `json:"name"` -} - -// TableData defines a tabular representation of query results. -type TableData struct { - Columns []Column `json:"columns"` - Rows [][]*ColumnValue `json:"rows"` -} - -// ColumnValue wraps data from the db that may be raw json or any other value. -type ColumnValue struct { - jsonValue json.RawMessage - otherValue interface{} -} - -// Value returns the underlying value. -func (cv *ColumnValue) Value() interface{} { - if cv.jsonValue != nil { - return cv.jsonValue - } - return cv.otherValue -} - -// Scan implements Scan. -func (cv *ColumnValue) Scan(src interface{}) error { - cv.jsonValue = nil - cv.otherValue = nil - switch src := src.(type) { - case string: - trimmed := strings.TrimLeft(src, " ") - if (strings.HasPrefix(trimmed, "{") || strings.HasPrefix(trimmed, "[")) && json.Valid([]byte(src)) { - cv.jsonValue = []byte(src) - } else { - cv.otherValue = src - } - case []byte: - tmp := make([]byte, len(src)) - copy(tmp, src) - cv.otherValue = tmp - default: - cv.otherValue = src - } - return nil -} - -// MarshalJSON implements MarshalJSON. -func (cv *ColumnValue) MarshalJSON() ([]byte, error) { - if cv.jsonValue != nil { - return cv.jsonValue, nil - } - return json.Marshal(cv.otherValue) -} - -// JSONColValue creates a UserValue with the provided json. -func JSONColValue(v json.RawMessage) *ColumnValue { - return &ColumnValue{jsonValue: v} -} - -// OtherColValue creates a UserValue with the provided other value. 
-func OtherColValue(v interface{}) *ColumnValue { - return &ColumnValue{otherValue: v} -} - -// TxnReceipt is a Tableland event processing receipt. -type TxnReceipt struct { - ChainID ChainID `json:"chain_id"` - TxnHash string `json:"txn_hash"` - BlockNumber int64 `json:"block_number"` - - TableID *string `json:"table_id,omitempty"` - Error string `json:"error"` - ErrorEventIdx int `json:"error_event_idx"` -} - -// Tableland defines the interface of Tableland. -type Tableland interface { - RunReadQuery(ctx context.Context, stmt string) (*TableData, error) -} - // ChainID is a supported EVM chain identifier. type ChainID int64 - -// Table represents a database table. -type Table struct { - id tables.TableID - prefix string - chainID ChainID -} - -// ChainID returns table's chain id. -func (t Table) ChainID() ChainID { - return t.chainID -} - -// NewTableFromName creates a Table from its name. -func NewTableFromName(name string) (Table, error) { - parts := strings.Split(name, "_") - - if len(parts) < 2 { - return Table{}, errors.New("table name has invalid format") - } - - tableID, err := tables.NewTableID(parts[len(parts)-1]) - if err != nil { - return Table{}, fmt.Errorf("new table id: %s", err) - } - - i, err := strconv.ParseInt(parts[len(parts)-2], 10, 64) - if err != nil { - return Table{}, fmt.Errorf("parse chain id: %s", err) - } - - return Table{ - id: tableID, - prefix: strings.Join(parts[:len(parts)-2], "_"), - chainID: ChainID(i), - }, nil -} - -// EVMEvent is a Tableland on-chain event produced by the Registry SC. -type EVMEvent struct { - Address common.Address - Topics []byte - Data []byte - BlockNumber uint64 - TxHash common.Hash - TxIndex uint - BlockHash common.Hash - Index uint - - // Enhanced fields - ChainID ChainID - EventJSON []byte - EventType string -} - -// EVMBlockInfo contains information about an EVM block. 
-type EVMBlockInfo struct { - ChainID ChainID - BlockNumber int64 - Timestamp time.Time -} diff --git a/mocks/Gateway.go b/mocks/Gateway.go index 7fc449b5..e73b00df 100644 --- a/mocks/Gateway.go +++ b/mocks/Gateway.go @@ -7,9 +7,9 @@ import ( common "github.com/ethereum/go-ethereum/common" - mock "github.com/stretchr/testify/mock" + gateway "github.com/textileio/go-tableland/internal/gateway" - sqlstore "github.com/textileio/go-tableland/pkg/sqlstore" + mock "github.com/stretchr/testify/mock" tableland "github.com/textileio/go-tableland/internal/tableland" @@ -29,27 +29,27 @@ func (_m *Gateway) EXPECT() *Gateway_Expecter { return &Gateway_Expecter{mock: &_m.Mock} } -// GetReceiptByTransactionHash provides a mock function with given fields: _a0, _a1 -func (_m *Gateway) GetReceiptByTransactionHash(_a0 context.Context, _a1 common.Hash) (sqlstore.Receipt, bool, error) { - ret := _m.Called(_a0, _a1) +// GetReceiptByTransactionHash provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Gateway) GetReceiptByTransactionHash(_a0 context.Context, _a1 tableland.ChainID, _a2 common.Hash) (gateway.Receipt, bool, error) { + ret := _m.Called(_a0, _a1, _a2) - var r0 sqlstore.Receipt - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) sqlstore.Receipt); ok { - r0 = rf(_a0, _a1) + var r0 gateway.Receipt + if rf, ok := ret.Get(0).(func(context.Context, tableland.ChainID, common.Hash) gateway.Receipt); ok { + r0 = rf(_a0, _a1, _a2) } else { - r0 = ret.Get(0).(sqlstore.Receipt) + r0 = ret.Get(0).(gateway.Receipt) } var r1 bool - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) bool); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, tableland.ChainID, common.Hash) bool); ok { + r1 = rf(_a0, _a1, _a2) } else { r1 = ret.Get(1).(bool) } var r2 error - if rf, ok := ret.Get(2).(func(context.Context, common.Hash) error); ok { - r2 = rf(_a0, _a1) + if rf, ok := ret.Get(2).(func(context.Context, tableland.ChainID, common.Hash) error); ok { + r2 = rf(_a0, _a1, _a2) } else { r2 = ret.Error(2) } @@ -64,37 +64,38 @@ type Gateway_GetReceiptByTransactionHash_Call struct { // GetReceiptByTransactionHash is a helper method to define mock.On call // - _a0 context.Context -// - _a1 common.Hash -func (_e *Gateway_Expecter) GetReceiptByTransactionHash(_a0 interface{}, _a1 interface{}) *Gateway_GetReceiptByTransactionHash_Call { - return &Gateway_GetReceiptByTransactionHash_Call{Call: _e.mock.On("GetReceiptByTransactionHash", _a0, _a1)} +// - _a1 tableland.ChainID +// - _a2 common.Hash +func (_e *Gateway_Expecter) GetReceiptByTransactionHash(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Gateway_GetReceiptByTransactionHash_Call { + return &Gateway_GetReceiptByTransactionHash_Call{Call: _e.mock.On("GetReceiptByTransactionHash", _a0, _a1, _a2)} } -func (_c *Gateway_GetReceiptByTransactionHash_Call) Run(run func(_a0 context.Context, _a1 common.Hash)) *Gateway_GetReceiptByTransactionHash_Call { +func (_c *Gateway_GetReceiptByTransactionHash_Call) Run(run func(_a0 context.Context, _a1 tableland.ChainID, _a2 common.Hash)) *Gateway_GetReceiptByTransactionHash_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) + run(args[0].(context.Context), args[1].(tableland.ChainID), args[2].(common.Hash)) }) return _c } -func (_c *Gateway_GetReceiptByTransactionHash_Call) Return(_a0 sqlstore.Receipt, _a1 bool, _a2 error) *Gateway_GetReceiptByTransactionHash_Call { +func (_c *Gateway_GetReceiptByTransactionHash_Call) Return(_a0 
gateway.Receipt, _a1 bool, _a2 error) *Gateway_GetReceiptByTransactionHash_Call { _c.Call.Return(_a0, _a1, _a2) return _c } -// GetTableMetadata provides a mock function with given fields: _a0, _a1 -func (_m *Gateway) GetTableMetadata(_a0 context.Context, _a1 tables.TableID) (sqlstore.TableMetadata, error) { - ret := _m.Called(_a0, _a1) +// GetTableMetadata provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Gateway) GetTableMetadata(_a0 context.Context, _a1 tableland.ChainID, _a2 tables.TableID) (gateway.TableMetadata, error) { + ret := _m.Called(_a0, _a1, _a2) - var r0 sqlstore.TableMetadata - if rf, ok := ret.Get(0).(func(context.Context, tables.TableID) sqlstore.TableMetadata); ok { - r0 = rf(_a0, _a1) + var r0 gateway.TableMetadata + if rf, ok := ret.Get(0).(func(context.Context, tableland.ChainID, tables.TableID) gateway.TableMetadata); ok { + r0 = rf(_a0, _a1, _a2) } else { - r0 = ret.Get(0).(sqlstore.TableMetadata) + r0 = ret.Get(0).(gateway.TableMetadata) } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, tables.TableID) error); ok { - r1 = rf(_a0, _a1) + if rf, ok := ret.Get(1).(func(context.Context, tableland.ChainID, tables.TableID) error); ok { + r1 = rf(_a0, _a1, _a2) } else { r1 = ret.Error(1) } @@ -109,33 +110,34 @@ type Gateway_GetTableMetadata_Call struct { // GetTableMetadata is a helper method to define mock.On call // - _a0 context.Context -// - _a1 tables.TableID -func (_e *Gateway_Expecter) GetTableMetadata(_a0 interface{}, _a1 interface{}) *Gateway_GetTableMetadata_Call { - return &Gateway_GetTableMetadata_Call{Call: _e.mock.On("GetTableMetadata", _a0, _a1)} +// - _a1 tableland.ChainID +// - _a2 tables.TableID +func (_e *Gateway_Expecter) GetTableMetadata(_a0 interface{}, _a1 interface{}, _a2 interface{}) *Gateway_GetTableMetadata_Call { + return &Gateway_GetTableMetadata_Call{Call: _e.mock.On("GetTableMetadata", _a0, _a1, _a2)} } -func (_c *Gateway_GetTableMetadata_Call) Run(run func(_a0 context.Context, _a1 tables.TableID)) *Gateway_GetTableMetadata_Call { +func (_c *Gateway_GetTableMetadata_Call) Run(run func(_a0 context.Context, _a1 tableland.ChainID, _a2 tables.TableID)) *Gateway_GetTableMetadata_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(tables.TableID)) + run(args[0].(context.Context), args[1].(tableland.ChainID), args[2].(tables.TableID)) }) return _c } -func (_c *Gateway_GetTableMetadata_Call) Return(_a0 sqlstore.TableMetadata, _a1 error) *Gateway_GetTableMetadata_Call { +func (_c *Gateway_GetTableMetadata_Call) Return(_a0 gateway.TableMetadata, _a1 error) *Gateway_GetTableMetadata_Call { _c.Call.Return(_a0, _a1) return _c } // RunReadQuery provides a mock function with given fields: ctx, stmt -func (_m *Gateway) RunReadQuery(ctx context.Context, stmt string) (*tableland.TableData, error) { +func (_m *Gateway) RunReadQuery(ctx context.Context, stmt string) (*gateway.TableData, error) { ret := _m.Called(ctx, stmt) - var r0 *tableland.TableData - if rf, ok := ret.Get(0).(func(context.Context, string) *tableland.TableData); ok { + var r0 *gateway.TableData + if rf, ok := ret.Get(0).(func(context.Context, string) *gateway.TableData); ok { r0 = rf(ctx, stmt) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*tableland.TableData) + r0 = ret.Get(0).(*gateway.TableData) } } @@ -168,7 +170,7 @@ func (_c *Gateway_RunReadQuery_Call) Run(run func(ctx context.Context, stmt stri return _c } -func (_c *Gateway_RunReadQuery_Call) Return(_a0 *tableland.TableData, _a1 error) 
*Gateway_RunReadQuery_Call { +func (_c *Gateway_RunReadQuery_Call) Return(_a0 *gateway.TableData, _a1 error) *Gateway_RunReadQuery_Call { _c.Call.Return(_a0, _a1) return _c } diff --git a/pkg/sqlstore/impl/system/internal/db/acl.sql.go b/pkg/database/db/acl.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/acl.sql.go rename to pkg/database/db/acl.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/db.go b/pkg/database/db/db.go similarity index 91% rename from pkg/sqlstore/impl/system/internal/db/db.go rename to pkg/database/db/db.go index 329f6140..e5e47d0a 100644 --- a/pkg/sqlstore/impl/system/internal/db/db.go +++ b/pkg/database/db/db.go @@ -57,12 +57,6 @@ func Prepare(ctx context.Context, db DBTX) (*Queries, error) { if q.getTableStmt, err = db.PrepareContext(ctx, getTable); err != nil { return nil, fmt.Errorf("error preparing query GetTable: %w", err) } - if q.getTablesByControllerStmt, err = db.PrepareContext(ctx, getTablesByController); err != nil { - return nil, fmt.Errorf("error preparing query GetTablesByController: %w", err) - } - if q.getTablesByStructureStmt, err = db.PrepareContext(ctx, getTablesByStructure); err != nil { - return nil, fmt.Errorf("error preparing query GetTablesByStructure: %w", err) - } if q.insertBlockExtraInfoStmt, err = db.PrepareContext(ctx, insertBlockExtraInfo); err != nil { return nil, fmt.Errorf("error preparing query InsertBlockExtraInfo: %w", err) } @@ -141,16 +135,6 @@ func (q *Queries) Close() error { err = fmt.Errorf("error closing getTableStmt: %w", cerr) } } - if q.getTablesByControllerStmt != nil { - if cerr := q.getTablesByControllerStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getTablesByControllerStmt: %w", cerr) - } - } - if q.getTablesByStructureStmt != nil { - if cerr := q.getTablesByStructureStmt.Close(); cerr != nil { - err = fmt.Errorf("error closing getTablesByStructureStmt: %w", cerr) - } - } if q.insertBlockExtraInfoStmt != nil { if cerr := q.insertBlockExtraInfoStmt.Close(); cerr != nil { err = fmt.Errorf("error closing insertBlockExtraInfoStmt: %w", cerr) @@ -231,8 +215,6 @@ type Queries struct { getReceiptStmt *sql.Stmt getSchemaByTableNameStmt *sql.Stmt getTableStmt *sql.Stmt - getTablesByControllerStmt *sql.Stmt - getTablesByStructureStmt *sql.Stmt insertBlockExtraInfoStmt *sql.Stmt insertEVMEventStmt *sql.Stmt insertIdStmt *sql.Stmt @@ -256,8 +238,6 @@ func (q *Queries) WithTx(tx *sql.Tx) *Queries { getReceiptStmt: q.getReceiptStmt, getSchemaByTableNameStmt: q.getSchemaByTableNameStmt, getTableStmt: q.getTableStmt, - getTablesByControllerStmt: q.getTablesByControllerStmt, - getTablesByStructureStmt: q.getTablesByStructureStmt, insertBlockExtraInfoStmt: q.insertBlockExtraInfoStmt, insertEVMEventStmt: q.insertEVMEventStmt, insertIdStmt: q.insertIdStmt, diff --git a/pkg/sqlstore/impl/system/internal/db/evm_events.sql.go b/pkg/database/db/evm_events.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/evm_events.sql.go rename to pkg/database/db/evm_events.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/id.sql.go b/pkg/database/db/id.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/id.sql.go rename to pkg/database/db/id.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/models.go b/pkg/database/db/models.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/models.go rename to pkg/database/db/models.go diff --git a/pkg/sqlstore/impl/system/internal/db/nonce.sql.go b/pkg/database/db/nonce.sql.go 
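
The regenerated mock above tracks the Gateway interface changes in this refactor: receipt and table-metadata lookups now take an explicit tableland.ChainID argument and return internal/gateway types instead of pkg/sqlstore ones. As a hedged sketch of what a call site looks like against the new three-argument signature, the test below drives the generated expecter API; the mocks import path, the bare &mocks.Gateway{} construction, and the concrete chain ID and transaction hash are illustrative assumptions rather than values taken from this patch.

```go
package example

import (
	"context"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	"github.com/textileio/go-tableland/internal/gateway"
	"github.com/textileio/go-tableland/internal/tableland"
	"github.com/textileio/go-tableland/mocks"
)

func TestGatewayReceiptLookup(t *testing.T) {
	gw := &mocks.Gateway{}

	// Expect a receipt lookup for any (ctx, chainID, hash) triple and report "not found".
	gw.EXPECT().
		GetReceiptByTransactionHash(mock.Anything, mock.Anything, mock.Anything).
		Return(gateway.Receipt{}, false, nil)

	_, found, err := gw.GetReceiptByTransactionHash(
		context.Background(), tableland.ChainID(1337), common.HexToHash("0xabc123"))
	require.NoError(t, err)
	require.False(t, found)

	gw.AssertExpectations(t)
}
```

The same shape applies to GetTableMetadata, whose expecter helper now also takes three interface{} arguments and returns a gateway.TableMetadata.
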
similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/nonce.sql.go rename to pkg/database/db/nonce.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/receipt.sql.go b/pkg/database/db/receipt.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/receipt.sql.go rename to pkg/database/db/receipt.sql.go diff --git a/pkg/database/db/registry.sql.go b/pkg/database/db/registry.sql.go new file mode 100644 index 00000000..54b2c8ed --- /dev/null +++ b/pkg/database/db/registry.sql.go @@ -0,0 +1,33 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.15.0 +// source: registry.sql + +package db + +import ( + "context" +) + +const getTable = `-- name: GetTable :one +SELECT id, structure, controller, prefix, created_at, chain_id FROM registry WHERE chain_id =?1 AND id = ?2 +` + +type GetTableParams struct { + ChainID int64 + ID int64 +} + +func (q *Queries) GetTable(ctx context.Context, arg GetTableParams) (Registry, error) { + row := q.queryRow(ctx, q.getTableStmt, getTable, arg.ChainID, arg.ID) + var i Registry + err := row.Scan( + &i.ID, + &i.Structure, + &i.Controller, + &i.Prefix, + &i.CreatedAt, + &i.ChainID, + ) + return i, err +} diff --git a/pkg/sqlstore/impl/system/internal/db/schema.sql.go b/pkg/database/db/schema.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/schema.sql.go rename to pkg/database/db/schema.sql.go diff --git a/pkg/sqlstore/impl/system/migrations/001_init.down.sql b/pkg/database/migrations/001_init.down.sql similarity index 100% rename from pkg/sqlstore/impl/system/migrations/001_init.down.sql rename to pkg/database/migrations/001_init.down.sql diff --git a/pkg/sqlstore/impl/system/migrations/001_init.up.sql b/pkg/database/migrations/001_init.up.sql similarity index 100% rename from pkg/sqlstore/impl/system/migrations/001_init.up.sql rename to pkg/database/migrations/001_init.up.sql diff --git a/pkg/sqlstore/impl/system/migrations/002_receipterroridx.down.sql b/pkg/database/migrations/002_receipterroridx.down.sql similarity index 100% rename from pkg/sqlstore/impl/system/migrations/002_receipterroridx.down.sql rename to pkg/database/migrations/002_receipterroridx.down.sql diff --git a/pkg/sqlstore/impl/system/migrations/002_receipterroridx.up.sql b/pkg/database/migrations/002_receipterroridx.up.sql similarity index 100% rename from pkg/sqlstore/impl/system/migrations/002_receipterroridx.up.sql rename to pkg/database/migrations/002_receipterroridx.up.sql diff --git a/pkg/sqlstore/impl/system/migrations/003_evm_events.down.sql b/pkg/database/migrations/003_evm_events.down.sql similarity index 100% rename from pkg/sqlstore/impl/system/migrations/003_evm_events.down.sql rename to pkg/database/migrations/003_evm_events.down.sql diff --git a/pkg/sqlstore/impl/system/migrations/003_evm_events.up.sql b/pkg/database/migrations/003_evm_events.up.sql similarity index 100% rename from pkg/sqlstore/impl/system/migrations/003_evm_events.up.sql rename to pkg/database/migrations/003_evm_events.up.sql diff --git a/pkg/sqlstore/impl/system/migrations/004_system_id.down.sql b/pkg/database/migrations/004_system_id.down.sql similarity index 100% rename from pkg/sqlstore/impl/system/migrations/004_system_id.down.sql rename to pkg/database/migrations/004_system_id.down.sql diff --git a/pkg/sqlstore/impl/system/migrations/004_system_id.up.sql b/pkg/database/migrations/004_system_id.up.sql similarity index 100% rename from pkg/sqlstore/impl/system/migrations/004_system_id.up.sql rename to 
pkg/database/migrations/004_system_id.up.sql diff --git a/pkg/sqlstore/impl/system/migrations/migrations.go b/pkg/database/migrations/migrations.go similarity index 96% rename from pkg/sqlstore/impl/system/migrations/migrations.go rename to pkg/database/migrations/migrations.go index 0be4a939..bb1b6d89 100644 --- a/pkg/sqlstore/impl/system/migrations/migrations.go +++ b/pkg/database/migrations/migrations.go @@ -100,7 +100,7 @@ func _001_initDownSql() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "001_init.down.sql", size: 25, mode: os.FileMode(420), modTime: time.Unix(1664917754, 0)} + info := bindataFileInfo{name: "001_init.down.sql", size: 25, mode: os.FileMode(420), modTime: time.Unix(1678739618, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -120,7 +120,7 @@ func _001_initUpSql() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "001_init.up.sql", size: 1907, mode: os.FileMode(420), modTime: time.Unix(1667329547, 0)} + info := bindataFileInfo{name: "001_init.up.sql", size: 1907, mode: os.FileMode(420), modTime: time.Unix(1678739618, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -140,7 +140,7 @@ func _002_receipterroridxDownSql() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "002_receipterroridx.down.sql", size: 60, mode: os.FileMode(420), modTime: time.Unix(1664917754, 0)} + info := bindataFileInfo{name: "002_receipterroridx.down.sql", size: 60, mode: os.FileMode(420), modTime: time.Unix(1678739618, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -160,7 +160,7 @@ func _002_receipterroridxUpSql() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "002_receipterroridx.up.sql", size: 129, mode: os.FileMode(420), modTime: time.Unix(1664917754, 0)} + info := bindataFileInfo{name: "002_receipterroridx.up.sql", size: 129, mode: os.FileMode(420), modTime: time.Unix(1678739618, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -180,7 +180,7 @@ func _003_evm_eventsDownSql() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "003_evm_events.down.sql", size: 59, mode: os.FileMode(420), modTime: time.Unix(1664917754, 0)} + info := bindataFileInfo{name: "003_evm_events.down.sql", size: 59, mode: os.FileMode(420), modTime: time.Unix(1678739618, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -200,7 +200,7 @@ func _003_evm_eventsUpSql() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "003_evm_events.up.sql", size: 701, mode: os.FileMode(420), modTime: time.Unix(1664917754, 0)} + info := bindataFileInfo{name: "003_evm_events.up.sql", size: 701, mode: os.FileMode(420), modTime: time.Unix(1678739618, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -220,7 +220,7 @@ func _004_system_idDownSql() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "004_system_id.down.sql", size: 21, mode: os.FileMode(420), modTime: time.Unix(1665062443, 0)} + info := bindataFileInfo{name: "004_system_id.down.sql", size: 21, mode: os.FileMode(420), modTime: time.Unix(1678739618, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -240,7 +240,7 @@ func _004_system_idUpSql() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "004_system_id.up.sql", size: 84, mode: os.FileMode(420), modTime: time.Unix(1665062443, 0)} + info := bindataFileInfo{name: "004_system_id.up.sql", size: 84, mode: os.FileMode(420), modTime: time.Unix(1678739618, 0)} a := &asset{bytes: bytes, info: info} return a, nil } 
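
The hunks above only refresh the modTime metadata that go-bindata embeds for each migration file after the move to pkg/database/migrations; the SQL assets themselves are unchanged and are still applied through golang-migrate's go_bindata source. The sketch below shows that wiring in isolation, mirroring what the new pkg/database package (introduced next) does when it opens a database; the dbURI value is an illustrative assumption.

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/sqlite3" // sqlite3 migration driver
	bindata "github.com/golang-migrate/migrate/v4/source/go_bindata"
	_ "github.com/mattn/go-sqlite3" // sqlite3 driver
	"github.com/textileio/go-tableland/pkg/database/migrations"
)

func main() {
	dbURI := "file:tableland.db?_busy_timeout=5000" // illustrative URI, not from the patch

	// Wrap the embedded migration files; AssetNames/Asset are the go-bindata accessors.
	as := bindata.Resource(migrations.AssetNames(), migrations.Asset)
	d, err := bindata.WithInstance(as)
	if err != nil {
		log.Fatalf("creating source driver: %s", err)
	}

	m, err := migrate.NewWithSourceInstance("go-bindata", d, "sqlite3://"+dbURI)
	if err != nil {
		log.Fatalf("creating migration: %s", err)
	}
	defer m.Close()

	// Apply all pending migrations; ErrNoChange simply means the schema is already current.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatalf("running migration up: %s", err)
	}
	fmt.Println("migrations applied")
}
```

Treating migrate.ErrNoChange as success is what lets this run on every start, which is how executeMigration in the new pkg/database/sqlite_db.go below handles it as well.
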
diff --git a/pkg/sqlstore/impl/system/queries/acl.sql b/pkg/database/queries/acl.sql similarity index 100% rename from pkg/sqlstore/impl/system/queries/acl.sql rename to pkg/database/queries/acl.sql diff --git a/pkg/sqlstore/impl/system/queries/evm_events.sql b/pkg/database/queries/evm_events.sql similarity index 100% rename from pkg/sqlstore/impl/system/queries/evm_events.sql rename to pkg/database/queries/evm_events.sql diff --git a/pkg/sqlstore/impl/system/queries/id.sql b/pkg/database/queries/id.sql similarity index 100% rename from pkg/sqlstore/impl/system/queries/id.sql rename to pkg/database/queries/id.sql diff --git a/pkg/sqlstore/impl/system/queries/nonce.sql b/pkg/database/queries/nonce.sql similarity index 100% rename from pkg/sqlstore/impl/system/queries/nonce.sql rename to pkg/database/queries/nonce.sql diff --git a/pkg/sqlstore/impl/system/queries/receipt.sql b/pkg/database/queries/receipt.sql similarity index 100% rename from pkg/sqlstore/impl/system/queries/receipt.sql rename to pkg/database/queries/receipt.sql diff --git a/pkg/database/queries/registry.sql b/pkg/database/queries/registry.sql new file mode 100644 index 00000000..42d6bd67 --- /dev/null +++ b/pkg/database/queries/registry.sql @@ -0,0 +1,2 @@ +-- name: GetTable :one +SELECT * FROM registry WHERE chain_id =?1 AND id = ?2; diff --git a/pkg/sqlstore/impl/system/queries/schema.sql b/pkg/database/queries/schema.sql similarity index 100% rename from pkg/sqlstore/impl/system/queries/schema.sql rename to pkg/database/queries/schema.sql diff --git a/pkg/sqlstore/impl/system/schemas/schemas.sql b/pkg/database/schemas/schemas.sql similarity index 100% rename from pkg/sqlstore/impl/system/schemas/schemas.sql rename to pkg/database/schemas/schemas.sql diff --git a/pkg/sqlstore/impl/system/sqlc.yaml b/pkg/database/sqlc.yaml similarity index 96% rename from pkg/sqlstore/impl/system/sqlc.yaml rename to pkg/database/sqlc.yaml index eabc8d33..d7dc8186 100644 --- a/pkg/sqlstore/impl/system/sqlc.yaml +++ b/pkg/database/sqlc.yaml @@ -8,7 +8,7 @@ sql: gen: go: package: "db" - out: "./internal/db" + out: "./db" emit_prepared_queries: true emit_interface: false emit_exact_table_names: false diff --git a/pkg/database/sqlite_db.go b/pkg/database/sqlite_db.go new file mode 100644 index 00000000..27cb7f5c --- /dev/null +++ b/pkg/database/sqlite_db.go @@ -0,0 +1,109 @@ +package database + +import ( + "database/sql" + "fmt" + + "github.com/XSAM/otelsql" + "github.com/golang-migrate/migrate/v4" + _ "github.com/golang-migrate/migrate/v4/database/sqlite3" // migration for sqlite3 + bindata "github.com/golang-migrate/migrate/v4/source/go_bindata" + _ "github.com/mattn/go-sqlite3" // sqlite3 driver + "github.com/rs/zerolog" + logger "github.com/rs/zerolog/log" + "github.com/textileio/go-tableland/pkg/database/db" + "github.com/textileio/go-tableland/pkg/database/migrations" + "github.com/textileio/go-tableland/pkg/metrics" + "go.opentelemetry.io/otel/attribute" +) + +// SQLiteDB represents a SQLite database. +type SQLiteDB struct { + URI string + DB *sql.DB + Queries *db.Queries + Log zerolog.Logger +} + +// OpenSerializable opens a SQLite database with only one connection open at a time. +func OpenSerializable(path string, attributes ...attribute.KeyValue) (*SQLiteDB, error) { + attributes = append(attributes, attribute.String("type", "serializable")) + return Open(path, 1, attributes...) +}
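
With the sqlc output now generated into pkg/database/db and the migrations bundled into the same package, callers work against a single SQLiteDB handle that exposes the raw *sql.DB alongside the prepared queries. Below is a hedged sketch of reading a registry row through the generated GetTable query from registry.sql above, using the OpenSerializable constructor just defined; the database path, chain ID, and table ID are illustrative assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/textileio/go-tableland/pkg/database"
	"github.com/textileio/go-tableland/pkg/database/db"
)

func main() {
	ctx := context.Background()

	// OpenSerializable runs the embedded migrations and prepares the sqlc queries,
	// capping the pool at a single connection. The path is an illustrative value.
	sqliteDB, err := database.OpenSerializable("file:tableland.db?")
	if err != nil {
		log.Fatalf("opening database: %s", err)
	}
	defer func() { _ = sqliteDB.Close() }()

	// 1337 is the local test chain ID used elsewhere in this patch; table ID 1 is arbitrary.
	table, err := sqliteDB.Queries.GetTable(ctx, db.GetTableParams{
		ChainID: 1337,
		ID:      1,
	})
	if err != nil {
		log.Fatalf("get table: %s", err)
	}
	fmt.Printf("table %v is controlled by %v\n", table.ID, table.Controller)
}
```

OpenConcurrent, defined next, is the read-oriented variant that lifts the single-connection restriction.

+ +// OpenConcurrent opens a SQLite database that allows multiple connections. +// Should be used for reads.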
+func OpenConcurrent(path string, attributes ...attribute.KeyValue) (*SQLiteDB, error) { + attributes = append(attributes, attribute.String("type", "concurrent")) + return Open(path, 0, attributes...) +} + +// Open opens a new SQLite database. +func Open(path string, maxOpenConnections int, attributes ...attribute.KeyValue) (*SQLiteDB, error) { + log := logger.With(). + Str("component", "db"). + Logger() + + attributes = append(attributes, metrics.BaseAttrs...) + sqlDB, err := otelsql.Open("sqlite3", path, otelsql.WithAttributes(attributes...)) + if err != nil { + return nil, fmt.Errorf("connecting to db: %s", err) + } + sqlDB.SetMaxOpenConns(maxOpenConnections) + + if err := otelsql.RegisterDBStatsMetrics(sqlDB, otelsql.WithAttributes( + attributes..., + )); err != nil { + return nil, fmt.Errorf("registering dbstats: %s", err) + } + + database := &SQLiteDB{ + URI: path, + DB: sqlDB, + Queries: db.New(sqlDB), + Log: log, + } + + as := bindata.Resource(migrations.AssetNames(), migrations.Asset) + if err := database.executeMigration(path, as); err != nil { + return nil, fmt.Errorf("initializing db connection: %s", err) + } + + return database, nil +} + +// Close closes the database. +func (db *SQLiteDB) Close() error { + return db.DB.Close() +} + +// executeMigration run db migrations and return a ready to use connection to the SQLite database. +func (db *SQLiteDB) executeMigration(dbURI string, as *bindata.AssetSource) error { + d, err := bindata.WithInstance(as) + if err != nil { + return fmt.Errorf("creating source driver: %s", err) + } + + m, err := migrate.NewWithSourceInstance("go-bindata", d, "sqlite3://"+dbURI) + if err != nil { + return fmt.Errorf("creating migration: %s", err) + } + defer func() { + if _, err := m.Close(); err != nil { + db.Log.Error().Err(err).Msg("closing db migration") + } + }() + + if err := m.Up(); err != nil && err != migrate.ErrNoChange { + return fmt.Errorf("running migration up: %s", err) + } + + version, dirty, err := m.Version() + db.Log.Info(). + Uint("dbVersion", version). + Bool("dirty", dirty). + Err(err). + Msg("database migration executed") + + return nil +} diff --git a/pkg/eventprocessor/eventfeed/eventfeed.go b/pkg/eventprocessor/eventfeed/eventfeed.go index d57edc42..a7c31561 100644 --- a/pkg/eventprocessor/eventfeed/eventfeed.go +++ b/pkg/eventprocessor/eventfeed/eventfeed.go @@ -2,6 +2,7 @@ package eventfeed import ( "context" + "database/sql" "fmt" "math/big" "reflect" @@ -10,6 +11,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/textileio/go-tableland/internal/tableland" tbleth "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" ) @@ -19,11 +21,48 @@ type ChainClient interface { HeaderByNumber(ctx context.Context, block *big.Int) (*types.Header, error) } +// EventFeedStore is the storage layer of EventFeed. 
+type EventFeedStore interface { + Begin() (*sql.Tx, error) + WithTx(*sql.Tx) EventFeedStore + + AreEVMEventsPersisted(context.Context, tableland.ChainID, common.Hash) (bool, error) + SaveEVMEvents(context.Context, tableland.ChainID, []EVMEvent) error + GetBlocksMissingExtraInfo(context.Context, tableland.ChainID, *int64) ([]int64, error) + InsertBlockExtraInfo(context.Context, tableland.ChainID, int64, uint64) error + GetEVMEvents(context.Context, tableland.ChainID, common.Hash) ([]EVMEvent, error) + GetBlockExtraInfo(context.Context, tableland.ChainID, int64) (EVMBlockInfo, error) +} + // EventFeed provides a stream of on-chain events from a smart contract. type EventFeed interface { Start(ctx context.Context, fromHeight int64, ch chan<- BlockEvents, filterEventTypes []EventType) error } +// EVMEvent is a Tableland on-chain event produced by the Registry SC. +type EVMEvent struct { + Address common.Address + Topics []byte + Data []byte + BlockNumber uint64 + TxHash common.Hash + TxIndex uint + BlockHash common.Hash + Index uint + + // Enhanced fields + ChainID tableland.ChainID + EventJSON []byte + EventType string +} + +// EVMBlockInfo contains information about an EVM block. +type EVMBlockInfo struct { + ChainID tableland.ChainID + BlockNumber int64 + Timestamp time.Time +} + // BlockEvents contains a set of events for a particular block height. type BlockEvents struct { BlockNumber int64 diff --git a/pkg/eventprocessor/eventfeed/impl/analytics.go b/pkg/eventprocessor/eventfeed/impl/analytics.go index 8c6ac8d8..1483f097 100644 --- a/pkg/eventprocessor/eventfeed/impl/analytics.go +++ b/pkg/eventprocessor/eventfeed/impl/analytics.go @@ -19,7 +19,7 @@ func (ef *EventFeed) fetchExtraBlockInfo(ctx context.Context) { ef.log.Info().Msg("graceful close of extra block info fetcher") return } - blockNumbers, err := ef.systemStore.GetBlocksMissingExtraInfo(ctx, fromHeight) + blockNumbers, err := ef.store.GetBlocksMissingExtraInfo(ctx, ef.chainID, fromHeight) if err != nil { ef.log.Error().Err(err).Msg("get blocks without extra info") continue @@ -52,7 +52,7 @@ func (ef *EventFeed) fetchExtraBlockInfo(ctx context.Context) { Msg("capturing new block metric") return } - if err := ef.systemStore.InsertBlockExtraInfo(ctx, blockNumber, block.Time); err != nil { + if err := ef.store.InsertBlockExtraInfo(ctx, ef.chainID, blockNumber, block.Time); err != nil { ef.log.Error(). Err(err). Int64("block_number", blockNumber). diff --git a/pkg/eventprocessor/eventfeed/impl/eventfeed.go b/pkg/eventprocessor/eventfeed/impl/eventfeed.go index 1d26d11e..c36df3ff 100644 --- a/pkg/eventprocessor/eventfeed/impl/eventfeed.go +++ b/pkg/eventprocessor/eventfeed/impl/eventfeed.go @@ -20,8 +20,9 @@ import ( logger "github.com/rs/zerolog/log" "github.com/textileio/go-tableland/internal/tableland" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" + "github.com/textileio/go-tableland/pkg/sharedmemory" - "github.com/textileio/go-tableland/pkg/sqlstore" + tbleth "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" "github.com/textileio/go-tableland/pkg/telemetry" "go.opentelemetry.io/otel/attribute" @@ -36,7 +37,7 @@ const ( // EventFeed provides a stream of filtered events from a SC. type EventFeed struct { log zerolog.Logger - systemStore sqlstore.SystemStore + store eventfeed.EventFeedStore chainID tableland.ChainID ethClient eventfeed.ChainClient scAddress common.Address @@ -55,7 +56,7 @@ type EventFeed struct { // New returns a new EventFeed. 
func New( - systemStore sqlstore.SystemStore, + store eventfeed.EventFeedStore, chainID tableland.ChainID, ethClient eventfeed.ChainClient, scAddress common.Address, @@ -79,7 +80,7 @@ func New( ef := &EventFeed{ sm: sm, log: log, - systemStore: systemStore, + store: store, chainID: chainID, ethClient: ethClient, scAddress: scAddress, @@ -403,7 +404,7 @@ func (ef *EventFeed) persistEvents(ctx context.Context, events []types.Log, pars cfg := jsoniter.Config{}.Froze() cfg.RegisterExtension(&omitRawFieldExtension{}) - tx, err := ef.systemStore.Begin(ctx) + tx, err := ef.store.Begin() if err != nil { return fmt.Errorf("opening db tx: %s", err) } @@ -413,10 +414,10 @@ func (ef *EventFeed) persistEvents(ctx context.Context, events []types.Log, pars } }() - store := ef.systemStore.WithTx(tx) + store := ef.store.WithTx(tx) persistedTxnHashEvents := map[common.Hash]bool{} - tblEvents := make([]tableland.EVMEvent, 0, len(events)) + tblEvents := make([]eventfeed.EVMEvent, 0, len(events)) for i, e := range events { // If we already have registered events for the TxHash, we skip persisting this event. // This means that one of two things happened: @@ -428,7 +429,7 @@ func (ef *EventFeed) persistEvents(ctx context.Context, events []types.Log, pars // is the event information we save to be coherent with execution. In any case, a validator config that allows // reorgs isn't safe for state coherence between validators so this should only happen in test environments. if _, ok := persistedTxnHashEvents[e.TxHash]; !ok { - areTxnHashEventsPersisted, err := store.AreEVMEventsPersisted(ctx, e.TxHash) + areTxnHashEventsPersisted, err := store.AreEVMEventsPersisted(ctx, ef.chainID, e.TxHash) if err != nil { return fmt.Errorf("check if evm txn events are persisted: %s", err) } @@ -452,7 +453,7 @@ func (ef *EventFeed) persistEvents(ctx context.Context, events []types.Log, pars } // The reflect names are *ethereum.XXXXX, so we get only XXXXX. 
eventType := strings.SplitN(reflect.TypeOf(parsedEvents[i]).String(), ".", 2)[1] - tblEvent := tableland.EVMEvent{ + tblEvent := eventfeed.EVMEvent{ // Direct mapping from types.Log Address: e.Address, Topics: topicsJSONBytes, @@ -474,7 +475,7 @@ func (ef *EventFeed) persistEvents(ctx context.Context, events []types.Log, pars } } - if err := store.SaveEVMEvents(ctx, tblEvents); err != nil { + if err := store.SaveEVMEvents(ctx, ef.chainID, tblEvents); err != nil { return fmt.Errorf("persisting events: %s", err) } @@ -496,7 +497,7 @@ func (e *omitRawFieldExtension) UpdateStructDescriptor(structDescriptor *jsonite } } -func toNewTablelandEvent(e tableland.EVMEvent) telemetry.NewTablelandEventMetric { +func toNewTablelandEvent(e eventfeed.EVMEvent) telemetry.NewTablelandEventMetric { return telemetry.NewTablelandEventMetric{ Address: e.Address.String(), Topics: e.Topics, diff --git a/pkg/eventprocessor/eventfeed/impl/eventfeed_store.go b/pkg/eventprocessor/eventfeed/impl/eventfeed_store.go new file mode 100644 index 00000000..c912e4da --- /dev/null +++ b/pkg/eventprocessor/eventfeed/impl/eventfeed_store.go @@ -0,0 +1,355 @@ +package impl + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" + "github.com/textileio/go-tableland/pkg/database/db" + "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" + "github.com/textileio/go-tableland/pkg/metrics" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/metric/instrument" +) + +// EventFeedStore is the storage layer for EventFeed. +type EventFeedStore struct { + db *database.SQLiteDB +} + +var _ eventfeed.EventFeedStore = (*EventFeedStore)(nil) + +// NewEventFeedStore creates a new feed store. +func NewEventFeedStore(db *database.SQLiteDB) *EventFeedStore { + return &EventFeedStore{ + db: db, + } +} + +// Begin starts a tx. +func (s *EventFeedStore) Begin() (*sql.Tx, error) { + return s.db.DB.Begin() +} + +// WithTx returns an EventFeedStore with a tx attached. +func (s *EventFeedStore) WithTx(tx *sql.Tx) eventfeed.EventFeedStore { + return &EventFeedStore{ + db: &database.SQLiteDB{ + URI: s.db.URI, + DB: s.db.DB, + Queries: s.db.Queries.WithTx(tx), + Log: s.db.Log, + }, + } +} + +// AreEVMEventsPersisted returns true if there're events persisted for the provided txn hash, and false otherwise. +func (s *EventFeedStore) AreEVMEventsPersisted( + ctx context.Context, chainID tableland.ChainID, txnHash common.Hash, +) (bool, error) { + params := db.AreEVMEventsPersistedParams{ + ChainID: int64(chainID), + TxHash: txnHash.Hex(), + } + + _, err := s.db.Queries.AreEVMEventsPersisted(ctx, params) + if err == sql.ErrNoRows { + return false, nil + } + if err != nil { + return false, fmt.Errorf("evm txn events lookup: %s", err) + } + return true, nil +} + +// SaveEVMEvents saves the provider EVMEvents. 
+func (s *EventFeedStore) SaveEVMEvents( + ctx context.Context, chainID tableland.ChainID, events []eventfeed.EVMEvent, +) error { + queries := s.db.Queries + for _, e := range events { + args := db.InsertEVMEventParams{ + ChainID: int64(chainID), + EventJson: string(e.EventJSON), + EventType: e.EventType, + Address: e.Address.Hex(), + Topics: string(e.Topics), + Data: e.Data, + BlockNumber: int64(e.BlockNumber), + TxHash: e.TxHash.Hex(), + TxIndex: e.TxIndex, + BlockHash: e.BlockHash.Hex(), + EventIndex: e.Index, + } + if err := queries.InsertEVMEvent(ctx, args); err != nil { + return fmt.Errorf("insert evm event: %s", err) + } + } + + return nil +} + +// GetBlocksMissingExtraInfo returns a list of block numbers that don't contain enhanced information. +// It receives an optional fromHeight to only look for blocks after a block number. If null it will look +// for blocks at any height. +func (s *EventFeedStore) GetBlocksMissingExtraInfo( + ctx context.Context, chainID tableland.ChainID, lastKnownHeight *int64, +) ([]int64, error) { + var blockNumbers []int64 + var err error + if lastKnownHeight == nil { + blockNumbers, err = s.db.Queries.GetBlocksMissingExtraInfo(ctx, int64(chainID)) + } else { + params := db.GetBlocksMissingExtraInfoByBlockNumberParams{ + ChainID: int64(chainID), + BlockNumber: *lastKnownHeight, + } + blockNumbers, err = s.db.Queries.GetBlocksMissingExtraInfoByBlockNumber(ctx, params) + } + if err == sql.ErrNoRows { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("get blocks missing extra info: %s", err) + } + + return blockNumbers, nil +} + +// InsertBlockExtraInfo inserts enhanced information for a block. +func (s *EventFeedStore) InsertBlockExtraInfo( + ctx context.Context, chainID tableland.ChainID, blockNumber int64, timestamp uint64, +) error { + params := db.InsertBlockExtraInfoParams{ + ChainID: int64(chainID), + BlockNumber: blockNumber, + Timestamp: int64(timestamp), + } + if err := s.db.Queries.InsertBlockExtraInfo(ctx, params); err != nil { + return fmt.Errorf("insert block extra info: %s", err) + } + + return nil +} + +// GetEVMEvents returns all the persisted events for a transaction. +func (s *EventFeedStore) GetEVMEvents( + ctx context.Context, chainID tableland.ChainID, txnHash common.Hash, +) ([]eventfeed.EVMEvent, error) { + args := db.GetEVMEventsParams{ + ChainID: int64(chainID), + TxHash: txnHash.Hex(), + } + events, err := s.db.Queries.GetEVMEvents(ctx, args) + if err != nil { + return nil, fmt.Errorf("get events by txhash: %s", err) + } + + ret := make([]eventfeed.EVMEvent, len(events)) + for i, event := range events { + ret[i] = eventfeed.EVMEvent{ + Address: common.HexToAddress(event.Address), + Topics: []byte(event.Topics), + Data: event.Data, + BlockNumber: uint64(event.BlockNumber), + TxHash: common.HexToHash(event.TxHash), + TxIndex: event.TxIndex, + BlockHash: common.HexToHash(event.BlockHash), + Index: event.EventIndex, + ChainID: tableland.ChainID(event.ChainID), + EventJSON: []byte(event.EventJson), + EventType: event.EventType, + } + } + + return ret, nil +} + +// GetBlockExtraInfo info returns stored information about an EVM block. 
+func (s *EventFeedStore) GetBlockExtraInfo( + ctx context.Context, chainID tableland.ChainID, blockNumber int64, +) (eventfeed.EVMBlockInfo, error) { + params := db.GetBlockExtraInfoParams{ + ChainID: int64(chainID), + BlockNumber: blockNumber, + } + + blockInfo, err := s.db.Queries.GetBlockExtraInfo(ctx, params) + if err == sql.ErrNoRows { + return eventfeed.EVMBlockInfo{}, fmt.Errorf("block information not found: %w", err) + } + if err != nil { + return eventfeed.EVMBlockInfo{}, fmt.Errorf("get block information: %s", err) + } + + return eventfeed.EVMBlockInfo{ + ChainID: tableland.ChainID(blockInfo.ChainID), + BlockNumber: blockInfo.BlockNumber, + Timestamp: time.Unix(blockInfo.Timestamp, 0), + }, nil +} + +// InstrutmentedEventFeedStore is the intrumented storage layer for EventFeed. +type InstrutmentedEventFeedStore struct { + store eventfeed.EventFeedStore + callCount instrument.Int64Counter + latencyHistogram instrument.Int64Histogram +} + +var _ eventfeed.EventFeedStore = (*InstrutmentedEventFeedStore)(nil) + +// NewInstrumentedEventFeedStore creates a new feed store. +func NewInstrumentedEventFeedStore(db *database.SQLiteDB) (*InstrutmentedEventFeedStore, error) { + meter := global.MeterProvider().Meter("tableland") + callCount, err := meter.Int64Counter("tableland.eventfeed.store.call.count") + if err != nil { + return &InstrutmentedEventFeedStore{}, fmt.Errorf("registering call counter: %s", err) + } + latencyHistogram, err := meter.Int64Histogram("tableland.eventfeed.store.latency") + if err != nil { + return &InstrutmentedEventFeedStore{}, fmt.Errorf("registering latency histogram: %s", err) + } + + return &InstrutmentedEventFeedStore{ + store: NewEventFeedStore(db), + callCount: callCount, + latencyHistogram: latencyHistogram, + }, nil +} + +// Begin starts a tx. +func (s *InstrutmentedEventFeedStore) Begin() (*sql.Tx, error) { + return s.store.Begin() +} + +// WithTx returns an EventFeedStore with a tx attached. +func (s *InstrutmentedEventFeedStore) WithTx(tx *sql.Tx) eventfeed.EventFeedStore { + return s.store.WithTx(tx) +} + +// AreEVMEventsPersisted returns true if there're events persisted for the provided txn hash, and false otherwise. +func (s *InstrutmentedEventFeedStore) AreEVMEventsPersisted( + ctx context.Context, chainID tableland.ChainID, txnHash common.Hash, +) (bool, error) { + start := time.Now() + ok, err := s.store.AreEVMEventsPersisted(ctx, chainID, txnHash) + latency := time.Since(start).Milliseconds() + + attributes := append([]attribute.KeyValue{ + {Key: "method", Value: attribute.StringValue("AreEVMEventsPersisted")}, + {Key: "success", Value: attribute.BoolValue(err == nil)}, + {Key: "chainID", Value: attribute.Int64Value(int64(chainID))}, + }, metrics.BaseAttrs...) + + s.callCount.Add(ctx, 1, attributes...) + s.latencyHistogram.Record(ctx, latency, attributes...) + + return ok, err +} + +// SaveEVMEvents saves the provider EVMEvents. +func (s *InstrutmentedEventFeedStore) SaveEVMEvents( + ctx context.Context, chainID tableland.ChainID, events []eventfeed.EVMEvent, +) error { + start := time.Now() + err := s.store.SaveEVMEvents(ctx, chainID, events) + latency := time.Since(start).Milliseconds() + + attributes := append([]attribute.KeyValue{ + {Key: "method", Value: attribute.StringValue("SaveEVMEvents")}, + {Key: "success", Value: attribute.BoolValue(err == nil)}, + {Key: "chainID", Value: attribute.Int64Value(int64(chainID))}, + }, metrics.BaseAttrs...) + + s.callCount.Add(ctx, 1, attributes...) 
+ s.latencyHistogram.Record(ctx, latency, attributes...) + + return err +} + +// GetBlocksMissingExtraInfo returns a list of block numbers that don't contain enhanced information. +// It receives an optional fromHeight to only look for blocks after a block number. If null it will look +// for blocks at any height. +func (s *InstrutmentedEventFeedStore) GetBlocksMissingExtraInfo( + ctx context.Context, chainID tableland.ChainID, lastKnownHeight *int64, +) ([]int64, error) { + start := time.Now() + blocks, err := s.store.GetBlocksMissingExtraInfo(ctx, chainID, lastKnownHeight) + latency := time.Since(start).Milliseconds() + + attributes := append([]attribute.KeyValue{ + {Key: "method", Value: attribute.StringValue("GetBlocksMissingExtraInfo")}, + {Key: "success", Value: attribute.BoolValue(err == nil)}, + {Key: "chainID", Value: attribute.Int64Value(int64(chainID))}, + }, metrics.BaseAttrs...) + + s.callCount.Add(ctx, 1, attributes...) + s.latencyHistogram.Record(ctx, latency, attributes...) + + return blocks, err +} + +// InsertBlockExtraInfo inserts enhanced information for a block. +func (s *InstrutmentedEventFeedStore) InsertBlockExtraInfo( + ctx context.Context, chainID tableland.ChainID, blockNumber int64, timestamp uint64, +) error { + start := time.Now() + err := s.store.InsertBlockExtraInfo(ctx, chainID, blockNumber, timestamp) + latency := time.Since(start).Milliseconds() + + attributes := append([]attribute.KeyValue{ + {Key: "method", Value: attribute.StringValue("InsertBlockExtraInfo")}, + {Key: "success", Value: attribute.BoolValue(err == nil)}, + {Key: "chainID", Value: attribute.Int64Value(int64(chainID))}, + }, metrics.BaseAttrs...) + + s.callCount.Add(ctx, 1, attributes...) + s.latencyHistogram.Record(ctx, latency, attributes...) + + return err +} + +// GetEVMEvents returns all the persisted events for a transaction. +func (s *InstrutmentedEventFeedStore) GetEVMEvents( + ctx context.Context, chainID tableland.ChainID, txnHash common.Hash, +) ([]eventfeed.EVMEvent, error) { + start := time.Now() + evmEvents, err := s.store.GetEVMEvents(ctx, chainID, txnHash) + latency := time.Since(start).Milliseconds() + + attributes := append([]attribute.KeyValue{ + {Key: "method", Value: attribute.StringValue("GetEVMEvents")}, + {Key: "success", Value: attribute.BoolValue(err == nil)}, + {Key: "chainID", Value: attribute.Int64Value(int64(chainID))}, + }, metrics.BaseAttrs...) + + s.callCount.Add(ctx, 1, attributes...) + s.latencyHistogram.Record(ctx, latency, attributes...) + + return evmEvents, err +} + +// GetBlockExtraInfo info returns stored information about an EVM block. +func (s *InstrutmentedEventFeedStore) GetBlockExtraInfo( + ctx context.Context, chainID tableland.ChainID, blockNumber int64, +) (eventfeed.EVMBlockInfo, error) { + start := time.Now() + blockInfo, err := s.store.GetBlockExtraInfo(ctx, chainID, blockNumber) + latency := time.Since(start).Milliseconds() + + attributes := append([]attribute.KeyValue{ + {Key: "method", Value: attribute.StringValue("GetBlockExtraInfo")}, + {Key: "success", Value: attribute.BoolValue(err == nil)}, + {Key: "chainID", Value: attribute.Int64Value(int64(chainID))}, + }, metrics.BaseAttrs...) + + s.callCount.Add(ctx, 1, attributes...) + s.latencyHistogram.Record(ctx, latency, attributes...) 
+ + return blockInfo, err +} diff --git a/pkg/eventprocessor/eventfeed/impl/eventfeed_store_test.go b/pkg/eventprocessor/eventfeed/impl/eventfeed_store_test.go new file mode 100644 index 00000000..4da30aa7 --- /dev/null +++ b/pkg/eventprocessor/eventfeed/impl/eventfeed_store_test.go @@ -0,0 +1,89 @@ +package impl + +import ( + "context" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" + "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" + "github.com/textileio/go-tableland/tests" +) + +func TestEVMEventPersistence(t *testing.T) { + t.Parallel() + + ctx := context.Background() + dbURI := tests.Sqlite3URI(t) + + chainID := tableland.ChainID(1337) + + db, err := database.Open(dbURI, 1) + require.NoError(t, err) + + store := NewEventFeedStore(db) + + testData := []eventfeed.EVMEvent{ + { + Address: common.HexToAddress("0x10"), + Topics: []byte(`["0x111,"0x122"]`), + Data: []byte("data1"), + BlockNumber: 1, + TxHash: common.HexToHash("0x11"), + TxIndex: 11, + BlockHash: common.HexToHash("0x12"), + Index: 12, + ChainID: chainID, + EventJSON: []byte("eventjson1"), + EventType: "Type1", + }, + { + Address: common.HexToAddress("0x20"), + Topics: []byte(`["0x211,"0x222"]`), + Data: []byte("data2"), + BlockNumber: 2, + TxHash: common.HexToHash("0x21"), + TxIndex: 11, + BlockHash: common.HexToHash("0x22"), + Index: 12, + ChainID: chainID, + EventJSON: []byte("eventjson2"), + EventType: "Type2", + }, + } + + // Check that AreEVMEventsPersisted for the future txn hashes aren't found. + for _, event := range testData { + exists, err := store.AreEVMEventsPersisted(ctx, chainID, event.TxHash) + require.NoError(t, err) + require.False(t, exists) + } + + err = store.SaveEVMEvents(ctx, chainID, testData) + require.NoError(t, err) + + // Check that AreEVMEventsPersisted for the future txn hashes are found, and the data matches. 
+ for _, event := range testData { + exists, err := store.AreEVMEventsPersisted(ctx, chainID, event.TxHash) + require.NoError(t, err) + require.True(t, exists) + + events, err := store.GetEVMEvents(ctx, chainID, event.TxHash) + require.NoError(t, err) + require.Len(t, events, 1) + + require.Equal(t, events[0].Address, event.Address) + require.Equal(t, events[0].Topics, event.Topics) + require.Equal(t, events[0].Data, event.Data) + require.Equal(t, events[0].BlockNumber, event.BlockNumber) + require.Equal(t, events[0].TxHash, event.TxHash) + require.Equal(t, events[0].TxIndex, event.TxIndex) + require.Equal(t, events[0].BlockHash, event.BlockHash) + require.Equal(t, events[0].Index, event.Index) + require.Equal(t, events[0].ChainID, chainID) + require.Equal(t, events[0].EventJSON, event.EventJSON) + require.Equal(t, events[0].EventType, event.EventType) + } +} diff --git a/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go b/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go index a78eaf71..5145ebb5 100644 --- a/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go +++ b/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go @@ -14,10 +14,11 @@ import ( "github.com/ethereum/go-ethereum/ethclient" _ "github.com/mattn/go-sqlite3" "github.com/stretchr/testify/require" - "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" + "github.com/textileio/go-tableland/pkg/sharedmemory" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" + "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" "github.com/textileio/go-tableland/pkg/tables/impl/testutil" "github.com/textileio/go-tableland/tests" @@ -29,12 +30,12 @@ func TestRunSQLEvents(t *testing.T) { t.Parallel() dbURI := tests.Sqlite3URI(t) - systemStore, err := system.New(dbURI, tableland.ChainID(1337)) + db, err := database.Open(dbURI, 1) require.NoError(t, err) backend, addr, sc, authOpts, _ := testutil.Setup(t) ef, err := New( - systemStore, + NewEventFeedStore(db), 1337, backend, addr, @@ -110,13 +111,15 @@ func TestAllEvents(t *testing.T) { t.Parallel() dbURI := tests.Sqlite3URI(t) - systemStore, err := system.New(dbURI, tableland.ChainID(1337)) + db, err := database.Open(dbURI, 1) require.NoError(t, err) + store := NewEventFeedStore(db) + backend, addr, sc, authOpts, _ := testutil.Setup(t) fetchBlockExtraInfoDelay = time.Millisecond ef, err := New( - systemStore, + store, 1337, backend, addr, @@ -146,7 +149,7 @@ func TestAllEvents(t *testing.T) { // 10 is an arbitrary choice to make it future proof if the setup stage decides to mine // some extra blocks, so we make sure we're 100% clean. 
for i := int64(0); i < 10; i++ { - _, err = ef.systemStore.GetBlockExtraInfo(ctx, i) + _, err = ef.store.GetBlockExtraInfo(ctx, ef.chainID, i) require.Error(t, err) } @@ -187,7 +190,7 @@ func TestAllEvents(t *testing.T) { require.NotEqual(t, emptyHash, bes.Txns[0].TxnHash) require.IsType(t, ðereum.ContractCreateTable{}, bes.Txns[0].Events[0]) - evmEvents, err := systemStore.GetEVMEvents(ctx, bes.Txns[0].TxnHash) + evmEvents, err := store.GetEVMEvents(ctx, ef.chainID, bes.Txns[0].TxnHash) require.NoError(t, err) evmEvent := evmEvents[0] @@ -209,7 +212,7 @@ func TestAllEvents(t *testing.T) { require.NotEqual(t, emptyHash, bes.Txns[1].TxnHash) require.IsType(t, ðereum.ContractRunSQL{}, bes.Txns[1].Events[0]) - evmEvents, err := systemStore.GetEVMEvents(ctx, bes.Txns[1].TxnHash) + evmEvents, err := store.GetEVMEvents(ctx, ef.chainID, bes.Txns[1].TxnHash) require.NoError(t, err) evmEvent := evmEvents[0] @@ -230,7 +233,7 @@ func TestAllEvents(t *testing.T) { { require.IsType(t, ðereum.ContractSetController{}, bes.Txns[2].Events[0]) - evmEvents, err := systemStore.GetEVMEvents(ctx, bes.Txns[2].TxnHash) + evmEvents, err := store.GetEVMEvents(ctx, ef.chainID, bes.Txns[2].TxnHash) require.NoError(t, err) evmEvent := evmEvents[0] @@ -251,7 +254,7 @@ func TestAllEvents(t *testing.T) { { require.IsType(t, ðereum.ContractTransferTable{}, bes.Txns[3].Events[0]) - evmEvents, err := systemStore.GetEVMEvents(ctx, bes.Txns[3].TxnHash) + evmEvents, err := store.GetEVMEvents(ctx, ef.chainID, bes.Txns[3].TxnHash) require.NoError(t, err) evmEvent := evmEvents[0] @@ -268,9 +271,9 @@ func TestAllEvents(t *testing.T) { require.Equal(t, uint(5), evmEvent.Index) } - var bi tableland.EVMBlockInfo + var bi eventfeed.EVMBlockInfo require.Eventually(t, func() bool { - bi, err = ef.systemStore.GetBlockExtraInfo(ctx, bes.BlockNumber) + bi, err = ef.store.GetBlockExtraInfo(ctx, ef.chainID, bes.BlockNumber) return err == nil }, time.Second*10, time.Second) require.Equal(t, txn1.ChainId().Int64(), int64(bi.ChainID)) @@ -299,10 +302,10 @@ func TestInfura(t *testing.T) { rinkebyContractAddr := common.HexToAddress("0x847645b7dAA32eFda757d3c10f1c82BFbB7b41D0") dbURI := tests.Sqlite3URI(t) - systemStore, err := system.New(dbURI, tableland.ChainID(1337)) + db, err := database.Open(dbURI, 1) require.NoError(t, err) ef, err := New( - systemStore, + NewEventFeedStore(db), 1337, conn, rinkebyContractAddr, @@ -348,22 +351,16 @@ func TestDuplicateEvents(t *testing.T) { t.Parallel() dbURI := tests.Sqlite3URI(t) - systemStore, err := system.New(dbURI, tableland.ChainID(1337)) + db, err := database.Open(dbURI, 1) require.NoError(t, err) - backend := duplicateEventsChainClient{} - - // Deploy address for Registry contract. - address := common.HexToAddress("0x0b9737ab4b3e5303cb67db031b509697e31c02d3") - if len(address.Bytes()) == 0 { - t.Error("Expected a valid deployment address. 
Received empty address byte array instead") - } + efStore := NewEventFeedStore(db) ef, err := New( - systemStore, + efStore, 1337, - backend, - address, + duplicateEventsChainClient{}, + common.HexToAddress("0x0b9737ab4b3e5303cb67db031b509697e31c02d3"), sharedmemory.NewSharedMemory(), eventfeed.WithNewHeadPollFreq(time.Millisecond), eventfeed.WithMinBlockDepth(0), @@ -380,7 +377,7 @@ func TestDuplicateEvents(t *testing.T) { select { case bes := <-ch: - persistedEvents, err := systemStore.GetEVMEvents(context.Background(), bes.Txns[0].TxnHash) + persistedEvents, err := efStore.GetEVMEvents(context.Background(), 1337, bes.Txns[0].TxnHash) require.NoError(t, err) require.Len(t, persistedEvents, 1) diff --git a/pkg/eventprocessor/impl/eventprocessor_replayhistory_test.go b/pkg/eventprocessor/impl/eventprocessor_replayhistory_test.go index c236bde8..b4e9c5fd 100644 --- a/pkg/eventprocessor/impl/eventprocessor_replayhistory_test.go +++ b/pkg/eventprocessor/impl/eventprocessor_replayhistory_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" "github.com/textileio/go-tableland/pkg/eventprocessor" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" efimpl "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed/impl" @@ -24,7 +25,7 @@ import ( "github.com/textileio/go-tableland/pkg/parsing" parserimpl "github.com/textileio/go-tableland/pkg/parsing/impl" "github.com/textileio/go-tableland/pkg/sharedmemory" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" + "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/tests" ) @@ -79,14 +80,13 @@ func launchValidatorForAllChainsBackedByEVMHistory(t *testing.T, historyDBURI st parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - db, err := sql.Open("sqlite3", dbURI) + db, err := database.Open(dbURI, 1) require.NoError(t, err) - db.SetMaxOpenConns(1) chains := getChains(t, historyDBURI) eps := make([]*EventProcessor, len(chains)) for i, chain := range chains { - eps[i] = spinValidatorStackForChainID(t, dbURI, historyDBURI, parser, chain.chainID, chain.scAddress, db) + eps[i] = spinValidatorStackForChainID(t, historyDBURI, parser, chain.chainID, chain.scAddress, db) } waitForSynced := func() { @@ -111,24 +111,19 @@ func launchValidatorForAllChainsBackedByEVMHistory(t *testing.T, historyDBURI st func spinValidatorStackForChainID( t *testing.T, - dbURI string, historyDBURI string, parser parsing.SQLValidator, chainID tableland.ChainID, scAddress common.Address, - db *sql.DB, + db *database.SQLiteDB, ) *EventProcessor { ex, err := executor.NewExecutor(chainID, db, parser, 0, &aclMock{}) require.NoError(t, err) - - systemStore, err := system.New(dbURI, chainID) - require.NoError(t, err) - eventBasedBackend, err := sqlitechainclient.New(historyDBURI, chainID) require.NoError(t, err) ef, err := efimpl.New( - systemStore, + efimpl.NewEventFeedStore(db), chainID, eventBasedBackend, scAddress, @@ -204,3 +199,16 @@ func getHistoryDBURI(t *testing.T) string { // Return full path of prepared database. 
return fmt.Sprintf("file:%s?", historyDBFilePath) } + +type aclMock struct{} + +func (acl *aclMock) CheckPrivileges( + _ context.Context, + _ *sql.Tx, + _ tableland.ChainID, + _ common.Address, + _ tables.TableID, + _ tableland.Operation, +) (bool, error) { + return true, nil +} diff --git a/pkg/eventprocessor/impl/eventprocessor_test.go b/pkg/eventprocessor/impl/eventprocessor_test.go index 42704e61..a122f53a 100644 --- a/pkg/eventprocessor/impl/eventprocessor_test.go +++ b/pkg/eventprocessor/impl/eventprocessor_test.go @@ -2,7 +2,6 @@ package impl import ( "context" - "database/sql" "math/big" "strconv" "testing" @@ -10,16 +9,19 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" - "github.com/textileio/go-tableland/internal/tableland" + gatewayimpl "github.com/textileio/go-tableland/internal/gateway/impl" + "github.com/textileio/go-tableland/internal/tableland/impl" + "github.com/textileio/go-tableland/pkg/database" "github.com/textileio/go-tableland/pkg/eventprocessor" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" efimpl "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed/impl" executor "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor/impl" - "github.com/textileio/go-tableland/pkg/parsing" parserimpl "github.com/textileio/go-tableland/pkg/parsing/impl" + "github.com/textileio/go-tableland/pkg/sharedmemory" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" + "github.com/textileio/go-tableland/pkg/tables" + "github.com/textileio/go-tableland/pkg/tables/impl/testutil" "github.com/textileio/go-tableland/tests" ) @@ -317,18 +319,19 @@ func setup(t *testing.T) ( parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - db, err := sql.Open("sqlite3", dbURI) - require.NoError(t, err) - db.SetMaxOpenConns(1) - ex, err := executor.NewExecutor(chainID, db, parser, 0, &aclMock{}) + db, err := database.Open(dbURI, 1) require.NoError(t, err) sm := sharedmemory.NewSharedMemory() + ex, err := executor.NewExecutor(chainID, db, parser, 0, impl.NewACL(db)) + + require.NoError(t, err) - systemStore, err := system.New(dbURI, tableland.ChainID(chainID)) + db2, err := database.Open(dbURI, 1) require.NoError(t, err) + ef, err := efimpl.New( - systemStore, + efimpl.NewEventFeedStore(db2), chainID, backend, addr, @@ -376,17 +379,14 @@ func setup(t *testing.T) ( } require.NoError(t, err) - store, err := system.New( - dbURI, 1337) + db, err = database.Open(dbURI, 1) require.NoError(t, err) - store.SetReadResolver(parsing.NewReadStatementResolver(sm)) - tableReader := func(readQuery string) []int64 { rq, err := parser.ValidateReadQuery(readQuery) require.NoError(t, err) require.NotNil(t, rq) - res, err := store.Read(ctx, rq) + res, err := gatewayimpl.NewGatewayStore(db, nil).Read(ctx, rq) require.NoError(t, err) ret := make([]int64, len(res.Rows)) @@ -399,7 +399,9 @@ func setup(t *testing.T) ( checkReceipts := func(t *testing.T, rs ...eventprocessor.Receipt) func() bool { return func() bool { for _, expReceipt := range rs { - gotReceipt, found, err := systemStore.GetReceipt(context.Background(), expReceipt.TxnHash) + gotReceipt, found, err := gatewayimpl. + NewGatewayStore(db, nil). 
+ GetReceipt(context.Background(), 1337, expReceipt.TxnHash) require.NoError(t, err) if !found { return false @@ -428,15 +430,3 @@ func setup(t *testing.T) ( transfer: transferFrom, }, checkReceipts, tableReader } - -type aclMock struct{} - -func (acl *aclMock) CheckPrivileges( - _ context.Context, - _ *sql.Tx, - _ common.Address, - _ tables.TableID, - _ tableland.Operation, -) (bool, error) { - return true, nil -} diff --git a/pkg/eventprocessor/impl/executor/impl/executor.go b/pkg/eventprocessor/impl/executor/impl/executor.go index 12f593d5..3d5827b1 100644 --- a/pkg/eventprocessor/impl/executor/impl/executor.go +++ b/pkg/eventprocessor/impl/executor/impl/executor.go @@ -11,6 +11,7 @@ import ( "github.com/rs/zerolog" logger "github.com/rs/zerolog/log" "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor" "github.com/textileio/go-tableland/pkg/parsing" ) @@ -18,7 +19,7 @@ import ( // Executor executes chain events. type Executor struct { log zerolog.Logger - db *sql.DB + db *database.SQLiteDB parser parsing.SQLValidator acl tableland.ACL chBlockScope chan struct{} @@ -36,7 +37,7 @@ var _ executor.Executor = (*Executor)(nil) func NewExecutor( chainID tableland.ChainID, // dbURI string, - db *sql.DB, + db *database.SQLiteDB, parser parsing.SQLValidator, maxTableRowCount int, acl tableland.ACL, @@ -77,7 +78,7 @@ func (ex *Executor) NewBlockScope(ctx context.Context, newBlockNum int64) (execu } releaseBlockScope := func() { ex.chBlockScope <- struct{}{} } - txn, err := ex.db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable, ReadOnly: false}) + txn, err := ex.db.DB.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable, ReadOnly: false}) if err != nil { releaseBlockScope() return nil, fmt.Errorf("opening db transaction: %s", err) @@ -106,7 +107,7 @@ func (ex *Executor) NewBlockScope(ctx context.Context, newBlockNum int64) (execu // GetLastExecutedBlockNumber returns the last block number that was successfully executed. 
func (ex *Executor) GetLastExecutedBlockNumber(ctx context.Context) (int64, error) { - txn, err := ex.db.Begin() + txn, err := ex.db.DB.Begin() if err != nil { return 0, fmt.Errorf("opening txn: %s", err) } diff --git a/pkg/eventprocessor/impl/executor/impl/executor_test.go b/pkg/eventprocessor/impl/executor/impl/executor_test.go index 89e0991f..7ccf4377 100644 --- a/pkg/eventprocessor/impl/executor/impl/executor_test.go +++ b/pkg/eventprocessor/impl/executor/impl/executor_test.go @@ -10,11 +10,12 @@ import ( _ "github.com/mattn/go-sqlite3" "github.com/stretchr/testify/require" "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/internal/tableland/impl" + "github.com/textileio/go-tableland/pkg/database" "github.com/textileio/go-tableland/pkg/eventprocessor" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" "github.com/textileio/go-tableland/pkg/parsing" parserimpl "github.com/textileio/go-tableland/pkg/parsing/impl" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" "github.com/textileio/go-tableland/tests" @@ -88,6 +89,7 @@ func TestMultiEventTxnBlock(t *testing.T) { Statement: "create table bar_1337 (zar text)", } eventInsertRow := ðereum.ContractRunSQL{ + Caller: common.HexToAddress("0xb451cee4A42A652Fe77d373BAe66D42fd6B8D8FF"), IsOwner: true, TableId: eventCreateTable.TableId, Statement: "insert into bar_1337_100 values ('txn 1')", @@ -116,6 +118,7 @@ func TestMultiEventTxnBlock(t *testing.T) { Statement: "create table foo_1337 (fooz text)", } eventInsertRow := ðereum.ContractRunSQL{ + Caller: common.HexToAddress("0xb451cee4A42A652Fe77d373BAe66D42fd6B8D8FF"), IsOwner: true, TableId: eventCreateTable.TableId, Statement: "insert into foo_1337 values ('txn 1', 'wrong # of columns')", @@ -207,15 +210,13 @@ func newExecutor(t *testing.T, rowsLimit int) (*Executor, string) { dbURI := tests.Sqlite3URI(t) parser := newParser(t, []string{}) - db, err := sql.Open("sqlite3", dbURI) - require.NoError(t, err) - db.SetMaxOpenConns(1) - exec, err := NewExecutor(1337, db, parser, rowsLimit, &aclMock{}) + + db, err := database.Open(dbURI, 1) require.NoError(t, err) - // Boostrap system store to run the db migrations. - _, err = system.New(dbURI, tableland.ChainID(chainID)) + exec, err := NewExecutor(1337, db, parser, rowsLimit, impl.NewACL(db)) require.NoError(t, err) + return exec, dbURI } @@ -277,15 +278,3 @@ func newParser(t *testing.T, prefixes []string) parsing.SQLValidator { require.NoError(t, err) return p } - -type aclMock struct{} - -func (acl *aclMock) CheckPrivileges( - _ context.Context, - _ *sql.Tx, - _ common.Address, - _ tables.TableID, - _ tableland.Operation, -) (bool, error) { - return true, nil -} diff --git a/pkg/eventprocessor/impl/executor/impl/txnscope.go b/pkg/eventprocessor/impl/executor/impl/txnscope.go index 5181e401..763a79ce 100644 --- a/pkg/eventprocessor/impl/executor/impl/txnscope.go +++ b/pkg/eventprocessor/impl/executor/impl/txnscope.go @@ -111,3 +111,45 @@ func (ts *txnScope) executeTxnEvents( return executor.TxnExecutionResult{TableID: res.TableID}, nil } + +// AccessControlDTO data structure from database. +type AccessControlDTO struct { + TableID int64 + Controller string + Privileges int + ChainID int64 +} + +// AccessControl model. 
+type AccessControl struct { + Controller string + ChainID tableland.ChainID + TableID tables.TableID + Privileges tableland.Privileges +} + +// AccessControlFromDTO transforms the DTO to AccessControl model. +func AccessControlFromDTO(dto AccessControlDTO) (AccessControl, error) { + id, err := tables.NewTableIDFromInt64(dto.TableID) + if err != nil { + return AccessControl{}, fmt.Errorf("parsing id to string: %s", err) + } + + var privileges tableland.Privileges + if dto.Privileges&tableland.PrivInsert.Bitfield > 0 { + privileges = append(privileges, tableland.PrivInsert) + } + if dto.Privileges&tableland.PrivUpdate.Bitfield > 0 { + privileges = append(privileges, tableland.PrivUpdate) + } + if dto.Privileges&tableland.PrivDelete.Bitfield > 0 { + privileges = append(privileges, tableland.PrivDelete) + } + + return AccessControl{ + ChainID: tableland.ChainID(dto.ChainID), + TableID: id, + Controller: dto.Controller, + Privileges: privileges, + }, nil +} diff --git a/pkg/eventprocessor/impl/executor/impl/txnscope_createtable_test.go b/pkg/eventprocessor/impl/executor/impl/txnscope_createtable_test.go index 5a2e38b3..5cea4240 100644 --- a/pkg/eventprocessor/impl/executor/impl/txnscope_createtable_test.go +++ b/pkg/eventprocessor/impl/executor/impl/txnscope_createtable_test.go @@ -8,10 +8,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" - "github.com/textileio/go-tableland/internal/tableland" + gatewayimpl "github.com/textileio/go-tableland/internal/gateway/impl" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" ) @@ -35,10 +34,9 @@ func TestCreateTable(t *testing.T) { require.NoError(t, ex.Close(ctx)) // Check that the table was registered in the system-table. 
- systemStore, err := system.New(dbURI, tableland.ChainID(chainID)) - require.NoError(t, err) + tableID, _ := tables.NewTableID("100") - table, err := systemStore.GetTable(ctx, tableID) + table, err := gatewayimpl.NewGatewayStore(ex.db, nil).GetTable(ctx, 1337, tableID) require.NoError(t, err) require.Equal(t, tableID, table.ID) require.Equal(t, "0xb451cee4A42A652Fe77d373BAe66D42fd6B8D8FF", table.Controller) diff --git a/pkg/eventprocessor/impl/executor/impl/txnscope_runsql.go b/pkg/eventprocessor/impl/executor/impl/txnscope_runsql.go index 4d127900..d502dc97 100644 --- a/pkg/eventprocessor/impl/executor/impl/txnscope_runsql.go +++ b/pkg/eventprocessor/impl/executor/impl/txnscope_runsql.go @@ -31,6 +31,7 @@ func (ts *txnScope) executeRunSQLEvent( err := fmt.Sprintf("query targets table id %s and not %s", targetedTableID, tableID) return eventExecutionResult{Error: &err}, nil } + if err := ts.execWriteQueries(ctx, e.Caller, mutatingStmts, e.IsOwner, &policy{e.Policy}); err != nil { var dbErr *errQueryExecution if errors.As(err, &dbErr) { @@ -230,7 +231,7 @@ func (ts *txnScope) executeWriteStmt( return fmt.Errorf("not allowed to execute stmt: %w", err) } } else { - ok, err := ts.acl.CheckPrivileges(ctx, ts.txn, addr, ws.GetTableID(), ws.Operation()) + ok, err := ts.acl.CheckPrivileges(ctx, ts.txn, ts.scopeVars.ChainID, addr, ws.GetTableID(), ws.Operation()) if err != nil { return fmt.Errorf("error checking acl: %s", err) } diff --git a/pkg/eventprocessor/impl/executor/impl/txnscope_runsql_test.go b/pkg/eventprocessor/impl/executor/impl/txnscope_runsql_test.go index cbf44204..f2ee4dc7 100644 --- a/pkg/eventprocessor/impl/executor/impl/txnscope_runsql_test.go +++ b/pkg/eventprocessor/impl/executor/impl/txnscope_runsql_test.go @@ -14,7 +14,6 @@ import ( "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor" "github.com/textileio/go-tableland/pkg/parsing" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" ) @@ -116,7 +115,7 @@ func TestRunSQL_OneEventPerTxn(t *testing.T) { t.Parallel() ctx := context.Background() - ex, dbURI := newExecutorWithIntegerTable(t, 0) + ex, _ := newExecutorWithIntegerTable(t, 0) bs, err := ex.NewBlockScope(ctx, 0) require.NoError(t, err) @@ -131,14 +130,22 @@ func TestRunSQL_OneEventPerTxn(t *testing.T) { require.NoError(t, err) ss := mustGrantStmt(t, q).(parsing.GrantStmt) for _, role := range ss.GetRoles() { - // Check that an entry was inserted in the system_acl table for each row. 
- systemStore, err := system.New(dbURI, tableland.ChainID(chainID)) + tx, err := ex.db.DB.Begin() require.NoError(t, err) - aclRow, err := systemStore.GetACLOnTableByController(ctx, ss.GetTableID(), role.String()) + + ok, err := ex.acl.CheckPrivileges(ctx, tx, 1337, role, ss.GetTableID(), tableland.OpInsert) + require.NoError(t, err) + require.True(t, ok) + + ok, err = ex.acl.CheckPrivileges(ctx, tx, 1337, role, ss.GetTableID(), tableland.OpUpdate) + require.NoError(t, err) + require.True(t, ok) + + ok, err = ex.acl.CheckPrivileges(ctx, tx, 1337, role, ss.GetTableID(), tableland.OpDelete) require.NoError(t, err) - require.Equal(t, ss.GetTableID(), aclRow.TableID) - require.Equal(t, role.String(), aclRow.Controller) - require.ElementsMatch(t, ss.GetPrivileges(), aclRow.Privileges) + require.True(t, ok) + + require.NoError(t, tx.Commit()) } }) @@ -146,7 +153,7 @@ func TestRunSQL_OneEventPerTxn(t *testing.T) { t.Parallel() ctx := context.Background() - ex, dbURI := newExecutorWithIntegerTable(t, 0) + ex, _ := newExecutorWithIntegerTable(t, 0) bs, err := ex.NewBlockScope(ctx, 0) require.NoError(t, err) @@ -162,30 +169,79 @@ func TestRunSQL_OneEventPerTxn(t *testing.T) { require.NoError(t, bs.Close()) require.NoError(t, ex.Close(ctx)) - systemStore, err := system.New(dbURI, tableland.ChainID(chainID)) - require.NoError(t, err) - tableID, _ := tables.NewTableID("100") { - aclRow, err := systemStore.GetACLOnTableByController( - ctx, + tx, err := ex.db.DB.Begin() + require.NoError(t, err) + + ok, err := ex.acl.CheckPrivileges(ctx, + tx, + 1337, + common.HexToAddress("0xD43C59d5694eC111Eb9e986C233200b14249558D"), tableID, - "0xD43C59d5694eC111Eb9e986C233200b14249558D") + tableland.OpInsert, + ) require.NoError(t, err) - require.Equal(t, tableID, aclRow.TableID) - require.Equal(t, "0xD43C59d5694eC111Eb9e986C233200b14249558D", aclRow.Controller) - require.ElementsMatch(t, tableland.Privileges{tableland.PrivInsert, tableland.PrivUpdate}, aclRow.Privileges) + require.True(t, ok) + + ok, err = ex.acl.CheckPrivileges(ctx, + tx, + 1337, + common.HexToAddress("0xD43C59d5694eC111Eb9e986C233200b14249558D"), + tableID, + tableland.OpUpdate, + ) + require.NoError(t, err) + require.True(t, ok) + + ok, err = ex.acl.CheckPrivileges(ctx, + tx, + 1337, + common.HexToAddress("0xD43C59d5694eC111Eb9e986C233200b14249558D"), + tableID, + tableland.OpDelete, + ) + require.NoError(t, err) + require.False(t, ok) + + require.NoError(t, tx.Commit()) } { - aclRow, err := systemStore.GetACLOnTableByController( - ctx, + tx, err := ex.db.DB.Begin() + require.NoError(t, err) + + ok, err := ex.acl.CheckPrivileges(ctx, + tx, + 1337, + common.HexToAddress("0x4afE8e30DB4549384b0a05bb796468B130c7D6E0"), tableID, - "0x4afE8e30DB4549384b0a05bb796468B130c7D6E0") + tableland.OpInsert, + ) require.NoError(t, err) - require.Equal(t, tableID, aclRow.TableID) - require.Equal(t, "0x4afE8e30DB4549384b0a05bb796468B130c7D6E0", aclRow.Controller) - require.ElementsMatch(t, tableland.Privileges{tableland.PrivInsert, tableland.PrivDelete}, aclRow.Privileges) + require.True(t, ok) + + ok, err = ex.acl.CheckPrivileges(ctx, + tx, + 1337, + common.HexToAddress("0x4afE8e30DB4549384b0a05bb796468B130c7D6E0"), + tableID, + tableland.OpUpdate, + ) + require.NoError(t, err) + require.False(t, ok) + + ok, err = ex.acl.CheckPrivileges(ctx, + tx, + 1337, + common.HexToAddress("0x4afE8e30DB4549384b0a05bb796468B130c7D6E0"), + tableID, + tableland.OpDelete, + ) + require.NoError(t, err) + require.True(t, ok) + + require.NoError(t, tx.Commit()) } }) @@ -193,7 +249,7 
@@ func TestRunSQL_OneEventPerTxn(t *testing.T) { t.Parallel() ctx := context.Background() - ex, dbURI := newExecutorWithIntegerTable(t, 0) + ex, _ := newExecutorWithIntegerTable(t, 0) bs, err := ex.NewBlockScope(ctx, 0) require.NoError(t, err) @@ -206,19 +262,42 @@ func TestRunSQL_OneEventPerTxn(t *testing.T) { require.NoError(t, bs.Close()) require.NoError(t, ex.Close(ctx)) - systemStore, err := system.New(dbURI, tableland.ChainID(chainID)) - require.NoError(t, err) - tableID, _ := tables.NewTableID("100") { - aclRow, err := systemStore.GetACLOnTableByController( - ctx, + tx, err := ex.db.DB.Begin() + require.NoError(t, err) + + ok, err := ex.acl.CheckPrivileges(ctx, + tx, + 1337, + common.HexToAddress("0xD43C59d5694eC111Eb9e986C233200b14249558D"), + tableID, + tableland.OpInsert, + ) + require.NoError(t, err) + require.False(t, ok) + + ok, err = ex.acl.CheckPrivileges(ctx, + tx, + 1337, + common.HexToAddress("0xD43C59d5694eC111Eb9e986C233200b14249558D"), tableID, - "0xD43C59d5694eC111Eb9e986C233200b14249558D") + tableland.OpUpdate, + ) require.NoError(t, err) - require.Equal(t, tableID, aclRow.TableID) - require.Equal(t, "0xD43C59d5694eC111Eb9e986C233200b14249558D", aclRow.Controller) - require.ElementsMatch(t, tableland.Privileges{tableland.PrivUpdate}, aclRow.Privileges) + require.True(t, ok) + + ok, err = ex.acl.CheckPrivileges(ctx, + tx, + 1337, + common.HexToAddress("0xD43C59d5694eC111Eb9e986C233200b14249558D"), + tableID, + tableland.OpDelete, + ) + require.NoError(t, err) + require.False(t, ok) + + require.NoError(t, tx.Commit()) } }) } @@ -550,6 +629,7 @@ func execTxnWithRunSQLEventsAndPolicy( events := make([]interface{}, len(stmts)) for i, stmt := range stmts { events[i] = ðereum.ContractRunSQL{ + Caller: common.HexToAddress("0xb451cee4A42A652Fe77d373BAe66D42fd6B8D8FF"), IsOwner: true, TableId: big.NewInt(100), Statement: stmt, diff --git a/pkg/nonce/impl/store.go b/pkg/nonce/impl/store.go index fabe858b..99a9f15d 100644 --- a/pkg/nonce/impl/store.go +++ b/pkg/nonce/impl/store.go @@ -3,42 +3,76 @@ package impl import ( "context" "fmt" + "time" "github.com/ethereum/go-ethereum/common" + "github.com/rs/zerolog" + "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" + "github.com/textileio/go-tableland/pkg/database/db" "github.com/textileio/go-tableland/pkg/nonce" - "github.com/textileio/go-tableland/pkg/sqlstore" ) // NonceStore relies on the SQLStore implementation for now. type NonceStore struct { - systemStore sqlstore.SystemStore + log zerolog.Logger + sqliteDB *database.SQLiteDB } // NewNonceStore creates a new nonce store. -func NewNonceStore(systemStore sqlstore.SystemStore) nonce.NonceStore { - return &NonceStore{systemStore: systemStore} +func NewNonceStore(sqliteDB *database.SQLiteDB) nonce.NonceStore { + log := sqliteDB.Log.With(). + Str("component", "noncestore"). + Logger() + + return &NonceStore{ + log: log, + sqliteDB: sqliteDB, + } } // ListPendingTx lists all pendings txs. 
func (s *NonceStore) ListPendingTx( - ctx context.Context, - addr common.Address, + ctx context.Context, chainID tableland.ChainID, addr common.Address, ) ([]nonce.PendingTx, error) { - txs, err := s.systemStore.ListPendingTx(ctx, addr) + txs, err := s.sqliteDB.Queries.ListPendingTx(ctx, db.ListPendingTxParams{ + Address: addr.Hex(), + ChainID: int64(chainID), + }) if err != nil { return []nonce.PendingTx{}, fmt.Errorf("nonce store list pending tx: %s", err) } - return txs, nil + pendingTxs := make([]nonce.PendingTx, 0) + for _, r := range txs { + tx := nonce.PendingTx{ + Address: common.HexToAddress(r.Address), + Nonce: r.Nonce, + Hash: common.HexToHash(r.Hash), + ChainID: r.ChainID, + BumpPriceCount: r.BumpPriceCount, + CreatedAt: time.Unix(r.CreatedAt, 0), + } + + pendingTxs = append(pendingTxs, tx) + } + + return pendingTxs, nil } // InsertPendingTx insert a new pending tx. func (s *NonceStore) InsertPendingTx( ctx context.Context, + chainID tableland.ChainID, addr common.Address, nonce int64, hash common.Hash, ) error { - if err := s.systemStore.InsertPendingTx(ctx, addr, nonce, hash); err != nil { + if err := s.sqliteDB.Queries.InsertPendingTx(ctx, db.InsertPendingTxParams{ + ChainID: int64(chainID), + Address: addr.Hex(), + Hash: hash.Hex(), + Nonce: nonce, + }); err != nil { return fmt.Errorf("nonce store insert pending tx: %s", err) } @@ -46,8 +80,11 @@ func (s *NonceStore) InsertPendingTx( } // DeletePendingTxByHash deletes a pending tx. -func (s *NonceStore) DeletePendingTxByHash(ctx context.Context, hash common.Hash) error { - err := s.systemStore.DeletePendingTxByHash(ctx, hash) +func (s *NonceStore) DeletePendingTxByHash(ctx context.Context, chainID tableland.ChainID, hash common.Hash) error { + err := s.sqliteDB.Queries.DeletePendingTxByHash(ctx, db.DeletePendingTxByHashParams{ + ChainID: int64(chainID), + Hash: hash.Hex(), + }) if err != nil { return fmt.Errorf("nonce store delete pending tx: %s", err) } @@ -57,8 +94,14 @@ func (s *NonceStore) DeletePendingTxByHash(ctx context.Context, hash common.Hash // ReplacePendingTxByHash replaces a pending tx hash with another and also bumps the counter // to track how many times this happened for this nonce. 
-func (s *NonceStore) ReplacePendingTxByHash(ctx context.Context, oldHash common.Hash, newHash common.Hash) error {
-	err := s.systemStore.ReplacePendingTxByHash(ctx, oldHash, newHash)
+func (s *NonceStore) ReplacePendingTxByHash(
+	ctx context.Context, chainID tableland.ChainID, oldHash common.Hash, newHash common.Hash,
+) error {
+	err := s.sqliteDB.Queries.ReplacePendingTxByHash(ctx, db.ReplacePendingTxByHashParams{
+		Hash:    oldHash.Hex(),
+		Hash_2:  newHash.Hex(),
+		ChainID: int64(chainID),
+	})
 	if err != nil {
 		return fmt.Errorf("replacing pending tx: %s", err)
 	}
diff --git a/pkg/nonce/impl/tracker.go b/pkg/nonce/impl/tracker.go
index 9714be0c..337f45ca 100644
--- a/pkg/nonce/impl/tracker.go
+++ b/pkg/nonce/impl/tracker.go
@@ -132,6 +132,7 @@ func (t *LocalTracker) GetNonce(ctx context.Context) (noncepkg.RegisterPendingTx
 	if err := t.nonceStore.InsertPendingTx(
 		ctx,
+		t.chainID,
 		t.wallet.Address(),
 		nonce,
 		pendingHash); err != nil {
@@ -185,7 +186,7 @@ func (t *LocalTracker) initialize(ctx context.Context) error {
 	}
 	// Get pending txs for the address
-	pendingTxs, err := t.nonceStore.ListPendingTx(ctx, t.wallet.Address())
+	pendingTxs, err := t.nonceStore.ListPendingTx(ctx, t.chainID, t.wallet.Address())
 	if err != nil {
 		return fmt.Errorf("get nonce for tracker initialization: %s", err)
 	}
@@ -267,7 +268,7 @@ func (t *LocalTracker) checkIfPendingTxWasIncluded(
 }
 func (t *LocalTracker) deletePendingTxByHash(ctx context.Context, hash common.Hash) error {
-	if err := t.nonceStore.DeletePendingTxByHash(ctx, hash); err != nil {
+	if err := t.nonceStore.DeletePendingTxByHash(ctx, t.chainID, hash); err != nil {
		return fmt.Errorf("delete pending tx: %s", err)
 	}
@@ -335,7 +336,7 @@ func (t *LocalTracker) checkPendingTxns() error {
 			cls()
 			break
 		}
-		if err := t.nonceStore.ReplacePendingTxByHash(ctx, pendingTx.Hash, bumpedTxnHash); err != nil {
+		if err := t.nonceStore.ReplacePendingTxByHash(ctx, t.chainID, pendingTx.Hash, bumpedTxnHash); err != nil {
 			t.log.Error().
 				Str("hash", pendingTx.Hash.Hex()).
 				Int64("nonce", pendingTx.Nonce).
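With this change the nonce store is chain-scoped: it holds a single *database.SQLiteDB shared by all chains, and every call carries a tableland.ChainID next to the address or hash. A minimal sketch of the new call pattern, assuming the SQLite URI and the dummy tx hash are placeholders and using chain ID 1337 as the tests do:

    package main

    import (
        "context"
        "fmt"

        "github.com/ethereum/go-ethereum/common"
        _ "github.com/mattn/go-sqlite3" // sqlite3 driver
        "github.com/textileio/go-tableland/internal/tableland"
        "github.com/textileio/go-tableland/pkg/database"
        nonceimpl "github.com/textileio/go-tableland/pkg/nonce/impl"
    )

    func main() {
        ctx := context.Background()

        // Open the shared SQLite database. The URI is a placeholder; the tests
        // build theirs with tests.Sqlite3URI(t).
        db, err := database.Open("file:nonce.db?_busy_timeout=5000", 1)
        if err != nil {
            panic(err)
        }

        // The store is no longer bound to one chain at construction time;
        // the chain ID travels with every call instead.
        store := nonceimpl.NewNonceStore(db)
        chainID := tableland.ChainID(1337)
        addr := common.HexToAddress("0xb451cee4A42A652Fe77d373BAe66D42fd6B8D8FF")

        // Record a pending tx for this (chain, address) pair; the hash is a dummy value.
        if err := store.InsertPendingTx(ctx, chainID, addr, 0, common.HexToHash("0x01")); err != nil {
            panic(err)
        }

        // Only pending txs for this chain and address are returned.
        txs, err := store.ListPendingTx(ctx, chainID, addr)
        if err != nil {
            panic(err)
        }
        fmt.Println("pending txs:", len(txs))
    }

Compared with the old SystemStore-backed store, which was constructed per chain, the chain ID now has to be explicit on every query; that is what the LocalTracker changes above do by threading t.chainID through InsertPendingTx, ListPendingTx, DeletePendingTxByHash, and ReplacePendingTxByHash.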
diff --git a/pkg/nonce/impl/tracker_test.go b/pkg/nonce/impl/tracker_test.go index 5ed68b78..73f858a8 100644 --- a/pkg/nonce/impl/tracker_test.go +++ b/pkg/nonce/impl/tracker_test.go @@ -17,11 +17,11 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethclient" + _ "github.com/mattn/go-sqlite3" // sqlite3 driver "github.com/stretchr/testify/require" - "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" + "github.com/textileio/go-tableland/pkg/database/db" noncepkg "github.com/textileio/go-tableland/pkg/nonce" - "github.com/textileio/go-tableland/pkg/sqlstore" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" "github.com/textileio/go-tableland/pkg/tables/impl/testutil" "github.com/textileio/go-tableland/pkg/wallet" @@ -106,7 +106,7 @@ func TestTrackerPendingTxGotStuck(t *testing.T) { t.Parallel() ctx := context.Background() - tracker, backend, contract, txOpts, wallet, sqlstore := setup(ctx, t) + tracker, backend, contract, txOpts, wallet, sqlitedb := setup(ctx, t) _, err := contract.CreateTable(txOpts, txOpts.From, "CREATE TABLE Foo_1337 (bar int)") require.NoError(t, err) backend.Commit() @@ -128,7 +128,10 @@ func TestTrackerPendingTxGotStuck(t *testing.T) { require.Equal(t, int64(0), nonce1) require.Equal(t, int64(1), nonce2) require.Eventually(t, func() bool { - txs, err := sqlstore.ListPendingTx(ctx, wallet.Address()) + txs, err := sqlitedb.Queries.ListPendingTx(ctx, db.ListPendingTxParams{ + Address: wallet.Address().Hex(), + ChainID: 1337, + }) require.NoError(t, err) return tracker.GetPendingCount(ctx) == 1 && int64(1) == txs[0].Nonce }, 5*time.Second, time.Second) @@ -146,14 +149,15 @@ func TestInitialization(t *testing.T) { wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) require.NoError(t, err) - sqlstore, err := system.New(url, tableland.ChainID(1337)) + db, err := database.Open(url, 1) + // sqlstore, err := system.New(url, tableland.ChainID(1337)) require.NoError(t, err) // initialize without pending txs { tracker := &LocalTracker{ wallet: wallet, - nonceStore: &NonceStore{sqlstore}, + nonceStore: NewNonceStore(db), chainClient: &ChainMock{}, } @@ -172,9 +176,10 @@ func TestInitialization(t *testing.T) { testAddress := wallet.Address() // insert two pending txs (nonce 0 and nonce 1) - nonceStore := &NonceStore{sqlstore} + nonceStore := NewNonceStore(db) err := nonceStore.InsertPendingTx( ctx, + 1337, testAddress, 0, common.HexToHash("0x119f50bf7f1ff2daa4712119af9dbd429ab727690565f93193f63650b020bc30"), @@ -183,6 +188,7 @@ func TestInitialization(t *testing.T) { err = nonceStore.InsertPendingTx( ctx, + 1337, testAddress, 1, common.HexToHash("0x7a0edee97ea3543c279a7329665cc851a9ea53a39ad5bbce55338052808a23a9"), @@ -191,7 +197,8 @@ func TestInitialization(t *testing.T) { tracker := &LocalTracker{ wallet: wallet, - nonceStore: &NonceStore{sqlstore}, + chainID: 1337, + nonceStore: NewNonceStore(db), chainClient: &ChainMock{}, } @@ -218,15 +225,16 @@ func TestMinBlockDepth(t *testing.T) { wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) require.NoError(t, err) - sqlstore, err := system.New(url, tableland.ChainID(1337)) + db, err := database.Open(url, 1) require.NoError(t, err) testAddress := wallet.Address() // insert two pending txs (nonce 0 and nonce 1) - nonceStore := &NonceStore{sqlstore} + nonceStore := NewNonceStore(db) 
err = nonceStore.InsertPendingTx( ctx, + 1337, testAddress, 0, common.HexToHash("0x119f50bf7f1ff2daa4712119af9dbd429ab727690565f93193f63650b020bc30"), @@ -235,6 +243,7 @@ func TestMinBlockDepth(t *testing.T) { err = nonceStore.InsertPendingTx( ctx, + 1337, testAddress, 1, common.HexToHash("0x7a0edee97ea3543c279a7329665cc851a9ea53a39ad5bbce55338052808a23a9"), @@ -243,8 +252,9 @@ func TestMinBlockDepth(t *testing.T) { tracker := &LocalTracker{ wallet: wallet, - nonceStore: &NonceStore{sqlstore}, + nonceStore: NewNonceStore(db), chainClient: &ChainMock{}, + chainID: 1337, pendingTxs: []noncepkg.PendingTx{{ Nonce: 0, @@ -272,7 +282,7 @@ func TestMinBlockDepth(t *testing.T) { err = tracker.checkIfPendingTxWasIncluded(ctx, tracker.pendingTxs[0], h) require.NoError(t, err) require.Equal(t, 1, tracker.GetPendingCount(ctx)) - txs, err := nonceStore.ListPendingTx(ctx, testAddress) + txs, err := nonceStore.ListPendingTx(ctx, 1337, testAddress) require.NoError(t, err) require.Equal(t, 1, len(txs)) @@ -281,7 +291,7 @@ func TestMinBlockDepth(t *testing.T) { err = tracker.checkIfPendingTxWasIncluded(ctx, tracker.pendingTxs[0], h) require.Equal(t, noncepkg.ErrBlockDiffNotEnough, err) require.Equal(t, 1, tracker.GetPendingCount(ctx)) - txs, err = nonceStore.ListPendingTx(ctx, testAddress) + txs, err = nonceStore.ListPendingTx(ctx, 1337, testAddress) require.NoError(t, err) require.Equal(t, 1, len(txs)) @@ -290,7 +300,7 @@ func TestMinBlockDepth(t *testing.T) { err = tracker.checkIfPendingTxWasIncluded(ctx, tracker.pendingTxs[0], h) require.NoError(t, err) require.Equal(t, 0, tracker.GetPendingCount(ctx)) - txs, err = nonceStore.ListPendingTx(ctx, testAddress) + txs, err = nonceStore.ListPendingTx(ctx, 1337, testAddress) require.NoError(t, err) require.Equal(t, 0, len(txs)) } @@ -375,15 +385,16 @@ func TestCheckIfPendingTxIsStuck(t *testing.T) { wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) require.NoError(t, err) - sqlstore, err := system.New(url, tableland.ChainID(1337)) + db, err := database.Open(url, 1) require.NoError(t, err) testAddress := wallet.Address() // insert two pending txs (nonce 0 and nonce 1) - nonceStore := &NonceStore{sqlstore} + nonceStore := NewNonceStore(db) err = nonceStore.InsertPendingTx( ctx, + 1337, testAddress, 0, common.HexToHash("0xda3601329d295f03dc75bf42569f476f22995c456334c9a39a05e7cb7877dc41"), @@ -392,8 +403,9 @@ func TestCheckIfPendingTxIsStuck(t *testing.T) { tracker := &LocalTracker{ wallet: wallet, - nonceStore: &NonceStore{sqlstore}, + nonceStore: NewNonceStore(db), chainClient: &ChainMock{}, + chainID: 1337, pendingTxs: []noncepkg.PendingTx{{ Nonce: 0, @@ -415,7 +427,7 @@ func TestCheckIfPendingTxIsStuck(t *testing.T) { err = tracker.checkIfPendingTxWasIncluded(ctx, tracker.pendingTxs[0], h) require.Equal(t, noncepkg.ErrPendingTxMayBeStuck, err) require.Equal(t, 1, tracker.GetPendingCount(ctx)) - txs, err := nonceStore.ListPendingTx(ctx, testAddress) + txs, err := nonceStore.ListPendingTx(ctx, 1337, testAddress) require.NoError(t, err) require.Equal(t, 1, len(txs)) } @@ -477,7 +489,7 @@ func setup(ctx context.Context, t *testing.T) ( *ethereum.Contract, *bind.TransactOpts, *wallet.Wallet, - sqlstore.SystemStore, + *database.SQLiteDB, ) { url := tests.Sqlite3URI(t) @@ -494,13 +506,13 @@ func setup(ctx context.Context, t *testing.T) ( wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) require.NoError(t, err) - sqlstore, err := system.New(url, tableland.ChainID(1337)) + db, err := database.Open(url, 1) 
 	require.NoError(t, err)
 	tracker, err := NewLocalTracker(
 		ctx,
 		wallet,
-		&NonceStore{sqlstore},
+		NewNonceStore(db),
 		1337,
 		backend,
 		500*time.Millisecond,
@@ -508,7 +520,7 @@
 		10*time.Minute)
 	require.NoError(t, err)
-	return tracker, backend, contract, txOptsTo, wallet, sqlstore
+	return tracker, backend, contract, txOptsTo, wallet, db
 }
 func requireTxn(
diff --git a/pkg/nonce/tracker.go b/pkg/nonce/tracker.go
index 9aad7330..c6ec87ab 100644
--- a/pkg/nonce/tracker.go
+++ b/pkg/nonce/tracker.go
@@ -8,6 +8,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/textileio/go-tableland/internal/tableland"
 )
 // PendingTx represents a pending tx.
@@ -63,8 +64,8 @@ type ChainClient interface {
 // NonceStore provides the api for managing the storage of nonce and pending txs.
 type NonceStore interface {
-	ListPendingTx(context.Context, common.Address) ([]PendingTx, error)
-	InsertPendingTx(context.Context, common.Address, int64, common.Hash) error
-	DeletePendingTxByHash(context.Context, common.Hash) error
-	ReplacePendingTxByHash(context.Context, common.Hash, common.Hash) error
+	ListPendingTx(context.Context, tableland.ChainID, common.Address) ([]PendingTx, error)
+	InsertPendingTx(context.Context, tableland.ChainID, common.Address, int64, common.Hash) error
+	DeletePendingTxByHash(context.Context, tableland.ChainID, common.Hash) error
+	ReplacePendingTxByHash(context.Context, tableland.ChainID, common.Hash, common.Hash) error
 }
diff --git a/pkg/sqlstore/impl/system/internal/db/registry.sql.go b/pkg/sqlstore/impl/system/internal/db/registry.sql.go
deleted file mode 100644
index 03e58104..00000000
--- a/pkg/sqlstore/impl/system/internal/db/registry.sql.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Code generated by sqlc. DO NOT EDIT.
-// versions: -// sqlc v1.15.0 -// source: registry.sql - -package db - -import ( - "context" -) - -const getTable = `-- name: GetTable :one -SELECT id, structure, controller, prefix, created_at, chain_id FROM registry WHERE chain_id =?1 AND id = ?2 -` - -type GetTableParams struct { - ChainID int64 - ID int64 -} - -func (q *Queries) GetTable(ctx context.Context, arg GetTableParams) (Registry, error) { - row := q.queryRow(ctx, q.getTableStmt, getTable, arg.ChainID, arg.ID) - var i Registry - err := row.Scan( - &i.ID, - &i.Structure, - &i.Controller, - &i.Prefix, - &i.CreatedAt, - &i.ChainID, - ) - return i, err -} - -const getTablesByController = `-- name: GetTablesByController :many -SELECT id, structure, controller, prefix, created_at, chain_id FROM registry WHERE chain_id=?1 AND upper(controller) LIKE upper(?2) -` - -type GetTablesByControllerParams struct { - ChainID int64 - UPPER string -} - -func (q *Queries) GetTablesByController(ctx context.Context, arg GetTablesByControllerParams) ([]Registry, error) { - rows, err := q.query(ctx, q.getTablesByControllerStmt, getTablesByController, arg.ChainID, arg.UPPER) - if err != nil { - return nil, err - } - defer rows.Close() - var items []Registry - for rows.Next() { - var i Registry - if err := rows.Scan( - &i.ID, - &i.Structure, - &i.Controller, - &i.Prefix, - &i.CreatedAt, - &i.ChainID, - ); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Close(); err != nil { - return nil, err - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} - -const getTablesByStructure = `-- name: GetTablesByStructure :many -SELECT id, structure, controller, prefix, created_at, chain_id FROM registry WHERE chain_id=?1 AND structure=?2 -` - -type GetTablesByStructureParams struct { - ChainID int64 - Structure string -} - -func (q *Queries) GetTablesByStructure(ctx context.Context, arg GetTablesByStructureParams) ([]Registry, error) { - rows, err := q.query(ctx, q.getTablesByStructureStmt, getTablesByStructure, arg.ChainID, arg.Structure) - if err != nil { - return nil, err - } - defer rows.Close() - var items []Registry - for rows.Next() { - var i Registry - if err := rows.Scan( - &i.ID, - &i.Structure, - &i.Controller, - &i.Prefix, - &i.CreatedAt, - &i.ChainID, - ); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Close(); err != nil { - return nil, err - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} diff --git a/pkg/sqlstore/impl/system/queries/registry.sql b/pkg/sqlstore/impl/system/queries/registry.sql deleted file mode 100644 index 791226c1..00000000 --- a/pkg/sqlstore/impl/system/queries/registry.sql +++ /dev/null @@ -1,8 +0,0 @@ --- name: GetTable :one -SELECT * FROM registry WHERE chain_id =?1 AND id = ?2; - --- name: GetTablesByController :many -SELECT * FROM registry WHERE chain_id=?1 AND upper(controller) LIKE upper(?2); - --- name: GetTablesByStructure :many -SELECT * FROM registry WHERE chain_id=?1 AND structure=?2; \ No newline at end of file diff --git a/pkg/sqlstore/impl/system/store.go b/pkg/sqlstore/impl/system/store.go deleted file mode 100644 index 8488651f..00000000 --- a/pkg/sqlstore/impl/system/store.go +++ /dev/null @@ -1,641 +0,0 @@ -package system - -import ( - "context" - "database/sql" - "errors" - "fmt" - "strings" - "time" - - "github.com/XSAM/otelsql" - "github.com/ethereum/go-ethereum/common" - "github.com/google/uuid" - - "github.com/rs/zerolog" - logger "github.com/rs/zerolog/log" - 
"github.com/tablelandnetwork/sqlparser" - - "go.opentelemetry.io/otel/attribute" - - "github.com/golang-migrate/migrate/v4" - _ "github.com/golang-migrate/migrate/v4/database/sqlite3" // migration for sqlite3 - bindata "github.com/golang-migrate/migrate/v4/source/go_bindata" - "github.com/textileio/go-tableland/internal/tableland" - "github.com/textileio/go-tableland/pkg/eventprocessor" - "github.com/textileio/go-tableland/pkg/metrics" - "github.com/textileio/go-tableland/pkg/nonce" - "github.com/textileio/go-tableland/pkg/parsing" - "github.com/textileio/go-tableland/pkg/sqlstore" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/internal/db" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/migrations" - "github.com/textileio/go-tableland/pkg/tables" -) - -// SystemStore provides a persistent layer for storage requests. -// The methods implemented by this layer can be executed inside a given transaction or not. -// For safety reasons, this layer has no access to the database object or the transaction object. -// The access is made through the dbWithTx interface. -type SystemStore struct { - log zerolog.Logger - chainID tableland.ChainID - dbWithTx dbWithTx - db *sql.DB - resolver sqlparser.ReadStatementResolver -} - -// New returns a new SystemStore backed by database/sql. -func New(dbURI string, chainID tableland.ChainID) (*SystemStore, error) { - attrs := append([]attribute.KeyValue{ - attribute.String("name", "systemstore"), - attribute.Int64("chain_id", int64(chainID)), - }, - metrics.BaseAttrs...) - dbc, err := otelsql.Open("sqlite3", dbURI, otelsql.WithAttributes(attrs...)) - if err != nil { - return nil, fmt.Errorf("connecting to db: %s", err) - } - if err := otelsql.RegisterDBStatsMetrics(dbc, otelsql.WithAttributes( - attribute.String("name", "systemstore"), - attribute.Int64("chain_id", int64(chainID)), - )); err != nil { - return nil, fmt.Errorf("registering dbstats: %s", err) - } - - log := logger.With(). - Str("component", "systemstore"). - Int64("chain_id", int64(chainID)). - Logger() - - systemStore := &SystemStore{ - log: log, - dbWithTx: &dbWithTxImpl{db: db.New(dbc)}, - db: dbc, - chainID: chainID, - } - - as := bindata.Resource(migrations.AssetNames(), migrations.Asset) - if err := systemStore.executeMigration(dbURI, as); err != nil { - return nil, fmt.Errorf("initializing db connection: %s", err) - } - - return systemStore, nil -} - -// SetReadResolver sets the resolver for read queries. -func (s *SystemStore) SetReadResolver(resolver sqlparser.ReadStatementResolver) { - s.resolver = resolver -} - -// GetTable fetchs a table from its UUID. -func (s *SystemStore) GetTable(ctx context.Context, id tables.TableID) (sqlstore.Table, error) { - table, err := s.dbWithTx.queries().GetTable(ctx, db.GetTableParams{ - ChainID: int64(s.chainID), - ID: id.ToBigInt().Int64(), - }) - if err != nil { - return sqlstore.Table{}, fmt.Errorf("failed to get the table: %w", err) - } - return tableFromSQLToDTO(table) -} - -// GetTablesByController fetchs a table from controller address. 
-func (s *SystemStore) GetTablesByController(ctx context.Context, controller string) ([]sqlstore.Table, error) { - if err := sanitizeAddress(controller); err != nil { - return []sqlstore.Table{}, fmt.Errorf("sanitizing address: %s", err) - } - sqlcTables, err := s.dbWithTx.queries().GetTablesByController(ctx, db.GetTablesByControllerParams{ - ChainID: int64(s.chainID), - UPPER: controller, - }) - if err != nil { - return []sqlstore.Table{}, fmt.Errorf("failed to get the table: %s", err) - } - - tables := make([]sqlstore.Table, len(sqlcTables)) - for i := range sqlcTables { - tables[i], err = tableFromSQLToDTO(sqlcTables[i]) - if err != nil { - return nil, fmt.Errorf("parsing database table to dto: %s", err) - } - } - - return tables, nil -} - -// GetACLOnTableByController returns the privileges on table stored in the database for a given controller. -func (s *SystemStore) GetACLOnTableByController( - ctx context.Context, - id tables.TableID, - controller string, -) (sqlstore.SystemACL, error) { - params := db.GetAclByTableAndControllerParams{ - ChainID: int64(s.chainID), - UPPER: controller, - TableID: id.ToBigInt().Int64(), - } - - systemACL, err := s.dbWithTx.queries().GetAclByTableAndController(ctx, params) - if err == sql.ErrNoRows { - return sqlstore.SystemACL{ - Controller: controller, - TableID: id, - }, nil - } - - if err != nil { - return sqlstore.SystemACL{}, fmt.Errorf("failed to get the acl info: %s", err) - } - - return aclFromSQLtoDTO(systemACL) -} - -// ListPendingTx lists all pendings txs. -func (s *SystemStore) ListPendingTx(ctx context.Context, addr common.Address) ([]nonce.PendingTx, error) { - params := db.ListPendingTxParams{ - Address: addr.Hex(), - ChainID: int64(s.chainID), - } - - res, err := s.dbWithTx.queries().ListPendingTx(ctx, params) - if err != nil { - return nil, fmt.Errorf("list pending tx: %s", err) - } - - pendingTxs := make([]nonce.PendingTx, 0) - for _, r := range res { - tx := nonce.PendingTx{ - Address: common.HexToAddress(r.Address), - Nonce: r.Nonce, - Hash: common.HexToHash(r.Hash), - ChainID: r.ChainID, - BumpPriceCount: r.BumpPriceCount, - CreatedAt: time.Unix(r.CreatedAt, 0), - } - - pendingTxs = append(pendingTxs, tx) - } - - return pendingTxs, nil -} - -// InsertPendingTx insert a new pending tx. -func (s *SystemStore) InsertPendingTx( - ctx context.Context, - addr common.Address, - nonce int64, hash common.Hash, -) error { - params := db.InsertPendingTxParams{ - Address: addr.Hex(), - ChainID: int64(s.chainID), - Nonce: nonce, - Hash: hash.Hex(), - } - - err := s.dbWithTx.queries().InsertPendingTx(ctx, params) - if err != nil { - return fmt.Errorf("insert pending tx: %s", err) - } - - return nil -} - -// DeletePendingTxByHash deletes a pending tx. -func (s *SystemStore) DeletePendingTxByHash(ctx context.Context, hash common.Hash) error { - err := s.dbWithTx.queries().DeletePendingTxByHash(ctx, db.DeletePendingTxByHashParams{ - ChainID: int64(s.chainID), - Hash: hash.Hex(), - }) - if err != nil { - return fmt.Errorf("delete pending tx: %s", err) - } - - return nil -} - -// ReplacePendingTxByHash replaces the txn hash of a pending txn and bumps the counter of how many times this happened. 
-func (s *SystemStore) ReplacePendingTxByHash(ctx context.Context, oldHash common.Hash, newHash common.Hash) error { - err := s.dbWithTx.queries().ReplacePendingTxByHash(ctx, db.ReplacePendingTxByHashParams{ - ChainID: int64(s.chainID), - Hash: oldHash.Hex(), - Hash_2: newHash.Hex(), - }) - if err != nil { - return fmt.Errorf("replace pending tx: %s", err) - } - return nil -} - -// GetTablesByStructure gets all tables with a particular structure hash. -func (s *SystemStore) GetTablesByStructure(ctx context.Context, structure string) ([]sqlstore.Table, error) { - rows, err := s.dbWithTx.queries().GetTablesByStructure(ctx, db.GetTablesByStructureParams{ - ChainID: int64(s.chainID), - Structure: structure, - }) - if err != nil { - return []sqlstore.Table{}, fmt.Errorf("failed to get the table: %s", err) - } - - tables := make([]sqlstore.Table, len(rows)) - for i := range rows { - tables[i], err = tableFromSQLToDTO(rows[i]) - if err != nil { - return nil, fmt.Errorf("parsing database table to dto: %s", err) - } - } - - return tables, nil -} - -// GetSchemaByTableName get the schema of a table by its name. -func (s *SystemStore) GetSchemaByTableName(ctx context.Context, name string) (sqlstore.TableSchema, error) { - createStmt, err := s.dbWithTx.queries().GetSchemaByTableName(ctx, name) - if err != nil { - return sqlstore.TableSchema{}, fmt.Errorf("failed to get the table: %s", err) - } - - if strings.Contains(strings.ToLower(createStmt), "autoincrement") { - createStmt = strings.Replace(createStmt, "autoincrement", "", -1) - } - - index := strings.LastIndex(strings.ToLower(createStmt), "strict") - ast, err := sqlparser.Parse(createStmt[:index]) - if err != nil { - return sqlstore.TableSchema{}, fmt.Errorf("failed to parse create stmt: %s", err) - } - - if ast.Errors[0] != nil { - return sqlstore.TableSchema{}, fmt.Errorf("non-syntax error: %s", ast.Errors[0]) - } - - createTableNode := ast.Statements[0].(*sqlparser.CreateTable) - columns := make([]sqlstore.ColumnSchema, len(createTableNode.ColumnsDef)) - for i, col := range createTableNode.ColumnsDef { - colConstraints := []string{} - for _, colConstraint := range col.Constraints { - colConstraints = append(colConstraints, colConstraint.String()) - } - - columns[i] = sqlstore.ColumnSchema{ - Name: col.Column.String(), - Type: strings.ToLower(col.Type), - Constraints: colConstraints, - } - } - - tableConstraints := make([]string, len(createTableNode.Constraints)) - for i, tableConstraint := range createTableNode.Constraints { - tableConstraints[i] = tableConstraint.String() - } - - return sqlstore.TableSchema{ - Columns: columns, - TableConstraints: tableConstraints, - }, nil -} - -// GetID returns node identifier. -func (s *SystemStore) GetID(ctx context.Context) (string, error) { - id, err := s.dbWithTx.queries().GetId(ctx) - if err == sql.ErrNoRows { - id = strings.Replace(uuid.NewString(), "-", "", -1) - if err := s.dbWithTx.queries().InsertId(ctx, id); err != nil { - return "", fmt.Errorf("failed to insert id: %s", err) - } - return id, nil - } - if err != nil { - return "", fmt.Errorf("failed to get id: %s", err) - } - - return id, err -} - -// Read executes a read statement on the db. 
-func (s *SystemStore) Read(ctx context.Context, rq parsing.ReadStmt) (*tableland.TableData, error) { - query, err := rq.GetQuery(s.resolver) - if err != nil { - return nil, fmt.Errorf("get query: %s", err) - } - ret, err := s.execReadQuery(ctx, s.db, query) - if err != nil { - return nil, fmt.Errorf("parsing result to json: %s", err) - } - - return ret, nil -} - -func (s *SystemStore) execReadQuery(ctx context.Context, tx *sql.DB, q string) (*tableland.TableData, error) { - rows, err := tx.QueryContext(ctx, q) - if err != nil { - return nil, fmt.Errorf("executing query: %s", err) - } - defer func() { - if err = rows.Close(); err != nil { - s.log.Warn().Err(err).Msg("closing rows") - } - }() - return rowsToTableData(rows) -} - -// WithTx returns a copy of the current SystemStore with a tx attached. -func (s *SystemStore) WithTx(tx *sql.Tx) sqlstore.SystemStore { - return &SystemStore{ - chainID: s.chainID, - dbWithTx: &dbWithTxImpl{ - db: s.dbWithTx.queries(), - tx: tx, - }, - db: s.db, - } -} - -// Begin returns a new tx. -func (s *SystemStore) Begin(_ context.Context) (*sql.Tx, error) { - return s.db.Begin() -} - -// GetReceipt returns a event receipt by transaction hash. -func (s *SystemStore) GetReceipt( - ctx context.Context, - txnHash string, -) (eventprocessor.Receipt, bool, error) { - params := db.GetReceiptParams{ - ChainID: int64(s.chainID), - TxnHash: txnHash, - } - - res, err := s.dbWithTx.queries().GetReceipt(ctx, params) - if err == sql.ErrNoRows { - return eventprocessor.Receipt{}, false, nil - } - if err != nil { - return eventprocessor.Receipt{}, false, fmt.Errorf("get receipt: %s", err) - } - - receipt := eventprocessor.Receipt{ - ChainID: s.chainID, - BlockNumber: res.BlockNumber, - IndexInBlock: res.IndexInBlock, - TxnHash: txnHash, - } - if res.Error.Valid { - receipt.Error = &res.Error.String - - errorEventIdx := int(res.ErrorEventIdx.Int64) - receipt.ErrorEventIdx = &errorEventIdx - } - if res.TableID.Valid { - id, err := tables.NewTableIDFromInt64(res.TableID.Int64) - if err != nil { - return eventprocessor.Receipt{}, false, fmt.Errorf("parsing id to string: %s", err) - } - receipt.TableID = &id - } - - return receipt, true, nil -} - -// AreEVMEventsPersisted returns true if there're events persisted for the provided txn hash, and false otherwise. -func (s *SystemStore) AreEVMEventsPersisted(ctx context.Context, txnHash common.Hash) (bool, error) { - params := db.AreEVMEventsPersistedParams{ - ChainID: int64(s.chainID), - TxHash: txnHash.Hex(), - } - _, err := s.dbWithTx.queries().AreEVMEventsPersisted(ctx, params) - if err == sql.ErrNoRows { - return false, nil - } - if err != nil { - return false, fmt.Errorf("evm txn events lookup: %s", err) - } - return true, nil -} - -// SaveEVMEvents saves the provider EVMEvents. -func (s *SystemStore) SaveEVMEvents(ctx context.Context, events []tableland.EVMEvent) error { - queries := s.dbWithTx.queries() - for _, e := range events { - args := db.InsertEVMEventParams{ - ChainID: int64(e.ChainID), - EventJson: string(e.EventJSON), - EventType: e.EventType, - Address: e.Address.Hex(), - Topics: string(e.Topics), - Data: e.Data, - BlockNumber: int64(e.BlockNumber), - TxHash: e.TxHash.Hex(), - TxIndex: e.TxIndex, - BlockHash: e.BlockHash.Hex(), - EventIndex: e.Index, - } - if err := queries.InsertEVMEvent(ctx, args); err != nil { - return fmt.Errorf("insert evm event: %s", err) - } - } - - return nil -} - -// GetBlocksMissingExtraInfo returns a list of block numbers that don't contain enhanced information. 
-// It receives an optional fromHeight to only look for blocks after a block number. If null it will look -// for blocks at any height. -func (s *SystemStore) GetBlocksMissingExtraInfo(ctx context.Context, lastKnownHeight *int64) ([]int64, error) { - var blockNumbers []int64 - var err error - if lastKnownHeight == nil { - blockNumbers, err = s.dbWithTx.queries().GetBlocksMissingExtraInfo(ctx, int64(s.chainID)) - } else { - params := db.GetBlocksMissingExtraInfoByBlockNumberParams{ - ChainID: int64(s.chainID), - BlockNumber: *lastKnownHeight, - } - blockNumbers, err = s.dbWithTx.queries().GetBlocksMissingExtraInfoByBlockNumber(ctx, params) - } - if err == sql.ErrNoRows { - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("get blocks missing extra info: %s", err) - } - - return blockNumbers, nil -} - -// InsertBlockExtraInfo inserts enhanced information for a block. -func (s *SystemStore) InsertBlockExtraInfo(ctx context.Context, blockNumber int64, timestamp uint64) error { - params := db.InsertBlockExtraInfoParams{ - ChainID: int64(s.chainID), - BlockNumber: blockNumber, - Timestamp: int64(timestamp), - } - if err := s.dbWithTx.queries().InsertBlockExtraInfo(ctx, params); err != nil { - return fmt.Errorf("insert block extra info: %s", err) - } - - return nil -} - -// GetBlockExtraInfo info returns stored information about an EVM block. -func (s *SystemStore) GetBlockExtraInfo(ctx context.Context, blockNumber int64) (tableland.EVMBlockInfo, error) { - params := db.GetBlockExtraInfoParams{ - ChainID: int64(s.chainID), - BlockNumber: blockNumber, - } - - blockInfo, err := s.dbWithTx.queries().GetBlockExtraInfo(ctx, params) - if err == sql.ErrNoRows { - return tableland.EVMBlockInfo{}, fmt.Errorf("block information not found: %w", err) - } - if err != nil { - return tableland.EVMBlockInfo{}, fmt.Errorf("get block information: %s", err) - } - - return tableland.EVMBlockInfo{ - ChainID: tableland.ChainID(blockInfo.ChainID), - BlockNumber: blockInfo.BlockNumber, - Timestamp: time.Unix(blockInfo.Timestamp, 0), - }, nil -} - -// GetEVMEvents returns all the persisted events for a transaction. -func (s *SystemStore) GetEVMEvents(ctx context.Context, txnHash common.Hash) ([]tableland.EVMEvent, error) { - args := db.GetEVMEventsParams{ - ChainID: int64(s.chainID), - TxHash: txnHash.Hex(), - } - events, err := s.dbWithTx.queries().GetEVMEvents(ctx, args) - if err != nil { - return nil, fmt.Errorf("get events by txhash: %s", err) - } - - ret := make([]tableland.EVMEvent, len(events)) - for i, event := range events { - ret[i] = tableland.EVMEvent{ - Address: common.HexToAddress(event.Address), - Topics: []byte(event.Topics), - Data: event.Data, - BlockNumber: uint64(event.BlockNumber), - TxHash: common.HexToHash(event.TxHash), - TxIndex: event.TxIndex, - BlockHash: common.HexToHash(event.BlockHash), - Index: event.EventIndex, - ChainID: tableland.ChainID(event.ChainID), - EventJSON: []byte(event.EventJson), - EventType: event.EventType, - } - } - - return ret, nil -} - -// Close closes the store. -func (s *SystemStore) Close() error { - if err := s.db.Close(); err != nil { - return fmt.Errorf("closing db: %s", err) - } - return nil -} - -// executeMigration run db migrations and return a ready to use connection to the SQLite database. 
-func (s *SystemStore) executeMigration(dbURI string, as *bindata.AssetSource) error { - d, err := bindata.WithInstance(as) - if err != nil { - return fmt.Errorf("creating source driver: %s", err) - } - - m, err := migrate.NewWithSourceInstance("go-bindata", d, "sqlite3://"+dbURI) - if err != nil { - return fmt.Errorf("creating migration: %s", err) - } - defer func() { - if _, err := m.Close(); err != nil { - s.log.Error().Err(err).Msg("closing db migration") - } - }() - version, dirty, err := m.Version() - s.log.Info(). - Uint("dbVersion", version). - Bool("dirty", dirty). - Err(err). - Msg("database migration executed") - - if err := m.Up(); err != nil && err != migrate.ErrNoChange { - return fmt.Errorf("running migration up: %s", err) - } - - return nil -} - -func tableFromSQLToDTO(table db.Registry) (sqlstore.Table, error) { - id, err := tables.NewTableIDFromInt64(table.ID) - if err != nil { - return sqlstore.Table{}, fmt.Errorf("parsing id to string: %s", err) - } - return sqlstore.Table{ - ID: id, - ChainID: tableland.ChainID(table.ChainID), - Controller: table.Controller, - Prefix: table.Prefix, - Structure: table.Structure, - CreatedAt: time.Unix(table.CreatedAt, 0), - }, nil -} - -func aclFromSQLtoDTO(acl db.SystemAcl) (sqlstore.SystemACL, error) { - id, err := tables.NewTableIDFromInt64(acl.TableID) - if err != nil { - return sqlstore.SystemACL{}, fmt.Errorf("parsing id to string: %s", err) - } - - var privileges tableland.Privileges - if acl.Privileges&tableland.PrivInsert.Bitfield > 0 { - privileges = append(privileges, tableland.PrivInsert) - } - if acl.Privileges&tableland.PrivUpdate.Bitfield > 0 { - privileges = append(privileges, tableland.PrivUpdate) - } - if acl.Privileges&tableland.PrivDelete.Bitfield > 0 { - privileges = append(privileges, tableland.PrivDelete) - } - - systemACL := sqlstore.SystemACL{ - ChainID: tableland.ChainID(acl.ChainID), - TableID: id, - Controller: acl.Controller, - Privileges: privileges, - CreatedAt: time.Unix(acl.CreatedAt, 0), - } - - if acl.UpdatedAt.Valid { - updatedAt := time.Unix(acl.UpdatedAt.Int64, 0) - systemACL.UpdatedAt = &updatedAt - } - - return systemACL, nil -} - -func sanitizeAddress(address string) error { - if strings.ContainsAny(address, "%_") { - return errors.New("address contains invalid characters") - } - return nil -} - -// DBWithTx gives access to db.Queries with the possibility -// of a tx attached, preventing direct access to the db and tx. 
-type dbWithTx interface { - queries() *db.Queries -} - -type dbWithTxImpl struct { - db *db.Queries - tx *sql.Tx -} - -func (d *dbWithTxImpl) queries() *db.Queries { - if d.tx == nil { - return d.db - } - return d.db.WithTx(d.tx) -} diff --git a/pkg/sqlstore/impl/system/store_instrumented.go b/pkg/sqlstore/impl/system/store_instrumented.go deleted file mode 100644 index 1c5868a6..00000000 --- a/pkg/sqlstore/impl/system/store_instrumented.go +++ /dev/null @@ -1,437 +0,0 @@ -package system - -import ( - "context" - "database/sql" - "fmt" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/rs/zerolog/log" - "github.com/tablelandnetwork/sqlparser" - "github.com/textileio/go-tableland/internal/tableland" - "github.com/textileio/go-tableland/pkg/eventprocessor" - "github.com/textileio/go-tableland/pkg/metrics" - "github.com/textileio/go-tableland/pkg/nonce" - "github.com/textileio/go-tableland/pkg/parsing" - "github.com/textileio/go-tableland/pkg/sqlstore" - "github.com/textileio/go-tableland/pkg/tables" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" -) - -// InstrumentedSystemStore implements a instrumented SQLStore. -type InstrumentedSystemStore struct { - chainID tableland.ChainID - store sqlstore.SystemStore - callCount instrument.Int64Counter - latencyHistogram instrument.Int64Histogram -} - -// NewInstrumentedSystemStore creates a new db pool and instantiate both the user and system stores. -func NewInstrumentedSystemStore(chainID tableland.ChainID, store sqlstore.SystemStore) (sqlstore.SystemStore, error) { - meter := global.MeterProvider().Meter("tableland") - callCount, err := meter.Int64Counter("tableland.sqlstore.call.count") - if err != nil { - return &InstrumentedSystemStore{}, fmt.Errorf("registering call counter: %s", err) - } - latencyHistogram, err := meter.Int64Histogram("tableland.sqlstore.call.latency") - if err != nil { - return &InstrumentedSystemStore{}, fmt.Errorf("registering latency histogram: %s", err) - } - - return &InstrumentedSystemStore{ - chainID: chainID, - store: store, - callCount: callCount, - latencyHistogram: latencyHistogram, - }, nil -} - -// SetReadResolver sets the resolver for read queries. -func (s *InstrumentedSystemStore) SetReadResolver(resolver sqlparser.ReadStatementResolver) { - s.store.SetReadResolver(resolver) -} - -// GetTable fetchs a table from its UUID. -func (s *InstrumentedSystemStore) GetTable(ctx context.Context, id tables.TableID) (sqlstore.Table, error) { - start := time.Now() - table, err := s.store.GetTable(ctx, id) - latency := time.Since(start).Milliseconds() - - // NOTE: we may face a risk of high-cardilatity in the future. This should be revised. - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("GetTable")}, - {Key: "id", Value: attribute.StringValue(id.String())}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return table, err -} - -// GetTablesByController fetchs a table from controller address. 
-func (s *InstrumentedSystemStore) GetTablesByController( - ctx context.Context, - controller string, -) ([]sqlstore.Table, error) { - start := time.Now() - tables, err := s.store.GetTablesByController(ctx, controller) - latency := time.Since(start).Milliseconds() - - // NOTE: we may face a risk of high-cardilatity in the future. This should be revised. - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("GetTablesByController")}, - {Key: "controller", Value: attribute.StringValue(controller)}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return tables, err -} - -// GetTablesByStructure gets all tables with a particular structure hash. -func (s *InstrumentedSystemStore) GetTablesByStructure( - ctx context.Context, - structure string, -) ([]sqlstore.Table, error) { - start := time.Now() - tables, err := s.store.GetTablesByStructure(ctx, structure) - latency := time.Since(start).Milliseconds() - - // NOTE: we may face a risk of high-cardilatity in the future. This should be revised. - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("GetTablesByStructure")}, - {Key: "structure", Value: attribute.StringValue(structure)}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return tables, err -} - -// GetSchemaByTableName get the schema of a table by its name. -func (s *InstrumentedSystemStore) GetSchemaByTableName(ctx context.Context, name string) (sqlstore.TableSchema, error) { - start := time.Now() - tables, err := s.store.GetSchemaByTableName(ctx, name) - latency := time.Since(start).Milliseconds() - - // NOTE: we may face a risk of high-cardilatity in the future. This should be revised. - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("GetSchemaByTableName")}, - {Key: "name", Value: attribute.StringValue(name)}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return tables, err -} - -// GetACLOnTableByController increments the counter. -func (s *InstrumentedSystemStore) GetACLOnTableByController( - ctx context.Context, - table tables.TableID, - address string, -) (sqlstore.SystemACL, error) { - start := time.Now() - systemACL, err := s.store.GetACLOnTableByController(ctx, table, address) - latency := time.Since(start).Milliseconds() - - // NOTE: we may face a risk of high-cardilatity in the future. This should be revised. - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("GetACLOnTableByController")}, - {Key: "address", Value: attribute.StringValue(address)}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return systemACL, err -} - -// ListPendingTx lists all pendings txs. 
-func (s *InstrumentedSystemStore) ListPendingTx( - ctx context.Context, - addr common.Address, -) ([]nonce.PendingTx, error) { - start := time.Now() - data, err := s.store.ListPendingTx(ctx, addr) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("ListPendingTx")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return data, err -} - -// InsertPendingTx insert a new pending tx. -func (s *InstrumentedSystemStore) InsertPendingTx( - ctx context.Context, - addr common.Address, - nonce int64, - hash common.Hash, -) error { - start := time.Now() - err := s.store.InsertPendingTx(ctx, addr, nonce, hash) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("InsertPendingTx")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return err -} - -// DeletePendingTxByHash deletes a pending tx. -func (s *InstrumentedSystemStore) DeletePendingTxByHash(ctx context.Context, hash common.Hash) error { - start := time.Now() - err := s.store.DeletePendingTxByHash(ctx, hash) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("DeletePendingTxByHash")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return err -} - -// ReplacePendingTxByHash replaces a pending txn hash and bumps the counter on how many times this happened. -func (s *InstrumentedSystemStore) ReplacePendingTxByHash( - ctx context.Context, - oldHash common.Hash, - newHash common.Hash, -) error { - start := time.Now() - err := s.store.ReplacePendingTxByHash(ctx, oldHash, newHash) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("ReplacePendingTxByHash")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return err -} - -// Close closes the connection pool. -func (s *InstrumentedSystemStore) Close() error { - return s.store.Close() -} - -// WithTx returns a copy of the current InstrumentedSQLStore with a tx attached. -func (s *InstrumentedSystemStore) WithTx(tx *sql.Tx) sqlstore.SystemStore { - return s.store.WithTx(tx) -} - -// Begin returns a new tx. -func (s *InstrumentedSystemStore) Begin(ctx context.Context) (*sql.Tx, error) { - return s.store.Begin(ctx) -} - -// GetReceipt returns the receipt of a processed event by txn hash. 
-func (s *InstrumentedSystemStore) GetReceipt( - ctx context.Context, - txnHash string, -) (eventprocessor.Receipt, bool, error) { - log.Debug().Str("txn_hash", txnHash).Msg("call GetReceipt") - start := time.Now() - receipt, ok, err := s.store.GetReceipt(ctx, txnHash) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("GetReceipt")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return receipt, ok, err -} - -// AreEVMEventsPersisted implements sqlstore.SystemStore. -func (s *InstrumentedSystemStore) AreEVMEventsPersisted(ctx context.Context, txnHash common.Hash) (bool, error) { - log.Debug().Str("txn_hash", txnHash.Hex()).Msg("call AreEVMEventsPersisted") - start := time.Now() - ok, err := s.store.AreEVMEventsPersisted(ctx, txnHash) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("AreEVMEventsPersisted")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return ok, err -} - -// SaveEVMEvents implements sqlstore.SystemStore. -func (s *InstrumentedSystemStore) SaveEVMEvents(ctx context.Context, events []tableland.EVMEvent) error { - log.Debug().Msg("call SaveEVMEvents") - start := time.Now() - err := s.store.SaveEVMEvents(ctx, events) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("SaveEVMEvents")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return err -} - -// GetEVMEvents implements sqlstore.SystemStore. -func (s *InstrumentedSystemStore) GetEVMEvents(ctx context.Context, txnHash common.Hash) ([]tableland.EVMEvent, error) { - log.Debug().Str("txn_hash", txnHash.Hex()).Msg("call GetEVMEvents") - start := time.Now() - events, err := s.store.GetEVMEvents(ctx, txnHash) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("GetEVMEvents")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return events, err -} - -// GetBlocksMissingExtraInfo implements sqlstore.SystemStore. -func (s *InstrumentedSystemStore) GetBlocksMissingExtraInfo( - ctx context.Context, - fromHeight *int64, -) ([]int64, error) { - start := time.Now() - blockNumbers, err := s.store.GetBlocksMissingExtraInfo(ctx, fromHeight) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("GetBlocksMissingExtraInfo")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) 
- - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return blockNumbers, err -} - -// GetBlockExtraInfo implements sqlstore.SystemStore. -func (s *InstrumentedSystemStore) GetBlockExtraInfo( - ctx context.Context, - blockNumber int64, -) (tableland.EVMBlockInfo, error) { - log.Debug().Msg("call GetBlockExtraInfo") - start := time.Now() - blockInfo, err := s.store.GetBlockExtraInfo(ctx, blockNumber) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("GetBlockExtraInfo")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return blockInfo, err -} - -// InsertBlockExtraInfo implements sqlstore.SystemStore. -func (s *InstrumentedSystemStore) InsertBlockExtraInfo(ctx context.Context, blockNumber int64, timestamp uint64) error { - start := time.Now() - err := s.store.InsertBlockExtraInfo(ctx, blockNumber, timestamp) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("InsertBlockExtraInfo")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - {Key: "chainID", Value: attribute.Int64Value(int64(s.chainID))}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return err -} - -// GetID returns node identifier. -func (s *InstrumentedSystemStore) GetID(ctx context.Context) (string, error) { - start := time.Now() - id, err := s.store.GetID(ctx) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("Id")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) - - return id, err -} - -// Read executes a read statement on the db. -func (s *InstrumentedSystemStore) Read(ctx context.Context, stmt parsing.ReadStmt) (*tableland.TableData, error) { - start := time.Now() - data, err := s.store.Read(ctx, stmt) - latency := time.Since(start).Milliseconds() - - attributes := append([]attribute.KeyValue{ - {Key: "method", Value: attribute.StringValue("Read")}, - {Key: "success", Value: attribute.BoolValue(err == nil)}, - }, metrics.BaseAttrs...) - - s.callCount.Add(ctx, 1, attributes...) - s.latencyHistogram.Record(ctx, latency, attributes...) 
- - return data, err -} diff --git a/pkg/sqlstore/impl/system/store_test.go b/pkg/sqlstore/impl/system/store_test.go deleted file mode 100644 index 115a5025..00000000 --- a/pkg/sqlstore/impl/system/store_test.go +++ /dev/null @@ -1,218 +0,0 @@ -package system - -import ( - "context" - "encoding/json" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" - "github.com/textileio/go-tableland/internal/tableland" - "github.com/textileio/go-tableland/tests" -) - -func TestEVMEventPersistence(t *testing.T) { - t.Parallel() - - ctx := context.Background() - dbURI := tests.Sqlite3URI(t) - - chainID := tableland.ChainID(1337) - - store, err := New(dbURI, chainID) - require.NoError(t, err) - - testData := []tableland.EVMEvent{ - { - Address: common.HexToAddress("0x10"), - Topics: []byte(`["0x111,"0x122"]`), - Data: []byte("data1"), - BlockNumber: 1, - TxHash: common.HexToHash("0x11"), - TxIndex: 11, - BlockHash: common.HexToHash("0x12"), - Index: 12, - ChainID: chainID, - EventJSON: []byte("eventjson1"), - EventType: "Type1", - }, - { - Address: common.HexToAddress("0x20"), - Topics: []byte(`["0x211,"0x222"]`), - Data: []byte("data2"), - BlockNumber: 2, - TxHash: common.HexToHash("0x21"), - TxIndex: 11, - BlockHash: common.HexToHash("0x22"), - Index: 12, - ChainID: chainID, - EventJSON: []byte("eventjson2"), - EventType: "Type2", - }, - } - - // Check that AreEVMEventsPersisted for the future txn hashes aren't found. - for _, event := range testData { - exists, err := store.AreEVMEventsPersisted(ctx, event.TxHash) - require.NoError(t, err) - require.False(t, exists) - } - - err = store.SaveEVMEvents(ctx, testData) - require.NoError(t, err) - - // Check that AreEVMEventsPersisted for the future txn hashes are found, and the data matches. 
- for _, event := range testData { - exists, err := store.AreEVMEventsPersisted(ctx, event.TxHash) - require.NoError(t, err) - require.True(t, exists) - - events, err := store.GetEVMEvents(ctx, event.TxHash) - require.NoError(t, err) - require.Len(t, events, 1) - - require.Equal(t, events[0].Address, event.Address) - require.Equal(t, events[0].Topics, event.Topics) - require.Equal(t, events[0].Data, event.Data) - require.Equal(t, events[0].BlockNumber, event.BlockNumber) - require.Equal(t, events[0].TxHash, event.TxHash) - require.Equal(t, events[0].TxIndex, event.TxIndex) - require.Equal(t, events[0].BlockHash, event.BlockHash) - require.Equal(t, events[0].Index, event.Index) - require.Equal(t, events[0].ChainID, chainID) - require.Equal(t, events[0].EventJSON, event.EventJSON) - require.Equal(t, events[0].EventType, event.EventType) - } -} - -func TestUserValue(t *testing.T) { - uv := &tableland.ColumnValue{} - - var in0 int64 = 100 - require.NoError(t, uv.Scan(in0)) - val := uv.Value() - v0, ok := val.(int64) - require.True(t, ok) - require.Equal(t, in0, v0) - b, err := json.Marshal(uv) - require.NoError(t, err) - var out0 int64 - require.NoError(t, json.Unmarshal(b, &out0)) - require.Equal(t, in0, out0) - - in1 := 100.0 - require.NoError(t, uv.Scan(in1)) - val = uv.Value() - v1, ok := val.(float64) - require.True(t, ok) - require.Equal(t, in1, v1) - b, err = json.Marshal(uv) - require.NoError(t, err) - var out1 float64 - require.NoError(t, json.Unmarshal(b, &out1)) - require.Equal(t, in1, out1) - - in2 := true - require.NoError(t, uv.Scan(in2)) - val = uv.Value() - v2, ok := val.(bool) - require.True(t, ok) - require.Equal(t, in2, v2) - b, err = json.Marshal(uv) - require.NoError(t, err) - var out2 bool - require.NoError(t, json.Unmarshal(b, &out2)) - require.Equal(t, in2, out2) - - in3 := []byte("hello there") - require.NoError(t, uv.Scan(in3)) - val = uv.Value() - v3, ok := val.([]byte) - require.True(t, ok) - require.Equal(t, in3, v3) - b, err = json.Marshal(uv) - require.NoError(t, err) - var out3 []byte - require.NoError(t, json.Unmarshal(b, &out3)) - require.Equal(t, in3, out3) - - in4 := "hello" - require.NoError(t, uv.Scan(in4)) - val = uv.Value() - v4, ok := val.(string) - require.True(t, ok) - require.Equal(t, in4, v4) - b, err = json.Marshal(uv) - require.NoError(t, err) - var out4 string - require.NoError(t, json.Unmarshal(b, &out4)) - require.Equal(t, in4, out4) - - in5 := time.Now() - require.NoError(t, uv.Scan(in5)) - val = uv.Value() - v5, ok := val.(time.Time) - require.True(t, ok) - require.Equal(t, in5, v5) - b, err = json.Marshal(uv) - require.NoError(t, err) - var out5 time.Time - require.NoError(t, json.Unmarshal(b, &out5)) - require.Equal(t, in5.Unix(), out5.Unix()) - - var in6 interface{} - require.NoError(t, uv.Scan(in6)) - val = uv.Value() - require.Nil(t, val) - require.Equal(t, in6, val) - b, err = json.Marshal(uv) - require.NoError(t, err) - var out6 interface{} - require.NoError(t, json.Unmarshal(b, &out6)) - require.Equal(t, in6, out6) - - in7 := "{ \"hello" - require.NoError(t, uv.Scan(in7)) - val = uv.Value() - v7, ok := val.(string) - require.True(t, ok) - require.Equal(t, in7, v7) - b, err = json.Marshal(uv) - require.NoError(t, err) - var out7 string - require.NoError(t, json.Unmarshal(b, &out7)) - require.Equal(t, in7, out7) - - in8 := "[ \"hello" - require.NoError(t, uv.Scan(in8)) - val = uv.Value() - v8, ok := val.(string) - require.True(t, ok) - require.Equal(t, in8, v8) - b, err = json.Marshal(uv) - require.NoError(t, err) - var out8 string - 
require.NoError(t, json.Unmarshal(b, &out8)) - require.Equal(t, in8, out8) - - in9 := "{\"name\":\"aaron\"}" - require.NoError(t, uv.Scan(in9)) - val = uv.Value() - v9, ok := val.(json.RawMessage) - require.True(t, ok) - require.Greater(t, len(v9), 0) - b, err = json.Marshal(uv) - require.NoError(t, err) - require.Equal(t, in9, string(b)) - - in10 := "[\"one\",\"two\"]" - require.NoError(t, uv.Scan(in10)) - val = uv.Value() - v10, ok := val.(json.RawMessage) - require.True(t, ok) - require.Greater(t, len(v10), 0) - b, err = json.Marshal(uv) - require.NoError(t, err) - require.Equal(t, in10, string(b)) -} diff --git a/pkg/sqlstore/store.go b/pkg/sqlstore/store.go deleted file mode 100644 index f9bbe9e9..00000000 --- a/pkg/sqlstore/store.go +++ /dev/null @@ -1,50 +0,0 @@ -package sqlstore - -import ( - "context" - "database/sql" - - "github.com/ethereum/go-ethereum/common" - "github.com/tablelandnetwork/sqlparser" - "github.com/textileio/go-tableland/internal/tableland" - "github.com/textileio/go-tableland/pkg/eventprocessor" - "github.com/textileio/go-tableland/pkg/nonce" - "github.com/textileio/go-tableland/pkg/parsing" - "github.com/textileio/go-tableland/pkg/tables" -) - -// SystemStore defines the methods for interacting with system-wide data. -type SystemStore interface { - Read(context.Context, parsing.ReadStmt) (*tableland.TableData, error) - - GetTable(context.Context, tables.TableID) (Table, error) - GetTablesByController(context.Context, string) ([]Table, error) - - GetACLOnTableByController(context.Context, tables.TableID, string) (SystemACL, error) - - ListPendingTx(context.Context, common.Address) ([]nonce.PendingTx, error) - InsertPendingTx(context.Context, common.Address, int64, common.Hash) error - DeletePendingTxByHash(context.Context, common.Hash) error - ReplacePendingTxByHash(context.Context, common.Hash, common.Hash) error - - GetReceipt(context.Context, string) (eventprocessor.Receipt, bool, error) - - GetTablesByStructure(context.Context, string) ([]Table, error) - GetSchemaByTableName(context.Context, string) (TableSchema, error) - - AreEVMEventsPersisted(context.Context, common.Hash) (bool, error) - SaveEVMEvents(context.Context, []tableland.EVMEvent) error - GetEVMEvents(context.Context, common.Hash) ([]tableland.EVMEvent, error) - GetBlocksMissingExtraInfo(context.Context, *int64) ([]int64, error) - InsertBlockExtraInfo(context.Context, int64, uint64) error - GetBlockExtraInfo(context.Context, int64) (tableland.EVMBlockInfo, error) - - GetID(context.Context) (string, error) - - Begin(context.Context) (*sql.Tx, error) - WithTx(tx *sql.Tx) SystemStore - - SetReadResolver(resolver sqlparser.ReadStatementResolver) - - Close() error -} diff --git a/pkg/sqlstore/table.go b/pkg/sqlstore/table.go deleted file mode 100644 index 45e32486..00000000 --- a/pkg/sqlstore/table.go +++ /dev/null @@ -1,77 +0,0 @@ -package sqlstore - -import ( - "fmt" - "time" - - "github.com/textileio/go-tableland/internal/tableland" - "github.com/textileio/go-tableland/pkg/tables" -) - -// Table represents a system-wide table stored in Tableland. -type Table struct { - ID tables.TableID `json:"id"` // table id - ChainID tableland.ChainID `json:"chain_id"` - Controller string `json:"controller"` // controller address - Prefix string `json:"prefix"` - Structure string `json:"structure"` - CreatedAt time.Time `json:"created_at"` -} - -// Name returns table's full name. 
-func (t Table) Name() string { - return fmt.Sprintf("%s_%d_%s", t.Prefix, t.ChainID, t.ID) -} - -// TableSchema represents the schema of a table. -type TableSchema struct { - Columns []ColumnSchema - TableConstraints []string -} - -// ColumnSchema represents the schema of a column. -type ColumnSchema struct { - Name string - Type string - Constraints []string -} - -// TableMetadata represents table metadata (OpenSea standard). -type TableMetadata struct { - Name string `json:"name,omitempty"` - ExternalURL string `json:"external_url"` - Image string `json:"image"` - Message string `json:"message,omitempty"` - AnimationURL string `json:"animation_url,omitempty"` - Attributes []TableMetadataAttribute `json:"attributes,omitempty"` - Schema TableSchema `json:"schema"` -} - -// TableMetadataAttribute represents the table metadata attribute. -type TableMetadataAttribute struct { - DisplayType string `json:"display_type"` - TraitType string `json:"trait_type"` - Value interface{} `json:"value"` -} - -// SystemACL represents the system acl table. -type SystemACL struct { - Controller string - ChainID tableland.ChainID - TableID tables.TableID - Privileges tableland.Privileges - CreatedAt time.Time - UpdatedAt *time.Time -} - -// Receipt represents a Tableland receipt. -type Receipt struct { - ChainID tableland.ChainID - BlockNumber int64 - IndexInBlock int64 - TxnHash string - - TableID *tables.TableID - Error *string - ErrorEventIdx *int -} diff --git a/pkg/tables/impl/ethereum/client_test.go b/pkg/tables/impl/ethereum/client_test.go index c218af62..53c97e8c 100644 --- a/pkg/tables/impl/ethereum/client_test.go +++ b/pkg/tables/impl/ethereum/client_test.go @@ -19,8 +19,8 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/database" nonceimpl "github.com/textileio/go-tableland/pkg/nonce/impl" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum/test/controller" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum/test/erc721Enumerable" @@ -447,13 +447,13 @@ func setupWithLocalTracker(t *testing.T) ( url := tests.Sqlite3URI(t) - systemStore, err := system.New(url, tableland.ChainID(1337)) + db, err := database.Open(url, 1) require.NoError(t, err) tracker, err := nonceimpl.NewLocalTracker( context.Background(), w, - nonceimpl.NewNonceStore(systemStore), + nonceimpl.NewNonceStore(db), tableland.ChainID(1337), backend, 5*time.Second, diff --git a/tests/fullstack/fullstack.go b/tests/fullstack/fullstack.go index 19461238..ba1bbb03 100644 --- a/tests/fullstack/fullstack.go +++ b/tests/fullstack/fullstack.go @@ -1,8 +1,6 @@ package fullstack import ( - "context" - "database/sql" "encoding/hex" "net/http/httptest" "testing" @@ -13,21 +11,21 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" - "github.com/textileio/go-tableland/internal/chains" "github.com/textileio/go-tableland/internal/gateway" + gatewayimpl "github.com/textileio/go-tableland/internal/gateway/impl" "github.com/textileio/go-tableland/internal/router" "github.com/textileio/go-tableland/internal/tableland" "github.com/textileio/go-tableland/internal/tableland/impl" + "github.com/textileio/go-tableland/pkg/database" "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" efimpl 
"github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed/impl" epimpl "github.com/textileio/go-tableland/pkg/eventprocessor/impl" executor "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor/impl" "github.com/textileio/go-tableland/pkg/parsing" parserimpl "github.com/textileio/go-tableland/pkg/parsing/impl" + "github.com/textileio/go-tableland/pkg/sharedmemory" - "github.com/textileio/go-tableland/pkg/sqlstore" - sqlstoreimplsystem "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" - "github.com/textileio/go-tableland/pkg/tables" + "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" "github.com/textileio/go-tableland/pkg/tables/impl/testutil" "github.com/textileio/go-tableland/pkg/wallet" @@ -52,7 +50,7 @@ type FullStack struct { type Deps struct { DBURI string Parser parsing.SQLValidator - SystemStore sqlstore.SystemStore + Database *database.SQLiteDB ACL tableland.ACL GatewayService gateway.Gateway } @@ -74,9 +72,9 @@ func CreateFullStack(t *testing.T, deps Deps) FullStack { dbURI = tests.Sqlite3URI(t) } - systemStore := deps.SystemStore - if systemStore == nil { - systemStore, err = sqlstoreimplsystem.New(dbURI, ChainID) + db := deps.Database + if db == nil { + db, err = database.Open(dbURI, 1) require.NoError(t, err) } @@ -85,13 +83,9 @@ func CreateFullStack(t *testing.T, deps Deps) FullStack { wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(sk))) require.NoError(t, err) - db, err := sql.Open("sqlite3", dbURI) - require.NoError(t, err) - db.SetMaxOpenConns(1) - acl := deps.ACL if acl == nil { - acl = &aclHalfMock{systemStore} + acl = impl.NewACL(db) } ex, err := executor.NewExecutor(1337, db, parser, 0, acl) @@ -102,7 +96,7 @@ func CreateFullStack(t *testing.T, deps Deps) FullStack { // Spin up dependencies needed for the EventProcessor. // i.e: Executor, Parser, and EventFeed (connected to the EVM chain) ef, err := efimpl.New( - systemStore, + efimpl.NewEventFeedStore(db), ChainID, backend, addr, @@ -121,24 +115,13 @@ func CreateFullStack(t *testing.T, deps Deps) FullStack { ep.Stop() }) - chainStacks := map[tableland.ChainID]chains.ChainStack{ - 1337: { - Store: systemStore, - EventProcessor: ep, - }, - } - - stores := make(map[tableland.ChainID]sqlstore.SystemStore, len(chainStacks)) - for chainID, stack := range chainStacks { - stack.Store.SetReadResolver(parsing.NewReadStatementResolver(sm)) - stores[chainID] = stack.Store - } - gatewayService := deps.GatewayService if gatewayService == nil { gatewayService, err = gateway.NewGateway( parser, - stores, + gatewayimpl.NewGatewayStore( + db, parsing.NewReadStatementResolver(sm), + ), "https://testnets.tableland.network", "https://tables.tableland.xyz", "https://tables.tableland.xyz", @@ -163,22 +146,3 @@ func CreateFullStack(t *testing.T, deps Deps) FullStack { Server: server, } } - -type aclHalfMock struct { - sqlStore sqlstore.SystemStore -} - -func (acl *aclHalfMock) CheckPrivileges( - ctx context.Context, - tx *sql.Tx, - controller common.Address, - id tables.TableID, - op tableland.Operation, -) (bool, error) { - aclImpl := impl.NewACL(acl.sqlStore) - return aclImpl.CheckPrivileges(ctx, tx, controller, id, op) -} - -func (acl *aclHalfMock) IsOwner(_ context.Context, _ common.Address, _ tables.TableID) (bool, error) { - return true, nil -} diff --git a/tests/sqlite3.go b/tests/sqlite3.go index 6d318833..e2078124 100644 --- a/tests/sqlite3.go +++ b/tests/sqlite3.go @@ -11,8 +11,9 @@ import ( // Sqlite3URI returns a URI to spinup an in-memory Sqlite database. 
func Sqlite3URI(t *testing.T) string { - dbURI := "file::" + uuid.NewString() + ":?mode=memory&cache=shared&_foreign_keys=on" + dbURI := "file::" + uuid.NewString() + ":?mode=memory&cache=shared&_foreign_keys=on&_busy_timeout=5000" db, err := sql.Open("sqlite3", dbURI) + db.SetMaxOpenConns(1) require.NoError(t, err) conn, err := db.Conn(context.Background()) require.NoError(t, err) From 7ec4212dcf4b2d9de99dfd86148968312cc22df6 Mon Sep 17 00:00:00 2001 From: Bruno Calza Date: Thu, 6 Apr 2023 15:27:01 -0300 Subject: [PATCH 05/14] removes commented code Signed-off-by: Bruno Calza --- internal/tableland/impl/acl.go | 2 -- pkg/nonce/impl/tracker_test.go | 1 - 2 files changed, 3 deletions(-) diff --git a/internal/tableland/impl/acl.go b/internal/tableland/impl/acl.go index 423c1b87..94833da7 100644 --- a/internal/tableland/impl/acl.go +++ b/internal/tableland/impl/acl.go @@ -94,6 +94,4 @@ type SystemACL struct { ChainID tableland.ChainID TableID tables.TableID Privileges tableland.Privileges - // CreatedAt time.Time - // UpdatedAt *time.Time } diff --git a/pkg/nonce/impl/tracker_test.go b/pkg/nonce/impl/tracker_test.go index 73f858a8..f1dce8fb 100644 --- a/pkg/nonce/impl/tracker_test.go +++ b/pkg/nonce/impl/tracker_test.go @@ -150,7 +150,6 @@ func TestInitialization(t *testing.T) { require.NoError(t, err) db, err := database.Open(url, 1) - // sqlstore, err := system.New(url, tableland.ChainID(1337)) require.NoError(t, err) // initialize without pending txs From 17aeeb54992ec5f3134a13158162989cde67540e Mon Sep 17 00:00:00 2001 From: Bruno Calza Date: Fri, 7 Apr 2023 09:51:08 -0300 Subject: [PATCH 06/14] renames function for clarity Signed-off-by: Bruno Calza --- internal/tableland/impl/acl.go | 5 +++-- pkg/eventprocessor/eventfeed/impl/eventfeed_test.go | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/internal/tableland/impl/acl.go b/internal/tableland/impl/acl.go index 94833da7..0c883b1b 100644 --- a/internal/tableland/impl/acl.go +++ b/internal/tableland/impl/acl.go @@ -48,7 +48,7 @@ func (acl *ACLStore) CheckPrivileges( return false, fmt.Errorf("privileges lookup: %s", err) } - aclRule, err := aclFromSQLtoDTO(row) + aclRule, err := transformToObject(row) if err != nil { return false, fmt.Errorf("transforming to dto: %s", err) } @@ -61,7 +61,8 @@ func (acl *ACLStore) CheckPrivileges( return true, nil } -func aclFromSQLtoDTO(acl db.SystemAcl) (SystemACL, error) { +// transforms the ACL data transfer object to ACL object model. 
+func transformToObject(acl db.SystemAcl) (SystemACL, error) { id, err := tables.NewTableIDFromInt64(acl.TableID) if err != nil { return SystemACL{}, fmt.Errorf("parsing id to string: %s", err) diff --git a/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go b/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go index 5145ebb5..b3bfee5b 100644 --- a/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go +++ b/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go @@ -354,10 +354,10 @@ func TestDuplicateEvents(t *testing.T) { db, err := database.Open(dbURI, 1) require.NoError(t, err) - efStore := NewEventFeedStore(db) + eventStore := NewEventFeedStore(db) ef, err := New( - efStore, + eventStore, 1337, duplicateEventsChainClient{}, common.HexToAddress("0x0b9737ab4b3e5303cb67db031b509697e31c02d3"), @@ -377,7 +377,7 @@ func TestDuplicateEvents(t *testing.T) { select { case bes := <-ch: - persistedEvents, err := efStore.GetEVMEvents(context.Background(), 1337, bes.Txns[0].TxnHash) + persistedEvents, err := eventStore.GetEVMEvents(context.Background(), 1337, bes.Txns[0].TxnHash) require.NoError(t, err) require.Len(t, persistedEvents, 1) From 6e5f4ba0c3423371f4159b13643da52c0a83a4f5 Mon Sep 17 00:00:00 2001 From: Bruno Calza Date: Tue, 11 Apr 2023 14:06:17 -0300 Subject: [PATCH 07/14] adds createdat to metadata Signed-off-by: Bruno Calza --- internal/gateway/impl/gateway_store.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/gateway/impl/gateway_store.go b/internal/gateway/impl/gateway_store.go index 389b2cc2..e5f30ec8 100644 --- a/internal/gateway/impl/gateway_store.go +++ b/internal/gateway/impl/gateway_store.go @@ -5,6 +5,7 @@ import ( "database/sql" "fmt" "strings" + "time" "github.com/tablelandnetwork/sqlparser" "github.com/textileio/go-tableland/internal/gateway" @@ -69,6 +70,7 @@ func (s *GatewayStore) GetTable( Controller: table.Controller, Prefix: table.Prefix, Structure: table.Structure, + CreatedAt: time.Unix(table.CreatedAt, 0), }, nil } From de4bb1e316b261ec31f64d75a71da5e84f4ceeca Mon Sep 17 00:00:00 2001 From: Bruno Calza Date: Tue, 11 Apr 2023 14:57:20 -0300 Subject: [PATCH 08/14] changes the otel middleware order we were missing importante http metrics because otel was the last middleware Signed-off-by: Bruno Calza --- .../dashboards/validator-dashboard.json | 129 ++---------------- internal/router/router.go | 2 +- 2 files changed, 10 insertions(+), 121 deletions(-) diff --git a/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json b/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json index 3ee09a1a..cd50c87c 100644 --- a/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json +++ b/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json @@ -1196,7 +1196,7 @@ "refId": "A" } ], - "title": "APIs", + "title": "API", "type": "row" }, { @@ -1279,117 +1279,6 @@ "x": 0, "y": 33 }, - "id": 37, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "P1809F7CD0C75ACF3" - }, - "editorMode": "code", - "exemplar": true, - "expr": "sum by (http_status_code) (rate(http_server_request_count_total{service_name=\"tableland:api\"}[10m]))", - "interval": "", - "legendFormat": "{{http_status_code}}-{{http_server_name}}", - "range": true, - "refId": "A" - } - ], - "title": "Status Code Rate", - "type": 
"timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "P1809F7CD0C75ACF3" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "reqps" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "500" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "red", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 6, - "y": 33 - }, "id": 79, "options": { "legend": { @@ -1411,7 +1300,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (service_name) (rate(http_server_request_count_total{service_name=\"tableland:api\"}[5m]))", + "expr": "sum by (http_status_code) (rate(http_server_request_count_total{service_name=\"tableland:api\"}[$__rate_interval]))", "interval": "", "legendFormat": "__auto", "range": true, @@ -1507,7 +1396,7 @@ "gridPos": { "h": 8, "w": 6, - "x": 12, + "x": 6, "y": 33 }, "id": 15, @@ -1531,7 +1420,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{}[5m])) by (service_name, le))", + "expr": "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{service_name=\"tableland:api\"}[$__rate_interval])) by (http_status_code, le))", "interval": "", "legendFormat": "__auto", "range": true, @@ -1618,7 +1507,7 @@ "gridPos": { "h": 8, "w": 6, - "x": 18, + "x": 12, "y": 33 }, "id": 80, @@ -1649,7 +1538,7 @@ "refId": "A" } ], - "title": "API Response bytes/s ", + "title": "Response bytes/s ", "type": "timeseries" }, { @@ -1749,7 +1638,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (method) (\n rate(tableland_gateway_call_latency_count{service_name=\"tableland:api\"}[5m])\n)", + "expr": "sum by (method) (\n rate(tableland_gateway_call_latency_count{service_name=\"tableland:api\"}[$__rate_interval])\n)", "interval": "", "legendFormat": "{{method}}", "range": true, @@ -1847,7 +1736,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "histogram_quantile(0.95, sum(rate(tableland_gateway_call_latency_bucket{service_name=\"tableland:api\"}[5m])) by (le, method))", + "expr": "histogram_quantile(0.95, sum(rate(tableland_gateway_call_latency_bucket{service_name=\"tableland:api\"}[$__rate_interval])) by (le, method))", "format": "heatmap", "instant": false, "interval": "", @@ -2692,6 +2581,6 @@ "timezone": "", "title": "Validator", "uid": "2Le7qt_7z", - "version": 7, + "version": 19, "weekStart": "" } \ No newline at end of file diff --git a/internal/router/router.go b/internal/router/router.go index 25e6e190..29f07d6f 100644 --- a/internal/router/router.go +++ b/internal/router/router.go @@ -98,7 +98,7 @@ func configureAPIV1Routes( router.get( pathTemplate, endpoint.handler, - append(endpoint.middlewares, middlewares.OtelHTTP(routeName))..., + 
append([]mux.MiddlewareFunc{middlewares.OtelHTTP(routeName)}, endpoint.middlewares...)..., ) return nil }); err != nil { From 116264fa250a385b72c6746388b769861e7e7051 Mon Sep 17 00:00:00 2001 From: Bruno Calza Date: Tue, 25 Apr 2023 11:27:06 -0300 Subject: [PATCH 09/14] makes use of only one sql.DB [stagingdeploy] the use of a serializable database was causing locking problems. we're going to rely on SQLite locking mechanism. Signed-off-by: Bruno Calza --- cmd/api/main.go | 24 ++++--------- .../dashboards/validator-dashboard.json | 36 ++++++++----------- internal/gateway/impl/gateway_store_test.go | 6 ++-- internal/tableland/impl/tableland_test.go | 2 +- pkg/database/sqlite_db.go | 16 +-------- .../eventfeed/impl/eventfeed_store_test.go | 2 +- .../eventfeed/impl/eventfeed_test.go | 8 ++--- .../impl/eventprocessor_replayhistory_test.go | 3 +- .../impl/eventprocessor_test.go | 6 ++-- .../impl/executor/impl/executor_test.go | 2 +- pkg/nonce/impl/tracker_test.go | 8 ++--- pkg/tables/impl/ethereum/client_test.go | 2 +- tests/fullstack/fullstack.go | 2 +- tests/sqlite3.go | 1 - 14 files changed, 43 insertions(+), 75 deletions(-) diff --git a/cmd/api/main.go b/cmd/api/main.go index aa198f75..57732e0c 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -72,12 +72,7 @@ func main() { path.Join(dirPath, "database.db"), ) - serializableDB, err := database.OpenSerializable(databaseURL, attribute.String("database", "main")) - if err != nil { - log.Fatal().Err(err).Msg("opening the database") - } - - concurrentDB, err := database.OpenConcurrent(databaseURL, attribute.String("database", "main")) + db, err := database.Open(databaseURL, attribute.String("database", "main")) if err != nil { log.Fatal().Err(err).Msg("opening the read database") } @@ -99,7 +94,7 @@ func main() { // Chain stacks. chainStacks, closeChainStacks, err := createChainStacks( - serializableDB, + db, parser, sm, config.Chains, @@ -110,7 +105,7 @@ func main() { } // HTTP API server. - closeHTTPServer, err := createAPIServer(config.HTTP, config.Gateway, parser, concurrentDB, sm, chainStacks) + closeHTTPServer, err := createAPIServer(config.HTTP, config.Gateway, parser, db, sm, chainStacks) if err != nil { log.Fatal().Err(err).Msg("creating HTTP server") } @@ -125,7 +120,7 @@ func main() { } // Telemetry - closeTelemetryModule, err := configureTelemetry(dirPath, concurrentDB, chainStacks, config.TelemetryPublisher) + closeTelemetryModule, err := configureTelemetry(dirPath, db, chainStacks, config.TelemetryPublisher) if err != nil { log.Fatal().Err(err).Msg("configuring telemetry") } @@ -152,14 +147,9 @@ func main() { log.Error().Err(err).Msg("closing backuper") } - // Close serializable database - if err := serializableDB.Close(); err != nil { - log.Error().Err(err).Msg("closing serializable db backuper") - } - - // Close concurrent database - if err := concurrentDB.Close(); err != nil { - log.Error().Err(err).Msg("closing concurrent db backuper") + // Close database + if err := db.Close(); err != nil { + log.Error().Err(err).Msg("closing db") } // Close telemetry. 
diff --git a/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json b/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json index cd50c87c..d279d161 100644 --- a/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json +++ b/docker/observability/grafana/provisioning/dashboards/validator-dashboard.json @@ -1374,7 +1374,7 @@ "options": { "mode": "exclude", "names": [ - "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{}[5m])) by (le))" + "Value" ], "prefix": "All except:", "readOnly": true @@ -1420,9 +1420,9 @@ }, "editorMode": "code", "exemplar": true, - "expr": "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{service_name=\"tableland:api\"}[$__rate_interval])) by (http_status_code, le))", + "expr": "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{service_name=\"tableland:api\"}[$__rate_interval])) by (le))", "interval": "", - "legendFormat": "__auto", + "legendFormat": "{{label_name}}", "range": true, "refId": "A" } @@ -1531,7 +1531,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "rate(http_server_response_content_length_total[$__rate_interval])", + "expr": "sum (rate(http_server_response_content_length_total[$__rate_interval]))", "interval": "", "legendFormat": "{{http_status_code}}", "range": true, @@ -1818,8 +1818,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1910,8 +1909,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2004,8 +2002,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2103,8 +2100,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2227,8 +2223,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2323,8 +2318,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2419,8 +2413,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2515,8 +2508,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2574,13 +2566,13 @@ "list": [] }, "time": { - "from": "now-30m", + "from": "now-5m", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Validator", "uid": "2Le7qt_7z", - "version": 19, + "version": 23, "weekStart": "" } \ No newline at end of file diff --git a/internal/gateway/impl/gateway_store_test.go b/internal/gateway/impl/gateway_store_test.go index 579ba2cc..7176d223 100644 --- a/internal/gateway/impl/gateway_store_test.go +++ b/internal/gateway/impl/gateway_store_test.go @@ -61,7 +61,7 @@ func TestGateway(t *testing.T) { parser, err := parserimpl.New([]string{"system_", "registry"}) require.NoError(t, err) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) // populate the registry with a table ex, err := executor.NewExecutor(chainID, db, parser, 0, nil) @@ -114,7 +114,7 @@ func TestGetMetadata(t *testing.T) { parser, err := parserimpl.New([]string{"system_", "registry"}) require.NoError(t, err) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) // populate the registry with a table @@ -267,7 +267,7 @@ func TestQueryConstraints(t *testing.T) { 
t.Parallel() dbURI := tests.Sqlite3URI(t) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) parsingOpts := []parsing.Option{ diff --git a/internal/tableland/impl/tableland_test.go b/internal/tableland/impl/tableland_test.go index dc9de7c0..104e63ec 100644 --- a/internal/tableland/impl/tableland_test.go +++ b/internal/tableland/impl/tableland_test.go @@ -721,7 +721,7 @@ func (b *tablelandSetupBuilder) build(t *testing.T) *tablelandSetup { dbURI := tests.Sqlite3URI(t) ctx := context.Background() - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}, b.parsingOpts...) diff --git a/pkg/database/sqlite_db.go b/pkg/database/sqlite_db.go index 27cb7f5c..40dafc35 100644 --- a/pkg/database/sqlite_db.go +++ b/pkg/database/sqlite_db.go @@ -25,21 +25,8 @@ type SQLiteDB struct { Log zerolog.Logger } -// OpenSerializable opens a dSQLite database with only one connection open per time. -func OpenSerializable(path string, attributes ...attribute.KeyValue) (*SQLiteDB, error) { - attributes = append(attributes, attribute.String("type", "serializable")) - return Open(path, 1, attributes...) -} - -// OpenConcurrent opens a SQLite database that allows multiple connections. -// Should be used for reads. -func OpenConcurrent(path string, attributes ...attribute.KeyValue) (*SQLiteDB, error) { - attributes = append(attributes, attribute.String("type", "concurrent")) - return Open(path, 0, attributes...) -} - // Open opens a new SQLite database. -func Open(path string, maxOpenConnections int, attributes ...attribute.KeyValue) (*SQLiteDB, error) { +func Open(path string, attributes ...attribute.KeyValue) (*SQLiteDB, error) { log := logger.With(). Str("component", "db"). 
Logger() @@ -49,7 +36,6 @@ func Open(path string, maxOpenConnections int, attributes ...attribute.KeyValue) if err != nil { return nil, fmt.Errorf("connecting to db: %s", err) } - sqlDB.SetMaxOpenConns(maxOpenConnections) if err := otelsql.RegisterDBStatsMetrics(sqlDB, otelsql.WithAttributes( attributes..., diff --git a/pkg/eventprocessor/eventfeed/impl/eventfeed_store_test.go b/pkg/eventprocessor/eventfeed/impl/eventfeed_store_test.go index 4da30aa7..a836897e 100644 --- a/pkg/eventprocessor/eventfeed/impl/eventfeed_store_test.go +++ b/pkg/eventprocessor/eventfeed/impl/eventfeed_store_test.go @@ -20,7 +20,7 @@ func TestEVMEventPersistence(t *testing.T) { chainID := tableland.ChainID(1337) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) store := NewEventFeedStore(db) diff --git a/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go b/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go index b3bfee5b..77d9b338 100644 --- a/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go +++ b/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go @@ -30,7 +30,7 @@ func TestRunSQLEvents(t *testing.T) { t.Parallel() dbURI := tests.Sqlite3URI(t) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) backend, addr, sc, authOpts, _ := testutil.Setup(t) @@ -111,7 +111,7 @@ func TestAllEvents(t *testing.T) { t.Parallel() dbURI := tests.Sqlite3URI(t) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) store := NewEventFeedStore(db) @@ -302,7 +302,7 @@ func TestInfura(t *testing.T) { rinkebyContractAddr := common.HexToAddress("0x847645b7dAA32eFda757d3c10f1c82BFbB7b41D0") dbURI := tests.Sqlite3URI(t) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) ef, err := New( NewEventFeedStore(db), @@ -351,7 +351,7 @@ func TestDuplicateEvents(t *testing.T) { t.Parallel() dbURI := tests.Sqlite3URI(t) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) eventStore := NewEventFeedStore(db) diff --git a/pkg/eventprocessor/impl/eventprocessor_replayhistory_test.go b/pkg/eventprocessor/impl/eventprocessor_replayhistory_test.go index b4e9c5fd..357cb29f 100644 --- a/pkg/eventprocessor/impl/eventprocessor_replayhistory_test.go +++ b/pkg/eventprocessor/impl/eventprocessor_replayhistory_test.go @@ -80,7 +80,8 @@ func launchValidatorForAllChainsBackedByEVMHistory(t *testing.T, historyDBURI st parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) + db.DB.SetMaxOpenConns(1) require.NoError(t, err) chains := getChains(t, historyDBURI) diff --git a/pkg/eventprocessor/impl/eventprocessor_test.go b/pkg/eventprocessor/impl/eventprocessor_test.go index a122f53a..418818d6 100644 --- a/pkg/eventprocessor/impl/eventprocessor_test.go +++ b/pkg/eventprocessor/impl/eventprocessor_test.go @@ -319,7 +319,7 @@ func setup(t *testing.T) ( parser, err := parserimpl.New([]string{"system_", "registry", "sqlite_"}) require.NoError(t, err) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) sm := sharedmemory.NewSharedMemory() @@ -327,7 +327,7 @@ func setup(t *testing.T) ( require.NoError(t, err) - db2, err := database.Open(dbURI, 1) + db2, err := database.Open(dbURI) require.NoError(t, err) ef, err := efimpl.New( @@ -379,7 +379,7 @@ func setup(t *testing.T) ( } require.NoError(t, err) - db, err 
= database.Open(dbURI, 1) + db, err = database.Open(dbURI) require.NoError(t, err) tableReader := func(readQuery string) []int64 { diff --git a/pkg/eventprocessor/impl/executor/impl/executor_test.go b/pkg/eventprocessor/impl/executor/impl/executor_test.go index 7ccf4377..0aaca7f9 100644 --- a/pkg/eventprocessor/impl/executor/impl/executor_test.go +++ b/pkg/eventprocessor/impl/executor/impl/executor_test.go @@ -211,7 +211,7 @@ func newExecutor(t *testing.T, rowsLimit int) (*Executor, string) { parser := newParser(t, []string{}) - db, err := database.Open(dbURI, 1) + db, err := database.Open(dbURI) require.NoError(t, err) exec, err := NewExecutor(1337, db, parser, rowsLimit, impl.NewACL(db)) diff --git a/pkg/nonce/impl/tracker_test.go b/pkg/nonce/impl/tracker_test.go index f1dce8fb..d1eb8ec0 100644 --- a/pkg/nonce/impl/tracker_test.go +++ b/pkg/nonce/impl/tracker_test.go @@ -149,7 +149,7 @@ func TestInitialization(t *testing.T) { wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) require.NoError(t, err) - db, err := database.Open(url, 1) + db, err := database.Open(url) require.NoError(t, err) // initialize without pending txs @@ -224,7 +224,7 @@ func TestMinBlockDepth(t *testing.T) { wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) require.NoError(t, err) - db, err := database.Open(url, 1) + db, err := database.Open(url) require.NoError(t, err) testAddress := wallet.Address() @@ -384,7 +384,7 @@ func TestCheckIfPendingTxIsStuck(t *testing.T) { wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) require.NoError(t, err) - db, err := database.Open(url, 1) + db, err := database.Open(url) require.NoError(t, err) testAddress := wallet.Address() @@ -505,7 +505,7 @@ func setup(ctx context.Context, t *testing.T) ( wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) require.NoError(t, err) - db, err := database.Open(url, 1) + db, err := database.Open(url) require.NoError(t, err) tracker, err := NewLocalTracker( diff --git a/pkg/tables/impl/ethereum/client_test.go b/pkg/tables/impl/ethereum/client_test.go index 53c97e8c..c196ca7b 100644 --- a/pkg/tables/impl/ethereum/client_test.go +++ b/pkg/tables/impl/ethereum/client_test.go @@ -447,7 +447,7 @@ func setupWithLocalTracker(t *testing.T) ( url := tests.Sqlite3URI(t) - db, err := database.Open(url, 1) + db, err := database.Open(url) require.NoError(t, err) tracker, err := nonceimpl.NewLocalTracker( diff --git a/tests/fullstack/fullstack.go b/tests/fullstack/fullstack.go index ba1bbb03..1932a4ce 100644 --- a/tests/fullstack/fullstack.go +++ b/tests/fullstack/fullstack.go @@ -74,7 +74,7 @@ func CreateFullStack(t *testing.T, deps Deps) FullStack { db := deps.Database if db == nil { - db, err = database.Open(dbURI, 1) + db, err = database.Open(dbURI) require.NoError(t, err) } diff --git a/tests/sqlite3.go b/tests/sqlite3.go index e2078124..d7d794dd 100644 --- a/tests/sqlite3.go +++ b/tests/sqlite3.go @@ -13,7 +13,6 @@ import ( func Sqlite3URI(t *testing.T) string { dbURI := "file::" + uuid.NewString() + ":?mode=memory&cache=shared&_foreign_keys=on&_busy_timeout=5000" db, err := sql.Open("sqlite3", dbURI) - db.SetMaxOpenConns(1) require.NoError(t, err) conn, err := db.Conn(context.Background()) require.NoError(t, err) From 3754f54396a9411ee2c0d49d9ef470b2f8d6c5a9 Mon Sep 17 00:00:00 2001 From: avichalp Date: Mon, 10 Apr 2023 15:40:39 +0900 Subject: [PATCH 10/14] Refactor client providers Signed-off-by: avichalp --- .../counterprobe/counterprobe_test.go | 2 +- 
cmd/healthbot/main.go | 18 ++++++-- pkg/client/v1/client.go | 46 +++++++++++-------- pkg/client/v1/client_test.go | 1 + 4 files changed, 42 insertions(+), 25 deletions(-) diff --git a/cmd/healthbot/counterprobe/counterprobe_test.go b/cmd/healthbot/counterprobe/counterprobe_test.go index 58df5391..bb1cc7a5 100644 --- a/cmd/healthbot/counterprobe/counterprobe_test.go +++ b/cmd/healthbot/counterprobe/counterprobe_test.go @@ -19,7 +19,7 @@ func TestProduction(t *testing.T) { require.NoError(t, err) chain := client.Chains[client.ChainIDs.Optimism] - client, err := clientV1.NewClient(ctx, wallet, clientV1.NewClientChain(chain)) + client, err := clientV1.NewClient(ctx, wallet, clientV1.Alchemy, clientV1.NewClientChain(chain)) require.NoError(t, err) cp, err := New("optimism-mainnet", client, "Runbook_24", time.Second, time.Second*10, 1, 1) diff --git a/cmd/healthbot/main.go b/cmd/healthbot/main.go index 711498d5..82822759 100644 --- a/cmd/healthbot/main.go +++ b/cmd/healthbot/main.go @@ -57,15 +57,23 @@ func main() { if chainCfg.OverrideClient.ContractAddr != "" { chain.ContractAddr = common.HexToAddress(chainCfg.OverrideClient.ContractAddr) } - // For Filecoin Hyperspace, we use Ankr endpoint - opts := []clientV1.NewClientOption{clientV1.NewClientChain(chain)} + var client *clientV1.Client if chain.ID == 3141 { - opts = append(opts, clientV1.NewClientAnkrAPIKey(chainCfg.AnkrAPIKey)) + client, err = clientV1.NewClient( + ctx, wallet, + clientV1.Ankr, + clientV1.NewClientChain(chain), + clientV1.NewClientAnkrAPIKey(chainCfg.AlchemyAPIKey), + ) } else { - opts = append(opts, clientV1.NewClientAlchemyAPIKey(chainCfg.AlchemyAPIKey)) + client, err = clientV1.NewClient( + ctx, wallet, + clientV1.Alchemy, + clientV1.NewClientChain(chain), + clientV1.NewClientAlchemyAPIKey(chainCfg.AlchemyAPIKey), + ) } - client, err := clientV1.NewClient(ctx, wallet, opts...) if err != nil { log.Fatal().Err(err).Msg("error creating tbl client") } diff --git a/pkg/client/v1/client.go b/pkg/client/v1/client.go index ac002900..5022a918 100644 --- a/pkg/client/v1/client.go +++ b/pkg/client/v1/client.go @@ -32,6 +32,14 @@ type Client struct { baseURL *url.URL } +type Provider int + +const ( + Infura Provider = iota + Alchemy + Ankr +) + type config struct { chain *client.Chain infuraAPIKey string @@ -39,6 +47,7 @@ type config struct { ankrAPIKey string local bool contractBackend bind.ContractBackend + provider Provider } // NewClientOption controls the behavior of NewClient. @@ -87,8 +96,9 @@ func NewClientContractBackend(backend bind.ContractBackend) NewClientOption { } // NewClient creates a new Client. 
-func NewClient(ctx context.Context, wallet *wallet.Wallet, opts ...NewClientOption) (*Client, error) { +func NewClient(ctx context.Context, wallet *wallet.Wallet, provider Provider, opts ...NewClientOption) (*Client, error) { config := config{chain: &defaultChain} + config.provider = provider for _, opt := range opts { opt(&config) } @@ -141,37 +151,35 @@ func NewClient(ctx context.Context, wallet *wallet.Wallet, opts ...NewClientOpti } func getContractBackend(ctx context.Context, config config) (bind.ContractBackend, error) { - if config.contractBackend != nil && config.infuraAPIKey == "" && config.alchemyAPIKey == "" { + if config.contractBackend != nil { return config.contractBackend, nil - } else if config.infuraAPIKey != "" && config.contractBackend == nil && - config.alchemyAPIKey == "" && config.ankrAPIKey == "" { + } + + var rpcURL string + switch config.provider { + case Infura: tmpl, found := client.InfuraURLs[config.chain.ID] if !found { return nil, fmt.Errorf("chain id %v not supported for Infura", config.chain.ID) } - return ethclient.DialContext(ctx, fmt.Sprintf(tmpl, config.infuraAPIKey)) - } else if config.alchemyAPIKey != "" && config.contractBackend == nil && - config.infuraAPIKey == "" && config.ankrAPIKey == "" { + rpcURL = fmt.Sprintf(tmpl, config.infuraAPIKey) + case Alchemy: tmpl, found := client.AlchemyURLs[config.chain.ID] if !found { return nil, fmt.Errorf("chain id %v not supported for Alchemy", config.chain.ID) - } - return ethclient.DialContext(ctx, fmt.Sprintf(tmpl, config.alchemyAPIKey)) - } else if config.ankrAPIKey != "" && config.contractBackend == nil && - config.infuraAPIKey == "" && config.alchemyAPIKey == "" { + } + rpcURL = fmt.Sprintf(tmpl, config.alchemyAPIKey) + case Ankr: tmpl, found := client.AnkrURLs[config.chain.ID] if !found { return nil, fmt.Errorf("chain id %v not supported for Ankr", config.chain.ID) } - return ethclient.DialContext(ctx, fmt.Sprintf(tmpl, config.ankrAPIKey)) - } else if config.local { - url, found := client.LocalURLs[config.chain.ID] - if !found { - return nil, fmt.Errorf("chain id %v not supported for Local", config.chain.ID) - } - return ethclient.DialContext(ctx, url) + rpcURL = fmt.Sprintf(tmpl, config.ankrAPIKey) + default: + return nil, errors.New("no provider or ETH backend specified") } - return nil, errors.New("no provider specified, must provide an Infura API key, Alchemy API key, or an ETH backend") + + return ethclient.DialContext(ctx, rpcURL) } // TableID is the ID of a Table. 
diff --git a/pkg/client/v1/client_test.go b/pkg/client/v1/client_test.go index 5739ad59..a8be4889 100644 --- a/pkg/client/v1/client_test.go +++ b/pkg/client/v1/client_test.go @@ -207,6 +207,7 @@ func setup(t *testing.T) clientCalls { client, err := NewClient( context.Background(), stack.Wallet, + Alchemy, NewClientChain(c), NewClientContractBackend(stack.Backend)) require.NoError(t, err) From 504a31c698b6715c46fe7bdbbac18fd2f5737b54 Mon Sep 17 00:00:00 2001 From: avichalp Date: Thu, 20 Apr 2023 15:06:35 +0900 Subject: [PATCH 11/14] Avoid changing args for NewClient func Signed-off-by: avichalp --- .../counterprobe/counterprobe_test.go | 2 +- cmd/healthbot/main.go | 17 +++------ pkg/client/v1/client.go | 35 +++++++++++++------ pkg/client/v1/client_test.go | 1 - 4 files changed, 30 insertions(+), 25 deletions(-) diff --git a/cmd/healthbot/counterprobe/counterprobe_test.go b/cmd/healthbot/counterprobe/counterprobe_test.go index bb1cc7a5..58df5391 100644 --- a/cmd/healthbot/counterprobe/counterprobe_test.go +++ b/cmd/healthbot/counterprobe/counterprobe_test.go @@ -19,7 +19,7 @@ func TestProduction(t *testing.T) { require.NoError(t, err) chain := client.Chains[client.ChainIDs.Optimism] - client, err := clientV1.NewClient(ctx, wallet, clientV1.Alchemy, clientV1.NewClientChain(chain)) + client, err := clientV1.NewClient(ctx, wallet, clientV1.NewClientChain(chain)) require.NoError(t, err) cp, err := New("optimism-mainnet", client, "Runbook_24", time.Second, time.Second*10, 1, 1) diff --git a/cmd/healthbot/main.go b/cmd/healthbot/main.go index 82822759..059f2a60 100644 --- a/cmd/healthbot/main.go +++ b/cmd/healthbot/main.go @@ -58,22 +58,13 @@ func main() { chain.ContractAddr = common.HexToAddress(chainCfg.OverrideClient.ContractAddr) } // For Filecoin Hyperspace, we use Ankr endpoint - var client *clientV1.Client + opts := []clientV1.NewClientOption{clientV1.NewClientChain(chain)} if chain.ID == 3141 { - client, err = clientV1.NewClient( - ctx, wallet, - clientV1.Ankr, - clientV1.NewClientChain(chain), - clientV1.NewClientAnkrAPIKey(chainCfg.AlchemyAPIKey), - ) + opts = append(opts, clientV1.NewClientAnkrAPIKey(chainCfg.AnkrAPIKey)) } else { - client, err = clientV1.NewClient( - ctx, wallet, - clientV1.Alchemy, - clientV1.NewClientChain(chain), - clientV1.NewClientAlchemyAPIKey(chainCfg.AlchemyAPIKey), - ) + opts = append(opts, clientV1.NewClientAlchemyAPIKey(chainCfg.AlchemyAPIKey)) } + client, err := clientV1.NewClient(ctx, wallet, opts...) if err != nil { log.Fatal().Err(err).Msg("error creating tbl client") } diff --git a/pkg/client/v1/client.go b/pkg/client/v1/client.go index 5022a918..08f13eee 100644 --- a/pkg/client/v1/client.go +++ b/pkg/client/v1/client.go @@ -32,12 +32,18 @@ type Client struct { baseURL *url.URL } +// Provider is the type of backend RPC Provider. type Provider int const ( + // Infura specify the Infura RPC Provider. Infura Provider = iota - Alchemy + // Alchemy specify the Alchemy RPC Provider. + Alchemy + // Ankr specify the Ankr RPC Provider. Ankr + // Local specify use of local RPC. + Local ) type config struct { @@ -47,7 +53,7 @@ type config struct { ankrAPIKey string local bool contractBackend bind.ContractBackend - provider Provider + provider Provider } // NewClientOption controls the behavior of NewClient. 
@@ -64,6 +70,7 @@ func NewClientChain(chain client.Chain) NewClientOption { func NewClientInfuraAPIKey(key string) NewClientOption { return func(c *config) { c.infuraAPIKey = key + c.provider = Infura } } @@ -71,6 +78,7 @@ func NewClientInfuraAPIKey(key string) NewClientOption { func NewClientAlchemyAPIKey(key string) NewClientOption { return func(c *config) { c.alchemyAPIKey = key + c.provider = Alchemy } } @@ -78,6 +86,7 @@ func NewClientAlchemyAPIKey(key string) NewClientOption { func NewClientAnkrAPIKey(key string) NewClientOption { return func(c *config) { c.ankrAPIKey = key + c.provider = Ankr } } @@ -85,6 +94,7 @@ func NewClientAnkrAPIKey(key string) NewClientOption { func NewClientLocal() NewClientOption { return func(c *config) { c.local = true + c.provider = Local } } @@ -96,9 +106,8 @@ func NewClientContractBackend(backend bind.ContractBackend) NewClientOption { } // NewClient creates a new Client. -func NewClient(ctx context.Context, wallet *wallet.Wallet, provider Provider, opts ...NewClientOption) (*Client, error) { +func NewClient(ctx context.Context, wallet *wallet.Wallet, opts ...NewClientOption) (*Client, error) { config := config{chain: &defaultChain} - config.provider = provider for _, opt := range opts { opt(&config) } @@ -153,9 +162,9 @@ func NewClient(ctx context.Context, wallet *wallet.Wallet, provider Provider, op func getContractBackend(ctx context.Context, config config) (bind.ContractBackend, error) { if config.contractBackend != nil { return config.contractBackend, nil - } - - var rpcURL string + } + + var rpcURL string switch config.provider { case Infura: tmpl, found := client.InfuraURLs[config.chain.ID] @@ -167,7 +176,7 @@ func getContractBackend(ctx context.Context, config config) (bind.ContractBacken tmpl, found := client.AlchemyURLs[config.chain.ID] if !found { return nil, fmt.Errorf("chain id %v not supported for Alchemy", config.chain.ID) - } + } rpcURL = fmt.Sprintf(tmpl, config.alchemyAPIKey) case Ankr: tmpl, found := client.AnkrURLs[config.chain.ID] @@ -175,11 +184,17 @@ func getContractBackend(ctx context.Context, config config) (bind.ContractBacken return nil, fmt.Errorf("chain id %v not supported for Ankr", config.chain.ID) } rpcURL = fmt.Sprintf(tmpl, config.ankrAPIKey) + case Local: + tmpl, found := client.LocalURLs[config.chain.ID] + if !found { + return nil, fmt.Errorf("chain id %v not supported for Local", config.chain.ID) + } + rpcURL = tmpl default: return nil, errors.New("no provider or ETH backend specified") } - - return ethclient.DialContext(ctx, rpcURL) + + return ethclient.DialContext(ctx, rpcURL) } // TableID is the ID of a Table. 
diff --git a/pkg/client/v1/client_test.go b/pkg/client/v1/client_test.go index a8be4889..5739ad59 100644 --- a/pkg/client/v1/client_test.go +++ b/pkg/client/v1/client_test.go @@ -207,7 +207,6 @@ func setup(t *testing.T) clientCalls { client, err := NewClient( context.Background(), stack.Wallet, - Alchemy, NewClientChain(c), NewClientContractBackend(stack.Backend)) require.NoError(t, err) From 461a966619b02c761f702bf83fadf44ff7973fa4 Mon Sep 17 00:00:00 2001 From: avichalp Date: Thu, 20 Apr 2023 15:30:47 +0900 Subject: [PATCH 12/14] Simplify the provider type [stagingdeploy] Signed-off-by: avichalp --- cmd/healthbot/main.go | 1 + pkg/client/v1/client.go | 62 ++++++++++++++--------------------------- 2 files changed, 22 insertions(+), 41 deletions(-) diff --git a/cmd/healthbot/main.go b/cmd/healthbot/main.go index 059f2a60..711498d5 100644 --- a/cmd/healthbot/main.go +++ b/cmd/healthbot/main.go @@ -57,6 +57,7 @@ func main() { if chainCfg.OverrideClient.ContractAddr != "" { chain.ContractAddr = common.HexToAddress(chainCfg.OverrideClient.ContractAddr) } + // For Filecoin Hyperspace, we use Ankr endpoint opts := []clientV1.NewClientOption{clientV1.NewClientChain(chain)} if chain.ID == 3141 { diff --git a/pkg/client/v1/client.go b/pkg/client/v1/client.go index 08f13eee..6abf2a25 100644 --- a/pkg/client/v1/client.go +++ b/pkg/client/v1/client.go @@ -32,28 +32,15 @@ type Client struct { baseURL *url.URL } -// Provider is the type of backend RPC Provider. -type Provider int - -const ( - // Infura specify the Infura RPC Provider. - Infura Provider = iota - // Alchemy specify the Alchemy RPC Provider. - Alchemy - // Ankr specify the Ankr RPC Provider. - Ankr - // Local specify use of local RPC. - Local -) +type provider struct { + name string + apiKey string +} type config struct { chain *client.Chain - infuraAPIKey string - alchemyAPIKey string - ankrAPIKey string - local bool contractBackend bind.ContractBackend - provider Provider + provider provider } // NewClientOption controls the behavior of NewClient. @@ -69,32 +56,28 @@ func NewClientChain(chain client.Chain) NewClientOption { // NewClientInfuraAPIKey specifies an Infura API to use when creating an EVM backend. func NewClientInfuraAPIKey(key string) NewClientOption { return func(c *config) { - c.infuraAPIKey = key - c.provider = Infura + c.provider = provider{name: "Infura", apiKey: key} } } // NewClientAlchemyAPIKey specifies an Alchemy API to use when creating an EVM backend. func NewClientAlchemyAPIKey(key string) NewClientOption { return func(c *config) { - c.alchemyAPIKey = key - c.provider = Alchemy + c.provider = provider{name: "Alchemy", apiKey: key} } } // NewClientAnkrAPIKey specifies an Ankr API to use when creating an EVM backend. func NewClientAnkrAPIKey(key string) NewClientOption { return func(c *config) { - c.ankrAPIKey = key - c.provider = Ankr + c.provider = provider{name: "Ankr", apiKey: key} } } // NewClientLocal specifies that a local EVM backend should be used. 
 func NewClientLocal() NewClientOption {
     return func(c *config) {
-        c.local = true
-        c.provider = Local
+        c.provider = provider{name: "Local", apiKey: ""}
     }
 }
 
@@ -164,37 +147,34 @@ func getContractBackend(ctx context.Context, config config) (bind.ContractBacken
         return config.contractBackend, nil
     }
 
-    var rpcURL string
-    switch config.provider {
-    case Infura:
-        tmpl, found := client.InfuraURLs[config.chain.ID]
+    var tmpl string
+    var found bool
+    switch config.provider.name {
+    case "Infura":
+        tmpl, found = client.InfuraURLs[config.chain.ID]
         if !found {
             return nil, fmt.Errorf("chain id %v not supported for Infura", config.chain.ID)
         }
-        rpcURL = fmt.Sprintf(tmpl, config.infuraAPIKey)
-    case Alchemy:
-        tmpl, found := client.AlchemyURLs[config.chain.ID]
+    case "Alchemy":
+        tmpl, found = client.AlchemyURLs[config.chain.ID]
         if !found {
             return nil, fmt.Errorf("chain id %v not supported for Alchemy", config.chain.ID)
         }
-        rpcURL = fmt.Sprintf(tmpl, config.alchemyAPIKey)
-    case Ankr:
-        tmpl, found := client.AnkrURLs[config.chain.ID]
+    case "Ankr":
+        tmpl, found = client.AnkrURLs[config.chain.ID]
         if !found {
             return nil, fmt.Errorf("chain id %v not supported for Ankr", config.chain.ID)
         }
-        rpcURL = fmt.Sprintf(tmpl, config.ankrAPIKey)
-    case Local:
-        tmpl, found := client.LocalURLs[config.chain.ID]
+    case "Local":
+        tmpl, found = client.LocalURLs[config.chain.ID]
         if !found {
             return nil, fmt.Errorf("chain id %v not supported for Local", config.chain.ID)
         }
-        rpcURL = tmpl
     default:
         return nil, errors.New("no provider or ETH backend specified")
     }
 
-    return ethclient.DialContext(ctx, rpcURL)
+    return ethclient.DialContext(ctx, fmt.Sprintf(tmpl, config.provider.apiKey))
 }
 
 // TableID is the ID of a Table.

From d37d52f06ef8089e48744f00dc3e2df7ccc5c99d Mon Sep 17 00:00:00 2001
From: avichalp
Date: Tue, 25 Apr 2023 19:00:09 +0800
Subject: [PATCH 13/14] [stagingdeploy] Use enums for provider comparison

Signed-off-by: avichalp
---
 pkg/client/v1/client.go | 34 +++++++++++++++++++++++-----------
 1 file changed, 23 insertions(+), 11 deletions(-)

diff --git a/pkg/client/v1/client.go b/pkg/client/v1/client.go
index 6abf2a25..7d0c2217 100644
--- a/pkg/client/v1/client.go
+++ b/pkg/client/v1/client.go
@@ -32,9 +32,21 @@ type Client struct {
     baseURL *url.URL
 }
 
+// providerType denotes the backend RPC provider in use (Alchemy, Ankr, Infura, etc.).
+type providerType int
+
+const (
+    alchemy providerType = iota
+    infura
+    quickNode
+    ankr
+    local
+)
+
 type provider struct {
-    name   string
-    apiKey string
+    name         string
+    apiKey       string
+    providerType providerType
 }
 
 type config struct {
@@ -56,28 +68,28 @@ func NewClientChain(chain client.Chain) NewClientOption {
 // NewClientInfuraAPIKey specifies an Infura API to use when creating an EVM backend.
 func NewClientInfuraAPIKey(key string) NewClientOption {
     return func(c *config) {
-        c.provider = provider{name: "Infura", apiKey: key}
+        c.provider = provider{name: "Infura", apiKey: key, providerType: infura}
     }
 }
 
 // NewClientAlchemyAPIKey specifies an Alchemy API to use when creating an EVM backend.
 func NewClientAlchemyAPIKey(key string) NewClientOption {
     return func(c *config) {
-        c.provider = provider{name: "Alchemy", apiKey: key}
+        c.provider = provider{name: "Alchemy", apiKey: key, providerType: alchemy}
     }
 }
 
 // NewClientAnkrAPIKey specifies an Ankr API to use when creating an EVM backend.
 func NewClientAnkrAPIKey(key string) NewClientOption {
     return func(c *config) {
-        c.provider = provider{name: "Ankr", apiKey: key}
+        c.provider = provider{name: "Ankr", apiKey: key, providerType: ankr}
     }
 }
 
 // NewClientLocal specifies that a local EVM backend should be used.
 func NewClientLocal() NewClientOption {
     return func(c *config) {
-        c.provider = provider{name: "Local", apiKey: ""}
+        c.provider = provider{name: "Local", apiKey: "", providerType: local}
     }
 }
 
@@ -149,23 +161,23 @@ func getContractBackend(ctx context.Context, config config) (bind.ContractBacken
     var tmpl string
     var found bool
-    switch config.provider.name {
-    case "Infura":
+    switch config.provider.providerType {
+    case infura:
         tmpl, found = client.InfuraURLs[config.chain.ID]
         if !found {
             return nil, fmt.Errorf("chain id %v not supported for Infura", config.chain.ID)
         }
-    case "Alchemy":
+    case alchemy:
         tmpl, found = client.AlchemyURLs[config.chain.ID]
         if !found {
             return nil, fmt.Errorf("chain id %v not supported for Alchemy", config.chain.ID)
         }
-    case "Ankr":
+    case ankr:
        tmpl, found = client.AnkrURLs[config.chain.ID]
         if !found {
             return nil, fmt.Errorf("chain id %v not supported for Ankr", config.chain.ID)
         }
-    case "Local":
+    case local:
         tmpl, found = client.LocalURLs[config.chain.ID]
         if !found {
             return nil, fmt.Errorf("chain id %v not supported for Local", config.chain.ID)

From 2b70d3d125020b423dbf4bce2a7667a769699e85 Mon Sep 17 00:00:00 2001
From: avichalp
Date: Mon, 17 Apr 2023 15:47:21 +0900
Subject: [PATCH 14/14] [stagingdeploy] Use 1559 type transactions to create table

Signed-off-by: avichalp
---
 pkg/tables/impl/ethereum/client.go      | 32 ++++++++++++++++++++-----
 pkg/tables/impl/ethereum/client_test.go |  1 +
 2 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/pkg/tables/impl/ethereum/client.go b/pkg/tables/impl/ethereum/client.go
index a38c9e9e..b01e15ba 100644
--- a/pkg/tables/impl/ethereum/client.go
+++ b/pkg/tables/impl/ethereum/client.go
@@ -54,7 +54,7 @@ func NewClient(
 
 // CreateTable implements CreateTable.
 func (c *Client) CreateTable(ctx context.Context, owner common.Address, statement string) (tables.Transaction, error) {
-    gasPrice, err := c.backend.SuggestGasPrice(ctx)
+    gasTipCap, err := c.backend.SuggestGasTipCap(ctx)
     if err != nil {
         return nil, fmt.Errorf("suggest gas price: %s", err)
     }
@@ -64,16 +64,36 @@ func (c *Client) CreateTable(ctx context.Context, owner common.Address, statemen
         return nil, fmt.Errorf("creating keyed transactor: %s", err)
     }
 
+    tablesABI, err := abi.JSON(strings.NewReader(ContractABI))
+    if err != nil {
+        return nil, fmt.Errorf("parsing abi: %s", err)
+    }
+
+    data, err := tablesABI.Pack("createTable", []interface{}{auth.From, statement}...)
+    if err != nil {
+        return nil, fmt.Errorf("abi packing: %s", err)
+    }
+
+    gasLimit, err := c.backend.EstimateGas(ctx, ethereum.CallMsg{
+        From: auth.From,
+        To:   &c.contractAddr,
+        Data: data,
+    })
+    if err != nil {
+        return nil, fmt.Errorf("gas estimate: %s", err)
+    }
+
     tx, err := c.callWithRetry(ctx, func() (*types.Transaction, error) {
         registerPendingTx, unlock, nonce := c.tracker.GetNonce(ctx)
         defer unlock()
 
         opts := &bind.TransactOpts{
-            Context:  ctx,
-            Signer:   auth.Signer,
-            From:     auth.From,
-            Nonce:    big.NewInt(0).SetInt64(nonce),
-            GasPrice: gasPrice,
+            Context:   ctx,
+            Signer:    auth.Signer,
+            From:      auth.From,
+            Nonce:     big.NewInt(0).SetInt64(nonce),
+            GasTipCap: gasTipCap,
+            GasLimit:  gasLimit,
         }
 
         tx, err := c.contract.CreateTable(opts, owner, statement)
diff --git a/pkg/tables/impl/ethereum/client_test.go b/pkg/tables/impl/ethereum/client_test.go
index c218af62..d9e19486 100644
--- a/pkg/tables/impl/ethereum/client_test.go
+++ b/pkg/tables/impl/ethereum/client_test.go
@@ -44,6 +44,7 @@ func TestCreateTable(t *testing.T) {
     // TODO: How many logs and topics should there be?
     require.Len(t, receipt.Logs, 2)
     require.Len(t, receipt.Logs[0].Topics, 4)
+    require.Equal(t, receipt.Type, uint8(types.DynamicFeeTxType))
 }
 
 func TestIsOwner(t *testing.T) {
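
The switch to SuggestGasTipCap plus an explicit GasLimit is what makes the bound CreateTable call go out as an EIP-1559 (type-2) transaction, which the new test asserts through receipt.Type. Below is a minimal sketch of assembling such transact options with go-ethereum; the helper name and the fee-cap heuristic (2*baseFee + tip) are assumptions for illustration, since the patch itself leaves GasFeeCap for the binding to fill in, and the sketch assumes a post-London chain where the head block carries a base fee.

// eip1559_opts_sketch.go -- a sketch under the assumptions above, not the repository's code.
package sketch

import (
    "context"
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/accounts/abi/bind"
    "github.com/ethereum/go-ethereum/ethclient"
)

// dynamicFeeOpts is a hypothetical helper: it copies an existing transactor
// and fills the EIP-1559 fields so a generated binding emits a type-2
// (dynamic fee) transaction instead of a legacy, GasPrice-based one.
func dynamicFeeOpts(ctx context.Context, ec *ethclient.Client, base *bind.TransactOpts, gasLimit uint64) (*bind.TransactOpts, error) {
    // Priority fee suggested by the node, as in the patch.
    tip, err := ec.SuggestGasTipCap(ctx)
    if err != nil {
        return nil, fmt.Errorf("suggest gas tip cap: %s", err)
    }

    // Fee cap heuristic: 2*baseFee + tip (an assumption; requires head.BaseFee != nil).
    head, err := ec.HeaderByNumber(ctx, nil)
    if err != nil {
        return nil, fmt.Errorf("header by number: %s", err)
    }
    feeCap := new(big.Int).Add(new(big.Int).Mul(head.BaseFee, big.NewInt(2)), tip)

    opts := *base // shallow copy keeps From, Signer, and Nonce
    opts.Context = ctx
    opts.GasTipCap = tip
    opts.GasFeeCap = feeCap
    opts.GasLimit = gasLimit
    opts.GasPrice = nil // a non-nil GasPrice would force a legacy transaction
    return &opts, nil
}
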