diff --git a/.gitattributes b/.gitattributes index bc7f63709..541e04377 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,3 +1,5 @@ *.go text eol=lf * text=auto -contract/sqlite3-binding.* linguist-vendored +contract/sqlite3*.* linguist-vendored=true +libtool/src/gmp-*/* linguist-vendored=true + diff --git a/.travis.yml b/.travis.yml index b77ab51d5..c418a1d8a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,12 +5,10 @@ os: - osx go: - - 1.10.x + - 1.12.x before_install: - - go get -t github.com/Masterminds/glide - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go get -t github.com/codeclimate/test-reporter; fi - - cmake . install: - make all polaris colaris diff --git a/CMakeLists.txt b/CMakeLists.txt index 457085c29..9f61bb064 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,55 +17,47 @@ endif() add_custom_target(build ALL DEPENDS aergocli aergosvr aergoluac brick) -add_custom_target(aergocli GOBIN=${BIN_DIR} go install ${GCFLAGS} -ldflags \"-X github.com/aergoio/aergo/cmd/aergocli/cmd.githash=`git describe --tags`\" ./cmd/aergocli/... +add_custom_target(aergocli GO111MODULE=on GOBIN=${BIN_DIR} go install ${GCFLAGS} -ldflags \"-X github.com/aergoio/aergo/cmd/aergocli/cmd.githash=`git describe --tags`\" ./cmd/aergocli/... WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - DEPENDS vendor libtool) + DEPENDS libtool) -add_custom_target(aergosvr GOBIN=${BIN_DIR} go install ${GCFLAGS} -ldflags \"-X main.githash=`git describe --tags` -X main.gitRevision=`git rev-parse --short HEAD` -X main.gitBranch=`git rev-parse --symbolic-full-name --abbrev-ref HEAD`\" ./cmd/aergosvr/... +add_custom_target(aergosvr GO111MODULE=on GOBIN=${BIN_DIR} go install ${GCFLAGS} -ldflags \"-X main.githash=`git describe --tags` -X main.gitRevision=`git rev-parse --short HEAD` -X main.gitBranch=`git rev-parse --symbolic-full-name --abbrev-ref HEAD`\" ./cmd/aergosvr/... WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - DEPENDS vendor libtool) + DEPENDS libtool) -add_custom_target(polaris GOBIN=${BIN_DIR} go install ${GCFLAGS} ./cmd/polaris/... - WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - DEPENDS vendor libtool) +add_custom_target(polaris GO111MODULE=on GOBIN=${BIN_DIR} go install ${GCFLAGS} -ldflags \"-X github.com/aergoio/aergo/cmd/polaris/cmd.githash=`git rev-parse HEAD`\" ./cmd/polaris/... + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}) -add_custom_target(colaris GOBIN=${BIN_DIR} go install ${GCFLAGS} -ldflags \"-X github.com/aergoio/aergo/cmd/colaris/cmd.githash=`git rev-parse HEAD`\" ./cmd/colaris/... - WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - DEPENDS vendor libtool) +add_custom_target(colaris GO111MODULE=on GOBIN=${BIN_DIR} go install ${GCFLAGS} -ldflags \"-X github.com/aergoio/aergo/cmd/colaris/cmd.githash=`git rev-parse HEAD`\" ./cmd/colaris/... + WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}) -add_custom_target(aergoluac GOBIN=${BIN_DIR} go install ${GCFLAGS} ./cmd/aergoluac/... +add_custom_target(aergoluac GO111MODULE=on GOBIN=${BIN_DIR} go install ${GCFLAGS} -ldflags \"-X main.githash=`git describe --tags`\" ./cmd/aergoluac/... WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - DEPENDS vendor libtool) + DEPENDS libtool) -add_custom_target(brick GOBIN=${BIN_DIR} go install ${GCFLAGS} ${GFLAG} -ldflags \"-X 'github.com/aergoio/aergo/cmd/brick/context.GitHash=`git describe --tags`' --X 'github.com/aergoio/aergo/vendor/github.com/aergoio/aergo-lib/log.defaultConfStr=`cat ./cmd/brick/arglog.toml`'\" ./cmd/brick/... 
+add_custom_target(brick GO111MODULE=on GOBIN=${BIN_DIR} go install ${GCFLAGS} ${GFLAG} -ldflags \"-X 'github.com/aergoio/aergo/cmd/brick/context.GitHash=`git describe --tags`' +-X 'github.com/aergoio/aergo-lib/log.defaultConfStr=`cat ./cmd/brick/arglog.toml`'\" ./cmd/brick/... WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - DEPENDS vendor libtool) + DEPENDS libtool) -set(VENDOR ${CMAKE_CURRENT_LIST_DIR}/vendor) -add_custom_command(OUTPUT ${VENDOR} - COMMAND glide install - WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} - DEPENDS glide.yaml glide.lock) -add_custom_target(vendor DEPENDS ${VENDOR}) +add_custom_target(deps DEPENDS libtool) -add_custom_target(deps DEPENDS vendor libtool) - -add_custom_target(check go test -timeout 60s ./... +add_custom_target(check GO111MODULE=on go test -timeout 60s ./... WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} DEPENDS build) -add_custom_target(cover-check go test -coverprofile c.out ./... +add_custom_target(cover-check GO111MODULE=on go test -coverprofile c.out ./... WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR} DEPENDS build) -add_custom_target(distclean go clean .. +add_custom_target(distclean GO111MODULE=on go clean .. COMMAND rm -rf ${BIN_DIR}/aergo* ${BIN_DIR}/brick DEPENDS libtool-clean) add_custom_target(protoc COMMAND protoc -I/usr/local/include -I${PROTO_DIR} --go_out=plugins=grpc:$ENV{GOPATH}/src ${PROTO_DIR}/*.proto - COMMAND go build ../types/...) + COMMAND GO111MODULE=on go build ../types/...) add_custom_target(protoclean rm -f ../types/*.pb.go) + add_subdirectory(libtool) diff --git a/Dockerfile b/Dockerfile index db9b25e56..c1e02da00 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,13 +1,13 @@ -FROM golang:alpine as builder -RUN apk update && apk add git glide cmake build-base m4 +FROM golang:1.12.5-alpine3.9 as builder +RUN apk update && apk add git cmake build-base m4 ENV GOPATH $HOME/go ARG GIT_TAG RUN go get -d github.com/aergoio/aergo WORKDIR ${GOPATH}/src/github.com/aergoio/aergo -RUN git checkout --detach ${GIT_TAG} && git submodule init && git submodule update && cmake . +RUN git checkout --detach ${GIT_TAG} && git submodule init && git submodule update RUN make aergosvr -FROM alpine:3.8 +FROM alpine:3.9 RUN apk add libgcc COPY --from=builder $HOME/go/src/github.com/aergoio/aergo/bin/aergosvr /usr/local/bin/ COPY --from=builder $HOME/go/src/github.com/aergoio/aergo/libtool/lib/* /usr/local/lib/ diff --git a/Dockerfile.polaris b/Dockerfile.polaris index dea9ef329..1cc5f99eb 100644 --- a/Dockerfile.polaris +++ b/Dockerfile.polaris @@ -1,18 +1,16 @@ -FROM golang:alpine as builder -RUN apk update && apk add git glide cmake build-base m4 +FROM golang:1.12.5-alpine3.9 as builder +RUN apk update && apk add git cmake build-base m4 ENV GOPATH $HOME/go ARG GIT_TAG RUN go get -d github.com/aergoio/aergo WORKDIR ${GOPATH}/src/github.com/aergoio/aergo -RUN git checkout --detach ${GIT_TAG} && git submodule init && git submodule update && cmake . 
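Note on the -ldflags "-X ..." settings in the aergocli/aergosvr/brick targets above: they only take effect if a matching package-level string variable exists on the Go side. A minimal sketch of what those declarations look like, assuming variables of this shape in cmd/aergosvr (the actual declarations are part of the repository, not of this diff):

    // Hypothetical illustration of the variables the CMake targets above
    // populate at link time, e.g.:
    //   go install -ldflags "-X main.githash=... -X main.gitRevision=... -X main.gitBranch=..."
    package main

    import "fmt"

    var (
        githash     = "unknown" // overwritten by -X main.githash=`git describe --tags`
        gitRevision = "unknown" // overwritten by -X main.gitRevision=`git rev-parse --short HEAD`
        gitBranch   = "unknown" // overwritten by -X main.gitBranch=`git rev-parse --abbrev-ref HEAD`
    )

    func main() {
        fmt.Printf("aergosvr %s (rev %s, branch %s)\n", githash, gitRevision, gitBranch)
    }

The -X path must match the import path of the package that declares the variable, which is why the brick target now injects github.com/aergoio/aergo-lib/log.defaultConfStr directly instead of the old github.com/aergoio/aergo/vendor/... path once Go modules replace glide vendoring.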
+RUN git checkout --detach ${GIT_TAG} && git submodule init && git submodule update RUN make polaris colaris -FROM alpine:3.8 +FROM alpine:3.9 RUN apk add libgcc COPY --from=builder $HOME/go/src/github.com/aergoio/aergo/bin/polaris /usr/local/bin/ COPY --from=builder $HOME/go/src/github.com/aergoio/aergo/bin/colaris /usr/local/bin/ -COPY --from=builder $HOME/go/src/github.com/aergoio/aergo/libtool/lib/* /usr/local/lib/ -ENV LD_LIBRARY_PATH="/usr/local/lib:${LD_LIBRARY_PATH}" WORKDIR /tools/ CMD ["polaris"] diff --git a/Dockerfile.tools b/Dockerfile.tools index 47edfcc03..494ad36bd 100644 --- a/Dockerfile.tools +++ b/Dockerfile.tools @@ -1,13 +1,13 @@ -FROM golang:alpine as builder -RUN apk update && apk add git glide cmake build-base m4 +FROM golang:1.12.5-alpine3.9 as builder +RUN apk update && apk add git cmake build-base m4 ENV GOPATH $HOME/go ARG GIT_TAG RUN go get -d github.com/aergoio/aergo WORKDIR ${GOPATH}/src/github.com/aergoio/aergo -RUN git checkout --detach ${GIT_TAG} && git submodule init && git submodule update && cmake . +RUN git checkout --detach ${GIT_TAG} && git submodule init && git submodule update RUN make aergocli aergoluac brick -FROM alpine:3.8 +FROM alpine:3.9 RUN apk add libgcc COPY --from=builder $HOME/go/src/github.com/aergoio/aergo/bin/aergocli /usr/local/bin/ COPY --from=builder $HOME/go/src/github.com/aergoio/aergo/bin/aergoluac /usr/local/bin/ diff --git a/Makefile b/Makefile index e36840ba5..f4b54a3b2 100644 --- a/Makefile +++ b/Makefile @@ -16,23 +16,35 @@ ifeq ($(OS),Windows_NT) endif endif -.PHONY: all release debug clean +BUILD_RULES := \ + deps \ + aergocli aergosvr aergoluac polaris colaris brick \ + libtool libtool-clean \ + libluajit liblmdb libgmp \ + libluajit-clean liblmdb-clean libgmp-clean \ + check cover-check \ + distclean \ + protoc protoclean + +.PHONY: all release debug clean $(BUILD_RULES) all: $(BUILD_FILE) @$(MAKE) --no-print-directory -C $(BUILD_DIR) -$(BUILD_FILE): +$(BUILD_DIR): @mkdir -p $(BUILD_DIR) - cd $(BUILD_DIR) && $(CMAKE_CMD) -G "Unix Makefiles" -D CMAKE_BUILD_TYPE="Release" $(MAKE_FLAG) .. -release: - @mkdir -p $(BUILD_DIR) +$(BUILD_FILE): $(BUILD_DIR) + @if ! [ -f $(BUILD_FILE) ]; then \ + cd $(BUILD_DIR) && $(CMAKE_CMD) -G "Unix Makefiles" -D CMAKE_BUILD_TYPE="Release" $(MAKE_FLAG) ..; \ + fi + +release: $(BUILD_DIR) cd $(BUILD_DIR) && $(CMAKE_CMD) -G "Unix Makefiles" -D CMAKE_BUILD_TYPE="Release" $(MAKE_FLAG) .. @$(MAKE) --no-print-directory -C $(BUILD_DIR) -debug: - @mkdir -p $(BUILD_DIR) - cd $(BUILD_DIR) && $(CMAKE_CMD) -G "Unix Makefiles" -D CMAKE_BUILD_TYPE="Debug" $(MAKE_FLAG) .. +debug: $(BUILD_DIR) + @cd $(BUILD_DIR) && $(CMAKE_CMD) -G "Unix Makefiles" -D CMAKE_BUILD_TYPE="Debug" $(MAKE_FLAG) .. 
@$(MAKE) --no-print-directory -C $(BUILD_DIR) clean: @@ -41,5 +53,5 @@ clean: realclean: clean @rm -rf $(BUILD_DIR) -%: - @$(MAKE) --no-print-directory -C $(BUILD_DIR) $(MAKECMDGOALS) +$(BUILD_RULES): $(BUILD_FILE) + @$(MAKE) --no-print-directory -C $(BUILD_DIR) $@ diff --git a/README.md b/README.md index c3f8d2315..7f8b78c2c 100644 --- a/README.md +++ b/README.md @@ -108,8 +108,7 @@ MVP based, Forward compatibility, Iteration ### Prerequisites -* Go1.10 or higher - https://golang.org/dl -* Glide - https://github.com/Masterminds/glide +* Go1.11.4+ - https://golang.org/dl * Proto Buffers - https://github.com/google/protobuf * CMake 3.0.0 or higher - https://cmake.org diff --git a/account/key/address.go b/account/key/address.go index 328bf7fa6..6c101e1d6 100644 --- a/account/key/address.go +++ b/account/key/address.go @@ -34,12 +34,19 @@ func (ks *Store) SaveAddress(addr Address) error { if len(addr) != types.AddressLength { return errors.New("invalid address length") } + + ks.RWMutex.Lock() + defer ks.RWMutex.Unlock() + addrs := append(ks.storage.Get(addresses), addr...) ks.storage.Set(addresses, addrs) return nil } func (ks *Store) GetAddresses() ([]Address, error) { + ks.RWMutex.RLock() + defer ks.RWMutex.RUnlock() + b := ks.storage.Get(addresses) var ret []Address for i := 0; i < len(b); i += types.AddressLength { diff --git a/account/key/store.go b/account/key/store.go index 697581ee3..55ee94393 100644 --- a/account/key/store.go +++ b/account/key/store.go @@ -6,6 +6,7 @@ import ( "crypto/cipher" "errors" "path" + "sync" "time" "github.com/aergoio/aergo-lib/db" @@ -23,9 +24,11 @@ type keyPair struct { // Store stucture of keystore type Store struct { - timeout time.Duration - unlocked map[string]*keyPair - storage db.DB + sync.RWMutex + timeout time.Duration + unlocked map[string]*keyPair + unlockedLock *sync.Mutex + storage db.DB } // NewStore make new instance of keystore @@ -33,9 +36,10 @@ func NewStore(storePath string, unlockTimeout uint) *Store { const dbName = "account" dbPath := path.Join(storePath, dbName) return &Store{ - timeout: time.Duration(unlockTimeout) * time.Second, - unlocked: map[string]*keyPair{}, - storage: db.NewDB(db.LevelImpl, dbPath), + timeout: time.Duration(unlockTimeout) * time.Second, + unlocked: map[string]*keyPair{}, + unlockedLock: &sync.Mutex{}, + storage: db.NewDB(db.LevelImpl, dbPath), } } func (ks *Store) CloseStore() { @@ -103,6 +107,10 @@ func (ks *Store) Unlock(addr Address, pass string) (Address, error) { } pk, _ := btcec.PrivKeyFromBytes(btcec.S256(), key) addrKey := types.EncodeAddress(addr) + + ks.unlockedLock.Lock() + defer ks.unlockedLock.Unlock() + unlockedKeyPair, exist := ks.unlocked[addrKey] if ks.timeout == 0 { @@ -130,6 +138,10 @@ func (ks *Store) Lock(addr Address, pass string) (Address, error) { return nil, err } b58addr := types.EncodeAddress(addr) + + ks.unlockedLock.Lock() + defer ks.unlockedLock.Unlock() + if _, exist := ks.unlocked[b58addr]; exist { ks.unlocked[b58addr] = nil delete(ks.unlocked, b58addr) diff --git a/account/key/store_test.go b/account/key/store_test.go index 4fa966533..90ace5205 100644 --- a/account/key/store_test.go +++ b/account/key/store_test.go @@ -3,9 +3,11 @@ package key import ( "fmt" "io/ioutil" + "sync" "testing" "github.com/aergoio/aergo/types" + "github.com/stretchr/testify/assert" ) var ( @@ -53,7 +55,7 @@ func TestCreateKeyLongPass(t *testing.T) { } } -func TestExportKey(t *testing.T) { +func TestExportImportKey(t *testing.T) { initTest() defer deinitTest() const testSize = 10 @@ -73,6 +75,9 @@ func 
TestExportKey(t *testing.T) { if len(exported) != 48 { t.Errorf("invalid exported address : length = %d", len(exported)) } + imported, err := ks.ImportKey(exported, pass, pass) + assert.NoError(t, err, "import") + assert.Equal(t, imported, addr, "import result") } } @@ -127,3 +132,26 @@ func TestSign(t *testing.T) { } } } + +func TestConcurrentUnlockAndLock(t *testing.T) { + initTest() + defer deinitTest() + + pass := "password" + addr, err := ks.CreateKey(pass) + if err != nil { + t.Errorf("could not create key : %s", err.Error()) + } + + const testSize = 50 + var wg sync.WaitGroup + for i := 0; i < testSize; i++ { + wg.Add(1) + go func(wg *sync.WaitGroup, id int) { + defer wg.Done() + ks.Unlock(addr, pass) + ks.Lock(addr, pass) + }(&wg, i) + } + wg.Wait() +} diff --git a/aergo-protobuf b/aergo-protobuf index 9e86ed578..3835057bd 160000 --- a/aergo-protobuf +++ b/aergo-protobuf @@ -1 +1 @@ -Subproject commit 9e86ed57878f691d4a5ab70c50d3410013cccd8b +Subproject commit 3835057bd44d7085b3d9d9c626323d891a79d300 diff --git a/chain/chaindb.go b/chain/chaindb.go index d1b2f3053..aaaf6cebb 100644 --- a/chain/chaindb.go +++ b/chain/chaindb.go @@ -26,19 +26,25 @@ const ( chainDBName = "chain" genesisKey = chainDBName + ".genesisInfo" genesisBalanceKey = chainDBName + ".genesisBalance" - - TxBatchMax = 10000 ) var ( // ErrNoChainDB reports chaindb is not prepared. - ErrNoChainDB = fmt.Errorf("chaindb not prepared") - ErrorLoadBestBlock = errors.New("failed to load latest block from DB") - ErrCantDropGenesis = errors.New("can't drop genesis block") - ErrTooBigResetHeight = errors.New("reset height is too big") + ErrNoChainDB = fmt.Errorf("chaindb not prepared") + ErrorLoadBestBlock = errors.New("failed to load latest block from DB") + ErrCantDropGenesis = errors.New("can't drop genesis block") + ErrTooBigResetHeight = errors.New("reset height is too big") + ErrInvalidHardState = errors.New("invalid hard state") + ErrInvalidRaftSnapshot = errors.New("invalid raft snapshot") latestKey = []byte(chainDBName + ".latest") receiptsPrefix = []byte("r") + + raftIdentityKey = []byte("r_identity") + raftStateKey = []byte("r_state") + raftSnapKey = []byte("r_snap") + raftEntryLastIdxKey = []byte("r_last") + raftEntryPrefix = []byte("r_entry.") ) // ErrNoBlock reports there is no such a block with id (hash or block number). @@ -93,6 +99,13 @@ func (cdb *ChainDB) Init(dbType string, dataDir string) error { if err := cdb.loadChainData(); err != nil { return err } + + // recover from reorg marker + if err := cdb.recover(); err != nil { + logger.Error().Err(err).Msg("failed to recover chain database from crash") + return err + } + // // if empty then create new genesis block // // if cdb.getBestBlockNo() == 0 && len(cdb.blocks) == 0 { // blockIdx := types.BlockNoToBytes(0) @@ -103,6 +116,23 @@ func (cdb *ChainDB) Init(dbType string, dataDir string) error { return nil } +func (cdb *ChainDB) recover() error { + marker, err := cdb.getReorgMarker() + if err != nil { + return err + } + + if marker == nil { + return nil + } + + if err := marker.RecoverChainMapping(cdb); err != nil { + return err + } + + return nil +} + // ResetBest reset best block of chain db manually remove blocks from original // best to resetNo. 
// @@ -275,7 +305,7 @@ func (cdb *ChainDB) addGenesisBlock(genesis *types.Genesis) error { block.BlockID() } - cdb.connectToChain(&tx, block) + cdb.connectToChain(&tx, block, false) tx.Set([]byte(genesisKey), genesis.Bytes()) if totalBalance := genesis.TotalBalance(); totalBalance != nil { tx.Set([]byte(genesisBalanceKey), totalBalance.Bytes()) @@ -332,12 +362,14 @@ func (cdb *ChainDB) setLatest(newBestBlock *types.Block) (oldLatest types.BlockN return } -func (cdb *ChainDB) connectToChain(dbtx *db.Transaction, block *types.Block) (oldLatest types.BlockNo) { +func (cdb *ChainDB) connectToChain(dbtx *db.Transaction, block *types.Block, skipAdd bool) (oldLatest types.BlockNo) { blockNo := block.GetHeader().GetBlockNo() blockIdx := types.BlockNoToBytes(blockNo) - if err := cdb.addBlock(dbtx, block); err != nil { - return 0 + if !skipAdd { + if err := cdb.addBlock(dbtx, block); err != nil { + return 0 + } } // Update best block hash @@ -358,7 +390,7 @@ func (cdb *ChainDB) connectToChain(dbtx *db.Transaction, block *types.Block) (ol return } -func (cdb *ChainDB) swapChain(newBlocks []*types.Block) error { +func (cdb *ChainDB) swapChainMapping(newBlocks []*types.Block) error { oldNo := cdb.getBestBlockNo() newNo := newBlocks[0].GetHeader().GetBlockNo() @@ -369,41 +401,24 @@ func (cdb *ChainDB) swapChain(newBlocks []*types.Block) error { } var blockIdx []byte - var dbTx db.Transaction - - txCnt := 0 - dbTx = cdb.store.NewTx() - defer dbTx.Discard() + bulk := cdb.store.NewBulk() + defer bulk.DiscardLast() //make newTx because of batchsize limit of DB - getNewTx := func(remainTxCnt int) { - if txCnt+remainTxCnt >= TxBatchMax { - dbTx.Commit() - dbTx = cdb.store.NewTx() - txCnt = 0 - } - } - for i := len(newBlocks) - 1; i >= 0; i-- { block := newBlocks[i] blockIdx = types.BlockNoToBytes(block.GetHeader().GetBlockNo()) - dbTx.Set(blockIdx, block.BlockHash()) - - txCnt++ - - getNewTx(0) + bulk.Set(blockIdx, block.BlockHash()) } - getNewTx(5) - - dbTx.Set(latestKey, blockIdx) + bulk.Set(latestKey, blockIdx) // Save the last consensus status. 
- cdb.cc.Save(dbTx) + cdb.cc.Save(bulk) - dbTx.Commit() + bulk.Flush() cdb.setLatest(newBlocks[0]) @@ -445,6 +460,10 @@ func (cdb *ChainDB) addTxsOfBlock(dbTx *db.Transaction, txs []*types.Tx, blockHa return err } + + if err := TestDebugger.Check(DEBUG_CHAIN_STOP, 4, nil); err != nil { + return err + } } return nil @@ -564,6 +583,10 @@ func (cdb *ChainDB) GetBlockByNo(blockNo types.BlockNo) (*types.Block, error) { return cdb.getBlock(blockHash) } +func (cdb *ChainDB) GetBlock(blockHash []byte) (*types.Block, error) { + return cdb.getBlock(blockHash) +} + func (cdb *ChainDB) getBlock(blockHash []byte) (*types.Block, error) { if blockHash == nil { return nil, fmt.Errorf("block hash invalid(nil)") @@ -692,3 +715,49 @@ func receiptsKey(blockHash []byte, blockNo types.BlockNo) []byte { key.Write(l) return key.Bytes() } + +func (cdb *ChainDB) writeReorgMarker(marker *ReorgMarker) error { + dbTx := cdb.store.NewTx() + defer dbTx.Discard() + + val, err := marker.toBytes() + if err != nil { + logger.Error().Err(err).Msg("failed to serialize reorg marker") + return err + } + + dbTx.Set(reorgKey, val) + + dbTx.Commit() + return nil +} + +func (cdb *ChainDB) deleteReorgMarker() { + dbTx := cdb.store.NewTx() + defer dbTx.Discard() + + dbTx.Delete(reorgKey) + + dbTx.Commit() +} + +func (cdb *ChainDB) getReorgMarker() (*ReorgMarker, error) { + data := cdb.store.Get(reorgKey) + if len(data) == 0 { + return nil, nil + } + + var marker ReorgMarker + var b bytes.Buffer + b.Write(data) + decoder := gob.NewDecoder(&b) + err := decoder.Decode(&marker) + + return &marker, err +} + +// implement ChainWAL interface +func (cdb *ChainDB) IsNew() bool { + //TODO + return true +} diff --git a/chain/chaindbForRaft.go b/chain/chaindbForRaft.go new file mode 100644 index 000000000..0004de31d --- /dev/null +++ b/chain/chaindbForRaft.go @@ -0,0 +1,298 @@ +package chain + +import ( + "bytes" + "encoding/binary" + "encoding/gob" + "errors" + "github.com/aergoio/aergo/consensus" + "github.com/aergoio/aergo/types" + "github.com/aergoio/etcd/raft/raftpb" + "github.com/gogo/protobuf/proto" +) + +var ( + ErrMismatchedEntry = errors.New("mismatched entry") + ErrNoWalEntry = errors.New("no entry") + ErrEncodeRaftIdentity = errors.New("failed encoding of raft identity") + ErrDecodeRaftIdentity = errors.New("failed decoding of raft identity") +) + +// implement ChainWAL interface +func (cdb *ChainDB) IsWALInited() bool { + if idx, err := cdb.GetRaftEntryLastIdx(); idx > 0 && err != nil { + return true + } + return false +} + +func (cdb *ChainDB) ReadAll() (state raftpb.HardState, ents []raftpb.Entry, err error) { + //TODO + return raftpb.HardState{}, nil, nil +} + +func (cdb *ChainDB) WriteHardState(hardstate *raftpb.HardState) error { + dbTx := cdb.store.NewTx() + defer dbTx.Discard() + + var data []byte + var err error + + logger.Info().Uint64("term", hardstate.Term).Uint64("vote", hardstate.Vote).Uint64("commit", hardstate.Commit).Msg("save hard state") + + if data, err = proto.Marshal(hardstate); err != nil { + logger.Panic().Msg("failed to marshal raft state") + return err + } + dbTx.Set(raftStateKey, data) + dbTx.Commit() + + return nil +} + +func (cdb *ChainDB) GetHardState() (*raftpb.HardState, error) { + data := cdb.store.Get(raftStateKey) + + state := &raftpb.HardState{} + if err := proto.Unmarshal(data, state); err != nil { + logger.Panic().Msg("failed to unmarshal raft state") + return nil, ErrInvalidHardState + } + + logger.Info().Uint64("term", state.Term).Uint64("vote", state.Vote).Uint64("commit", 
state.Commit).Msg("load hard state") + + return state, nil +} + +func getRaftEntryKey(idx uint64) []byte { + var key bytes.Buffer + key.Write(raftEntryPrefix) + l := make([]byte, 8) + binary.LittleEndian.PutUint64(l[:], idx) + key.Write(l) + return key.Bytes() +} + +func (cdb *ChainDB) WriteRaftEntry(ents []*consensus.WalEntry, blocks []*types.Block) error { + var data []byte + var err error + var lastIdx uint64 + + // truncate conflicting entry + last, err := cdb.GetRaftEntryLastIdx() + if err != nil { + return err + } + + dbTx := cdb.store.NewTx() + defer dbTx.Discard() + + if ents[0].Index <= last { + logger.Debug().Uint64("from", ents[0].Index).Uint64("to", last).Msg("truncate conflicting index") + + for i := ents[0].Index; i <= last; i++ { + // delete ents[0].Index ~ lastIndex of wal + dbTx.Delete(getRaftEntryKey(i)) + } + } + + for i, entry := range ents { + logger.Debug().Str("type", consensus.WalEntryType_name[entry.Type]).Uint64("Index", entry.Index).Uint64("term", entry.Term).Msg("add raft log entry") + + if entry.Type == consensus.EntryBlock { + if err := cdb.addBlock(&dbTx, blocks[i]); err != nil { + panic("add block entry") + return err + } + } + + if data, err = entry.ToBytes(); err != nil { + return err + } + + lastIdx = entry.Index + dbTx.Set(getRaftEntryKey(entry.Index), data) + } + + // set lastindex + logger.Debug().Uint64("index", lastIdx).Msg("set last wal entry") + + dbTx.Set(raftEntryLastIdxKey, types.BlockNoToBytes(lastIdx)) + + dbTx.Commit() + + return nil +} + +func (cdb *ChainDB) GetRaftEntry(idx uint64) (*consensus.WalEntry, error) { + data := cdb.store.Get(getRaftEntryKey(idx)) + if len(data) == 0 { + return nil, ErrNoWalEntry + } + + var entry consensus.WalEntry + var b bytes.Buffer + b.Write(data) + decoder := gob.NewDecoder(&b) + if err := decoder.Decode(&entry); err != nil { + return nil, err + } + + if entry.Index != idx { + logger.Error().Uint64("entry", entry.Index).Uint64("req", idx).Msg("mismatched wal entry") + return nil, ErrMismatchedEntry + } + + return &entry, nil +} + +func (cdb *ChainDB) GetRaftEntryLastIdx() (uint64, error) { + lastBytes := cdb.store.Get(raftEntryLastIdxKey) + if lastBytes == nil || len(lastBytes) == 0 { + return 0, nil + } + + return types.BlockNoFromBytes(lastBytes), nil +} + +func (cdb *ChainDB) HasWal() (bool, error) { + last, err := cdb.GetRaftEntryLastIdx() + if err != nil { + return false, err + } + + if last > 0 { + return true, nil + } + + return false, nil +} + +/* +func encodeBool(v bool) ([]byte, error) { + buf := new(bytes.Buffer) + err := binary.Write(buf, binary.LittleEndian, v) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func decodeBool(data []byte) (bool, error) { + var val bool + bufreader := bytes.NewReader(data) + if err := binary.Read(bufreader, binary.LittleEndian, &val); err != nil { + return false, err + } + + return val, nil +} +*/ + +func (cdb *ChainDB) WriteSnapshot(snap *raftpb.Snapshot) error { + var snapdata = consensus.SnapshotData{} + err := snapdata.Decode(snap.Data) + if err != nil { + logger.Fatal().Msg("failed to unmarshal snapshot data to write") + return err + } + + logger.Debug().Str("snapshot", consensus.SnapToString(snap, &snapdata)).Msg("write snapshot to wal") + data, err := proto.Marshal(snap) + if err != nil { + return err + } + + dbTx := cdb.store.NewTx() + dbTx.Set(raftSnapKey, data) + dbTx.Commit() + + return nil +} + +/* +func (cdb *ChainDB) WriteSnapshotDone() error { + data, err := encodeBool(true) + if err != nil { + return err + } + + dbTx := 
cdb.store.NewTx() + dbTx.Set(raftSnapStatusKey, data) + dbTx.Commit() + + return nil +} + +func (cdb *ChainDB) GetSnapshotDone() (bool, error) { + data := cdb.store.Get(raftSnapStatusKey) + if len(data) == 0 { + return false, nil + } + + val, err := decodeBool(data) + if err != nil { + return false, err + } + + return val, nil +} +*/ +func (cdb *ChainDB) GetSnapshot() (*raftpb.Snapshot, error) { + data := cdb.store.Get(raftSnapKey) + if len(data) == 0 { + return nil, nil + } + + snap := &raftpb.Snapshot{} + if err := proto.Unmarshal(data, snap); err != nil { + logger.Panic().Msg("failed to unmarshal raft snap") + return nil, ErrInvalidRaftSnapshot + } + + if snap.Data == nil { + logger.Panic().Msg("raft snap data is nil") + return nil, ErrInvalidRaftSnapshot + } + + return snap, nil +} + +func (cdb *ChainDB) WriteIdentity(identity *consensus.RaftIdentity) error { + dbTx := cdb.store.NewTx() + defer dbTx.Discard() + + logger.Info().Str("id", identity.ToString()).Msg("save raft identity") + + var val bytes.Buffer + + gob := gob.NewEncoder(&val) + if err := gob.Encode(identity); err != nil { + return ErrEncodeRaftIdentity + } + + dbTx.Set(raftIdentityKey, val.Bytes()) + dbTx.Commit() + + return nil +} + +func (cdb *ChainDB) GetIdentity() (*consensus.RaftIdentity, error) { + data := cdb.store.Get(raftIdentityKey) + if len(data) == 0 { + return nil, nil + } + + var id consensus.RaftIdentity + var b bytes.Buffer + b.Write(data) + decoder := gob.NewDecoder(&b) + if err := decoder.Decode(&id); err != nil { + return nil, ErrDecodeRaftIdentity + } + + logger.Info().Uint64("id", id.ID).Str("name", id.Name).Msg("save raft identity") + + return &id, nil +} diff --git a/chain/chainhandle.go b/chain/chainhandle.go index f824d0e20..28a491172 100644 --- a/chain/chainhandle.go +++ b/chain/chainhandle.go @@ -30,6 +30,7 @@ var ( ErrBlockOrphan = errors.New("block is ohphan, so not connected in chain") ErrBlockCachedErrLRU = errors.New("block is in errored blocks cache") ErrBlockTooHighSideChain = errors.New("block no is higher than best block, it should have been reorganized") + ErrStateNoMarker = errors.New("statedb marker of block is not exists") errBlockStale = errors.New("produced block becomes stale") errBlockTimestamp = errors.New("invalid timestamp") @@ -305,7 +306,17 @@ func (cp *chainProcessor) notifyBlockByOther(block *types.Block) { } } +func checkDebugSleep(isBP bool) { + if isBP { + _ = TestDebugger.Check(DEBUG_CHAIN_BP_SLEEP, 0, nil) + } else { + _ = TestDebugger.Check(DEBUG_CHAIN_OTHER_SLEEP, 0, nil) + } +} + func (cp *chainProcessor) executeBlock(block *types.Block) error { + checkDebugSleep(cp.isByBP) + err := cp.ChainService.executeBlock(cp.state, block) cp.state = nil return err @@ -341,8 +352,8 @@ func (cp *chainProcessor) connectToChain(block *types.Block) (types.BlockNo, err dbTx := cp.cdb.store.NewTx() defer dbTx.Discard() - oldLatest := cp.cdb.connectToChain(&dbTx, block) - + // skip to add hash/block if wal of block is already written + oldLatest := cp.cdb.connectToChain(&dbTx, block, cp.isByBP && cp.HasWAL()) if err := cp.cdb.addTxsOfBlock(&dbTx, block.GetBody().GetTxs(), block.BlockHash()); err != nil { return 0, err } @@ -356,7 +367,7 @@ func (cp *chainProcessor) reorganize() error { // - Reorganize if new bestblock then process Txs // - Add block if new bestblock then update context connect next orphan if !cp.isMainChain && cp.needReorg(cp.lastBlock) { - err := cp.reorg(cp.lastBlock) + err := cp.reorg(cp.lastBlock, nil) if e, ok := err.(consensus.ErrorConsensus); ok { 
logger.Info().Err(e).Msg("reorg stopped by consensus error") return nil @@ -460,7 +471,13 @@ func (cs *ChainService) addBlock(newBlock *types.Block, usedBstate *state.BlockS return ErrBlockCachedErrLRU } - _, err := cs.getBlock(newBlock.BlockHash()) + var err error + if !cs.HasWAL() { + _, err = cs.getBlock(newBlock.BlockHash()) + } else { + // check alread connect block + _, err = cs.getBlockByNo(newBlock.GetHeader().GetBlockNo()) + } if err == nil { logger.Warn().Msg("block already exists") return nil @@ -540,7 +557,7 @@ func newBlockExecutor(cs *ChainService, bState *state.BlockState, block *types.B bState = state.NewBlockState(cs.sdb.OpenNewStateDB(cs.sdb.GetRoot())) - exec = NewTxExecutor(block.BlockNo(), block.GetHeader().GetTimestamp(), block.GetHeader().GetPrevBlockHash(), contract.ChainService, block.GetHeader().ChainID) + exec = NewTxExecutor(cs.cdb, block.BlockNo(), block.GetHeader().GetTimestamp(), block.GetHeader().GetPrevBlockHash(), contract.ChainService, block.GetHeader().ChainID) validateSignWait = func() error { return cs.validator.WaitVerifyDone() @@ -567,7 +584,7 @@ func newBlockExecutor(cs *ChainService, bState *state.BlockState, block *types.B } // NewTxExecutor returns a new TxExecFn. -func NewTxExecutor(blockNo types.BlockNo, ts int64, prevBlockHash []byte, preLoadService int, chainID []byte) TxExecFn { +func NewTxExecutor(cdb contract.ChainAccessor, blockNo types.BlockNo, ts int64, prevBlockHash []byte, preLoadService int, chainID []byte) TxExecFn { return func(bState *state.BlockState, tx types.Transaction) error { if bState == nil { logger.Error().Msg("bstate is nil in txexec") @@ -575,10 +592,13 @@ func NewTxExecutor(blockNo types.BlockNo, ts int64, prevBlockHash []byte, preLoa } snapshot := bState.Snapshot() - err := executeTx(bState, tx, blockNo, ts, prevBlockHash, preLoadService, common.Hasher(chainID)) + err := executeTx(cdb, bState, tx, blockNo, ts, prevBlockHash, preLoadService, common.Hasher(chainID)) if err != nil { logger.Error().Err(err).Str("hash", enc.ToString(tx.GetHash())).Msg("tx failed") - bState.Rollback(snapshot) + if err2 := bState.Rollback(snapshot); err2 != nil { + logger.Panic().Err(err).Msg("faield to rollback block state") + } + return err } return nil @@ -669,6 +689,7 @@ func (cs *ChainService) executeBlock(bstate *state.BlockState, block *types.Bloc return err } + // TODO refactoring: receive execute function as argument (executeBlock or executeBlockReco) ex, err := newBlockExecutor(cs, bstate, block) if err != nil { return err @@ -692,6 +713,44 @@ func (cs *ChainService) executeBlock(bstate *state.BlockState, block *types.Bloc return nil } +// TODO: Refactoring: batch +func (cs *ChainService) executeBlockReco(_ *state.BlockState, block *types.Block) error { + // Caution: block must belong to the main chain. 
+ logger.Debug().Str("hash", block.ID()).Uint64("no", block.GetHeader().BlockNo).Msg("start to execute for reco") + + var ( + bestBlock *types.Block + err error + ) + + if bestBlock, err = cs.cdb.GetBestBlock(); err != nil { + return err + } + + // Check consensus info validity + // TODO remove bestblock + if err = cs.IsBlockValid(block, bestBlock); err != nil { + return err + } + + if !cs.sdb.GetStateDB().HasMarker(block.GetHeader().GetBlocksRootHash()) { + logger.Error().Str("hash", block.ID()).Uint64("no", block.GetHeader().GetBlockNo()).Msg("state marker does not exist") + return ErrStateNoMarker + } + + // move stateroot + if err := cs.sdb.SetRoot(block.GetHeader().GetBlocksRootHash()); err != nil { + return fmt.Errorf("failed to set sdb(branchRoot:no=%d,hash=%v)", block.GetHeader().GetBlockNo(), + block.ID()) + } + + cs.Update(block) + + logger.Debug().Uint64("no", block.GetHeader().BlockNo).Msg("end to execute for reco") + + return nil +} + func (cs *ChainService) notifyEvents(block *types.Block, bstate *state.BlockState) { blkNo := block.GetHeader().GetBlockNo() blkHash := block.BlockHash() @@ -728,7 +787,7 @@ func adjustRv(ret string) string { return ret } -func executeTx(bs *state.BlockState, tx types.Transaction, blockNo uint64, ts int64, prevBlockHash []byte, preLoadService int, chainIDHash []byte) error { +func executeTx(cdb contract.ChainAccessor, bs *state.BlockState, tx types.Transaction, blockNo uint64, ts int64, prevBlockHash []byte, preLoadService int, chainIDHash []byte) error { txBody := tx.GetBody() @@ -778,7 +837,7 @@ func executeTx(bs *state.BlockState, tx types.Transaction, blockNo uint64, ts in var events []*types.Event switch txBody.Type { case types.TxType_NORMAL: - rv, events, txFee, err = contract.Execute(bs, tx.GetTx(), blockNo, ts, prevBlockHash, sender, receiver, preLoadService) + rv, events, txFee, err = contract.Execute(bs, cdb, tx.GetTx(), blockNo, ts, prevBlockHash, sender, receiver, preLoadService) sender.SubBalance(txFee) case types.TxType_GOVERNANCE: txFee = new(big.Int).SetUint64(0) @@ -862,7 +921,7 @@ func (cs *ChainService) resolveOrphan(block *types.Block) (*types.Block, error) return nil, nil } - orphanBlock := orphan.block + orphanBlock := orphan.Block if (block.GetHeader().GetBlockNo() + 1) != orphanBlock.GetHeader().GetBlockNo() { return nil, fmt.Errorf("invalid orphan block no (p=%d, c=%d)", block.GetHeader().GetBlockNo(), @@ -873,7 +932,9 @@ func (cs *ChainService) resolveOrphan(block *types.Block) (*types.Block, error) Str("orphan", orphanBlock.ID()). 
Msg("connect orphan") - cs.op.removeOrphan(orphanID) + if err := cs.op.removeOrphan(orphanID); err != nil { + return nil, err + } return orphanBlock, nil } diff --git a/chain/chainhandle_test.go b/chain/chainhandle_test.go index 00b67c21c..f3fc537a0 100644 --- a/chain/chainhandle_test.go +++ b/chain/chainhandle_test.go @@ -60,33 +60,33 @@ func TestErrorInExecuteTx(t *testing.T) { tx := &types.Tx{} - err := executeTx(bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) + err := executeTx(nil, bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) assert.EqualError(t, err, types.ErrTxFormatInvalid.Error(), "execute empty tx") tx.Body = &types.TxBody{} - err = executeTx(bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) + err = executeTx(nil, bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) assert.EqualError(t, err, types.ErrTxInvalidChainIdHash.Error(), "execute empty tx body") tx.Body.ChainIdHash = chainID tx.Body.Account = makeTestAddress(t) tx.Body.Recipient = makeTestAddress(t) - err = executeTx(bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) + err = executeTx(nil, bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) assert.EqualError(t, err, types.ErrTxHasInvalidHash.Error(), "execute tx body with account") signTestAddress(t, tx) - err = executeTx(bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) + err = executeTx(nil, bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) assert.EqualError(t, err, types.ErrTxNonceTooLow.Error(), "execute tx body with account") tx.Body.Nonce = 1 tx.Body.Amount = new(big.Int).Add(types.StakingMinimum, types.StakingMinimum).Bytes() signTestAddress(t, tx) - err = executeTx(bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) + err = executeTx(nil, bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) assert.EqualError(t, err, types.ErrInsufficientBalance.Error(), "execute tx body with nonce") tx.Body.Amount = types.MaxAER.Bytes() signTestAddress(t, tx) - err = executeTx(bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) + err = executeTx(nil, bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) assert.EqualError(t, err, types.ErrInsufficientBalance.Error(), "execute tx body with nonce") } @@ -102,13 +102,13 @@ func TestBasicExecuteTx(t *testing.T) { tx.Body.Recipient = makeTestAddress(t) tx.Body.Nonce = 1 signTestAddress(t, tx) - err := executeTx(bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) + err := executeTx(nil, bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) assert.NoError(t, err, "execute amount 0") tx.Body.Nonce = 2 tx.Body.Amount = new(big.Int).SetUint64(1000).Bytes() signTestAddress(t, tx) - err = executeTx(bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) + err = executeTx(nil, bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) assert.NoError(t, err, "execute amount 1000") tx.Body.Nonce = 3 @@ -118,7 +118,7 @@ func TestBasicExecuteTx(t *testing.T) { tx.Body.Type = types.TxType_GOVERNANCE tx.Body.Payload = []byte(`{"Name":"v1stake"}`) signTestAddress(t, tx) - err = executeTx(bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) + err = executeTx(nil, bs, types.NewTransaction(tx), 0, 0, nil, contract.ChainService, chainID) assert.NoError(t, err, "execute governance type") } diff --git 
a/chain/chainservice.go b/chain/chainservice.go index 9c9dd2925..2abae5a7a 100644 --- a/chain/chainservice.go +++ b/chain/chainservice.go @@ -12,6 +12,7 @@ import ( "math/big" "reflect" "runtime" + "sync/atomic" "github.com/aergoio/aergo-actor/actor" "github.com/aergoio/aergo-lib/log" @@ -27,8 +28,8 @@ import ( "github.com/aergoio/aergo/pkg/component" "github.com/aergoio/aergo/state" "github.com/aergoio/aergo/types" - lru "github.com/hashicorp/golang-lru" - peer "github.com/libp2p/go-libp2p-peer" + "github.com/hashicorp/golang-lru" + "github.com/libp2p/go-libp2p-peer" ) var ( @@ -37,6 +38,10 @@ var ( dfltErrBlocks = 128 ErrNotSupportedConsensus = errors.New("not supported by this consensus") + ErrRecoNoBestStateRoot = errors.New("state root of best block is not exist") + ErrRecoInvalidSdbRoot = errors.New("state root of sdb is invalid") + + TestDebugger *Debugger ) // Core represents a storage layer of a blockchain (chain & state DB). @@ -176,7 +181,7 @@ type IChainHandler interface { getAccountVote(id []string, addr []byte) (*types.AccountVoteInfo, error) getVotes(id string, n uint32) (*types.VoteList, error) getStaking(addr []byte) (*types.Staking, error) - getNameInfo(name string) (*types.NameInfo, error) + getNameInfo(name string, blockNo types.BlockNo) (*types.NameInfo, error) addBlock(newBlock *types.Block, usedBstate *state.BlockState, peerID peer.ID) error getAnchorsNew() (ChainAnchor, types.BlockNo, error) findAncestor(Hashes [][]byte) (*types.BlockInfo, error) @@ -198,15 +203,23 @@ type ChainService struct { chainWorker *ChainWorker chainManager *ChainManager + + stat stats + + recovered atomic.Value + debuggable bool } // NewChainService creates an instance of ChainService. func NewChainService(cfg *cfg.Config) *ChainService { cs := &ChainService{ - cfg: cfg, - op: NewOrphanPool(), + cfg: cfg, + op: NewOrphanPool(DfltOrphanPoolSize), + stat: newStats(), } + cs.setRecovered(false) + var err error if cs.Core, err = NewCore(cfg.DbType, cfg.DataDir, cfg.EnableTestmode, types.BlockNo(cfg.Blockchain.ForceResetHeight)); err != nil { logger.Fatal().Err(err).Msg("failed to initialize DB") @@ -259,9 +272,16 @@ func NewChainService(cfg *cfg.Config) *ChainService { contract.PubNet = pubNet contract.StartLStateFactory() + // init Debugger + cs.initDebugger() + return cs } +func (cs *ChainService) initDebugger() { + TestDebugger = newDebugger() +} + // SDB returns cs.sdb. func (cs *ChainService) SDB() *state.ChainStateDB { return cs.sdb @@ -272,6 +292,11 @@ func (cs *ChainService) CDB() consensus.ChainDB { return cs.cdb } +// CDB returns cs.sdb as a consensus.ChainDbReader. +func (cs *ChainService) WalDB() consensus.ChainWAL { + return cs.cdb +} + // GetConsensusInfo returns consensus-related information, which is different // from consensus to consensus. func (cs *ChainService) GetConsensusInfo() string { @@ -282,6 +307,10 @@ func (cs *ChainService) GetConsensusInfo() string { return cs.Info() } +func (cs *ChainService) GetChainStats() string { + return cs.stat.JSON() +} + // SetChainConsensus sets cs.cc to cc. 
func (cs *ChainService) SetChainConsensus(cc consensus.ChainConsensus) { cs.ChainConsensus = cc @@ -309,6 +338,10 @@ func (cs *ChainService) BeforeStop() { } func (cs *ChainService) notifyBlock(block *types.Block, isByBP bool) { + if !cs.NeedNotify() { + return + } + cs.BaseComponent.RequestTo(message.P2PSvc, &message.NotifyNewBlock{ Produced: isByBP, @@ -317,8 +350,30 @@ func (cs *ChainService) notifyBlock(block *types.Block, isByBP bool) { }) } +func (cs *ChainService) setRecovered(val bool) { + cs.recovered.Store(val) + return +} + +func (cs *ChainService) isRecovered() bool { + var val bool + aopv := cs.recovered.Load() + if aopv != nil { + val = aopv.(bool) + } else { + panic("ChainService: recovered is nil") + } + return val +} + // Receive actor message func (cs *ChainService) Receive(context actor.Context) { + if !cs.isRecovered() { + err := cs.Recover() + if err != nil { + logger.Fatal().Err(err).Msg("CHAIN DATA IS CRASHED, BUT CAN'T BE RECOVERED") + } + } switch msg := context.Message().(type) { case *message.AddBlock, @@ -452,16 +507,18 @@ func (cs *ChainService) getStaking(addr []byte) (*types.Staking, error) { return staking, nil } -func (cs *ChainService) getNameInfo(qname string) (*types.NameInfo, error) { - scs, err := cs.sdb.GetStateDB().OpenContractStateAccount(types.ToAccountID([]byte(types.AergoName))) - if err != nil { - return nil, err - } - owner := name.GetOwner(scs, []byte(qname)) - if owner == nil { - return &types.NameInfo{Name: &types.Name{Name: string(qname)}, Owner: nil}, types.ErrNameNotFound +func (cs *ChainService) getNameInfo(qname string, blockNo types.BlockNo) (*types.NameInfo, error) { + var stateDB *state.StateDB + if blockNo != 0 { + block, err := cs.cdb.GetBlockByNo(blockNo) + if err != nil { + return nil, err + } + stateDB = cs.sdb.OpenNewStateDB(block.GetHeader().GetBlocksRootHash()) + } else { + stateDB = cs.sdb.GetStateDB() } - return &types.NameInfo{Name: &types.Name{Name: string(qname)}, Owner: owner, Destination: name.GetAddress(scs, []byte(qname))}, err + return name.GetNameInfo(stateDB, qname) } type ChainManager struct { @@ -496,6 +553,8 @@ func newChainWorker(cs *ChainService, cntWorker int, core *Core) *ChainWorker { } func (cm *ChainManager) Receive(context actor.Context) { + defer RecoverExit() + switch msg := context.Message().(type) { case *message.AddBlock: @@ -535,7 +594,6 @@ func (cm *ChainManager) Receive(context actor.Context) { case *message.GetAncestor: hashes := msg.Hashes ancestor, err := cm.findAncestor(hashes) - context.Respond(message.GetAncestorRsp{ Ancestor: ancestor, Err: err, @@ -668,7 +726,7 @@ func (cw *ChainWorker) Receive(context actor.Context) { context.Respond(message.GetQueryRsp{Result: nil, Err: err}) } else { bs := state.NewBlockState(cw.sdb.OpenNewStateDB(cw.sdb.GetRoot())) - ret, err := contract.Query(address, bs, ctrState, msg.Queryinfo) + ret, err := contract.Query(address, bs, cw.cdb, ctrState, msg.Queryinfo) context.Respond(message.GetQueryRsp{Result: ret, Err: err}) } case *message.GetStateQuery: @@ -728,7 +786,7 @@ func (cw *ChainWorker) Receive(context actor.Context) { Err: err, }) case *message.GetNameInfo: - owner, err := cw.getNameInfo(msg.Name) + owner, err := cw.getNameInfo(msg.Name, msg.BlockNo) context.Respond(&message.GetNameInfoRsp{ Owner: owner, Err: err, diff --git a/chain/chainservice_test.go b/chain/chainservice_test.go index 9b767a828..a12853c14 100644 --- a/chain/chainservice_test.go +++ b/chain/chainservice_test.go @@ -1,10 +1,10 @@ package chain import ( + "bytes" "fmt" "testing" - 
"github.com/aergoio/aergo-lib/db" "github.com/aergoio/aergo/config" "github.com/aergoio/aergo/consensus" "github.com/aergoio/aergo/state" @@ -40,7 +40,7 @@ func (stubC *StubConsensus) IsBlockValid(block *types.Block, bestBlock *types.Bl func (stubC *StubConsensus) Update(block *types.Block) { } -func (stubC *StubConsensus) Save(tx db.Transaction) error { +func (stubC *StubConsensus) Save(tx consensus.TxWriter) error { return nil } func (stubC *StubConsensus) NeedReorganization(rootNo types.BlockNo) bool { @@ -52,6 +52,12 @@ func (stubC *StubConsensus) Info() string { func (stubC *StubConsensus) GetType() consensus.ConsensusType { return consensus.ConsensusSBP } +func (stubC *StubConsensus) NeedNotify() bool { + return true +} +func (stubC *StubConsensus) HasWAL() bool { + return false +} func makeBlockChain() *ChainService { serverCtx := config.NewServerContext("", "") @@ -72,7 +78,6 @@ func makeBlockChain() *ChainService { return cs } -// Test add block to height 0 chain func testAddBlock(t *testing.T, best int) (*ChainService, *StubBlockChain) { cs := makeBlockChain() @@ -104,6 +109,22 @@ func testAddBlock(t *testing.T, best int) (*ChainService, *StubBlockChain) { return cs, stubChain } +// Test add block to height 0 chain +func testAddBlockNoTest(best int) (*ChainService, *StubBlockChain) { + cs := makeBlockChain() + + genesisBlk, _ := cs.getBlockByNo(0) + + stubChain := InitStubBlockChain([]*types.Block{genesisBlk}, best) + + for i := 1; i <= best; i++ { + newBlock := stubChain.GetBlockByNo(uint64(i)) + _ = cs.addBlock(newBlock, nil, testPeer) + } + + return cs, stubChain +} + func TestAddBlock(t *testing.T) { testAddBlock(t, 1) testAddBlock(t, 10) @@ -208,6 +229,7 @@ func TestSideChainReorg(t *testing.T) { //check if reorg is succeed mainBestBlock, _ = cs.GetBestBlock() assert.Equal(t, sideBestBlock.GetHeader().BlockNo, mainBestBlock.GetHeader().BlockNo) + assert.Equal(t, sideBestBlock.BlockHash(), mainBestBlock.BlockHash()) } @@ -249,7 +271,124 @@ func TestResetChain(t *testing.T) { } } -//TODO -func TestParallelAccess(t *testing.T) { +func TestReorgCrashRecoverBeforeReorgMarker(t *testing.T) { + cs, mainChain, sideChain := testSideBranch(t, 5) + + // add heigher block to sideChain + sideChain.GenAddBlock() + assert.Equal(t, mainChain.Best+1, sideChain.Best) + + sideBestBlock, err := sideChain.GetBestBlock() + assert.NoError(t, err) + + //check top block before reorg + orgBestBlock, _ := cs.GetBestBlock() + assert.Equal(t, mainChain.Best, int(orgBestBlock.GetHeader().BlockNo)) + assert.Equal(t, mainChain.BestBlock.BlockHash(), orgBestBlock.BlockHash()) + assert.Equal(t, orgBestBlock.GetHeader().BlockNo+1, sideBestBlock.GetHeader().BlockNo) + + TestDebugger = newDebugger() + TestDebugger.Set(DEBUG_CHAIN_STOP, 1, false) + + err = cs.addBlock(sideBestBlock, nil, testPeer) + assert.Error(t, &ErrReorg{}) + assert.Equal(t, err.(*ErrReorg).err, &ErrDebug{cond: DEBUG_CHAIN_STOP, value: 1}) + + // check if chain meta is not changed + newBestBlock, _ := cs.GetBestBlock() + assert.Equal(t, newBestBlock.GetHeader().BlockNo, orgBestBlock.GetHeader().BlockNo) + + TestDebugger.clear() + cs.errBlocks.Purge() + + // chain swap is not complete, so has nothing to do + err = cs.Recover() + assert.Nil(t, err) +} +func TestReorgCrashRecoverAfterReorgMarker(t *testing.T) { + testReorgCrashRecoverCond(t, DEBUG_CHAIN_STOP, 2) + testReorgCrashRecoverCond(t, DEBUG_CHAIN_STOP, 3) +} + +func testReorgCrashRecoverCond(t *testing.T, cond StopCond, value int) { + cs, mainChain, sideChain := testSideBranch(t, 5) + + 
// add heigher block to sideChain + sideChain.GenAddBlock() + assert.Equal(t, mainChain.Best+1, sideChain.Best) + + sideBestBlock, err := sideChain.GetBestBlock() + assert.NoError(t, err) + + //check top block before reorg + orgBestBlock, _ := cs.GetBestBlock() + assert.Equal(t, mainChain.Best, int(orgBestBlock.GetHeader().BlockNo)) + assert.Equal(t, mainChain.BestBlock.BlockHash(), orgBestBlock.BlockHash()) + assert.Equal(t, orgBestBlock.GetHeader().BlockNo+1, sideBestBlock.GetHeader().BlockNo) + + TestDebugger = newDebugger() + TestDebugger.Set(cond, value, false) + + err = cs.addBlock(sideBestBlock, nil, testPeer) + assert.Error(t, &ErrReorg{}) + assert.Equal(t, err.(*ErrReorg).err, &ErrDebug{cond: cond, value: value}) + + assert.True(t, !checkRecoveryDone(t, cs.cdb, sideChain)) + + TestDebugger.clear() + cs.errBlocks.Purge() + + // must recover chainDB before chainservice.Recover() + err = cs.cdb.recover() + assert.Nil(t, err) + + // chain swap is not complete, so has nothing to do + err = cs.Recover() + assert.Nil(t, err) + + assert.True(t, checkRecoveryDone(t, cs.cdb, sideChain)) + + var marker *ReorgMarker + marker, err = cs.cdb.getReorgMarker() + assert.Nil(t, err) + assert.Nil(t, marker) +} + +// checkRecoveryDone checks if recovery is complete. +// 1. all blocks of chain has (no/hash) mapping +// 2. old receipts is deleted and new receipt is added if blocks have tx +// 3. old tx mapping is deleted and new tx mapping is added +func checkRecoveryDone(t *testing.T, cdb *ChainDB, chain *StubBlockChain) bool { + // check block mapping + for i := 0; i <= chain.Best; i++ { + block := chain.Blocks[i] + dbBlk, err := cdb.GetBlockByNo(block.GetHeader().GetBlockNo()) + assert.Nil(t, err) + assert.NotNil(t, dbBlk) + + if !checkBlockEqual(t, block, dbBlk) { + return false + } + } + + if marker, err := cdb.getReorgMarker(); err == nil && marker != nil { + return false + } + + return true +} + +func checkBlockEqual(t *testing.T, x *types.Block, y *types.Block) bool { + if (x == nil) != (y == nil) { + t.Log("x or y is nil") + return false + } + + if !bytes.Equal(x.BlockHash(), y.BlockHash()) || x.Header.GetBlockNo() != y.Header.GetBlockNo() { + t.Logf("stubchain:db", x.GetHeader().GetBlockNo(), x.ID(), y.GetHeader().GetBlockNo(), y.ID()) + return false + } + + return true } diff --git a/chain/common.go b/chain/common.go index 8230c2b6e..d23975f49 100644 --- a/chain/common.go +++ b/chain/common.go @@ -26,6 +26,8 @@ var ( maxBlockSize uint32 pubNet bool consensusName string + + Genesis *types.Genesis ) var ( @@ -79,6 +81,8 @@ func initChainParams(genesis *types.Genesis) { types.MaxAER = genesis.TotalBalance() logger.Info().Str("TotalBalance", types.MaxAER.String()).Msg("set total from genesis") } + + Genesis = genesis } // MaxBlockBodySize returns the max block body size. 
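A note on the getNameInfo change in chain/chainservice.go above: the new blockNo parameter enables historical name lookups. Passing 0 keeps the previous behaviour (resolve against the current state), while a non-zero block number resolves against the state root recorded in that block's header. A rough sketch of that rule, written as a standalone helper for illustration only (the real logic lives inside getNameInfo as shown in the hunk):

    package chain

    import (
        "github.com/aergoio/aergo/state"
        "github.com/aergoio/aergo/types"
    )

    // stateAt is a hypothetical helper mirroring the blockNo handling added to
    // getNameInfo: 0 means "latest", anything else opens a read view at that
    // block's state root.
    func stateAt(cs *ChainService, blockNo types.BlockNo) (*state.StateDB, error) {
        if blockNo == 0 {
            return cs.sdb.GetStateDB(), nil
        }
        block, err := cs.cdb.GetBlockByNo(blockNo)
        if err != nil {
            return nil, err
        }
        return cs.sdb.OpenNewStateDB(block.GetHeader().GetBlocksRootHash()), nil
    }

Because the same blockNo now flows through message.GetNameInfo, callers of the chain service can ask who owned a name at an earlier block without replaying state.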
diff --git a/chain/debugger.go b/chain/debugger.go new file mode 100644 index 000000000..f1f881fdf --- /dev/null +++ b/chain/debugger.go @@ -0,0 +1,204 @@ +package chain + +import ( + "fmt" + "os" + "strconv" + "sync" + "time" +) + +type ErrDebug struct { + cond StopCond + value int +} + +type StopCond int + +// stop before swap chain +const ( + DEBUG_CHAIN_STOP StopCond = 0 + iota + DEBUG_CHAIN_RANDOM_STOP + DEBUG_CHAIN_BP_SLEEP + DEBUG_CHAIN_OTHER_SLEEP + DEBUG_SYNCER_CRASH + DEBUG_RAFT_SNAP_FREQ // change snap frequency after first snapshot +) + +const ( + DEBUG_CHAIN_STOP_INF = DEBUG_RAFT_SNAP_FREQ +) + +var ( + EnvNameStaticCrash = "DEBUG_CHAIN_CRASH" // 1 ~ 4 + EnvNameRandomCrashTime = "DEBUG_RANDOM_CRASH_TIME" // 1 ~ 600000(=10min) ms + EnvNameChainBPSleep = "DEBUG_CHAIN_BP_SLEEP" // bp node sleeps before connecting block for each block (ms). used + EnvNameChainOtherSleep = "DEBUG_CHAIN_OTHER_SLEEP" // non bp node sleeps before connecting block for each block (ms). + EnvNameSyncCrash = "DEBUG_SYNCER_CRASH" // case 1 + EnvNameRaftSnapFreq = "DEBUG_RAFT_SNAP_FREQ" // case 1 +) + +var stopConds = [...]string{ + EnvNameStaticCrash, + EnvNameRandomCrashTime, + EnvNameChainBPSleep, + EnvNameChainOtherSleep, + EnvNameSyncCrash, + EnvNameRaftSnapFreq, +} + +type DebugHandler func(value int) error + +func (c StopCond) String() string { return stopConds[c] } + +func (ec *ErrDebug) Error() string { + return fmt.Sprintf("stopped by debugger cond[%s]=%d", ec.cond.String(), ec.value) +} + +type Debugger struct { + sync.RWMutex + condMap map[StopCond]int + isEnv map[StopCond]bool +} + +func newDebugger() *Debugger { + dbg := &Debugger{condMap: make(map[StopCond]int), isEnv: make(map[StopCond]bool)} + + checkEnv := func(condName StopCond) { + envName := stopConds[condName] + + envStr := os.Getenv(envName) + if len(envStr) > 0 { + val, err := strconv.Atoi(envStr) + if err != nil { + logger.Error().Err(err).Msgf("%s environment varialble must be integer", envName) + return + } + logger.Debug().Int("value", val).Msgf("env variable[%s] is set", envName) + + dbg.Set(condName, val, true) + } + } + + checkEnv(DEBUG_CHAIN_STOP) + checkEnv(DEBUG_CHAIN_RANDOM_STOP) + checkEnv(DEBUG_CHAIN_BP_SLEEP) + checkEnv(DEBUG_CHAIN_OTHER_SLEEP) + checkEnv(DEBUG_SYNCER_CRASH) + checkEnv(DEBUG_RAFT_SNAP_FREQ) + + return dbg +} + +func (debug *Debugger) Set(cond StopCond, value int, env bool) { + if debug == nil { + return + } + + debug.Lock() + defer debug.Unlock() + + logger.Debug().Int("cond", int(cond)).Str("name", stopConds[cond]).Int("val", value).Msg("set debug condition") + + debug.condMap[cond] = value + debug.isEnv[cond] = env +} + +func (debug *Debugger) Unset(cond StopCond) { + if debug == nil { + return + } + + debug.Lock() + defer debug.Unlock() + + logger.Debug().Str("cond", cond.String()).Msg("deubugger condition is unset") + delete(debug.condMap, cond) +} + +func (debug *Debugger) clear() { + if debug == nil { + return + } + + debug.Lock() + defer debug.Unlock() + + debug.condMap = make(map[StopCond]int) + debug.isEnv = make(map[StopCond]bool) +} + +func (debug *Debugger) Check(cond StopCond, value int, handler DebugHandler) error { + if debug == nil { + return nil + } + + debug.Lock() + defer debug.Unlock() + + if setVal, ok := debug.condMap[cond]; ok { + logger.Debug().Str("cond", stopConds[cond]).Int("val", setVal).Msg("check debug condition") + + switch cond { + case DEBUG_CHAIN_STOP: + if setVal == value { + if debug.isEnv[cond] { + logger.Fatal().Str("cond", stopConds[cond]).Msg("shutdown by 
DEBUG_CHAIN_CRASH") + } else { + return &ErrDebug{cond: cond, value: value} + } + } + + case DEBUG_CHAIN_RANDOM_STOP: + go crashRandom(setVal) + handleCrashRandom(setVal) + + case DEBUG_CHAIN_OTHER_SLEEP, DEBUG_CHAIN_BP_SLEEP: + handleChainSleep(setVal) + + case DEBUG_SYNCER_CRASH: + if setVal == value { + return handleSyncerCrash(setVal, cond) + } + case DEBUG_RAFT_SNAP_FREQ: + handler(setVal) + } + } + + return nil +} + +func handleChainSleep(sleepMils int) { + logger.Debug().Int("sleep(ms)", sleepMils).Msg("before chain sleep") + + time.Sleep(time.Millisecond * time.Duration(sleepMils)) + + logger.Debug().Msg("after chain sleep") +} + +func handleCrashRandom(waitMils int) { + logger.Debug().Int("after(ms)", waitMils).Msg("before random crash") + + go crashRandom(waitMils) +} + +func handleSyncerCrash(val int, cond StopCond) error { + if val == 1 { + logger.Fatal().Int("val", val).Msg("sync crash by DEBUG_SYNC_CRASH") + return nil + } else { + return &ErrDebug{cond: cond, value: val} + } +} + +func crashRandom(waitMils int) { + if waitMils <= 0 { + return + } + + time.Sleep(time.Millisecond * time.Duration(waitMils)) + + logger.Debug().Msg("shutdown by DEBUG_RANDOM_CRASH_TIME") + + os.Exit(100) +} diff --git a/chain/orphanpool.go b/chain/orphanpool.go index ee51b6044..562a9711e 100644 --- a/chain/orphanpool.go +++ b/chain/orphanpool.go @@ -6,29 +6,45 @@ package chain import ( + "errors" "sync" - "time" "github.com/aergoio/aergo/internal/enc" "github.com/aergoio/aergo/types" + "github.com/hashicorp/golang-lru/simplelru" +) + +var ( + DfltOrphanPoolSize = 100 + + ErrRemoveOldestOrphan = errors.New("failed to remove oldest orphan block") + ErrNotExistOrphanLRU = errors.New("given orphan doesn't exist in lru") ) type OrphanBlock struct { - block *types.Block - expiretime time.Time + *types.Block } type OrphanPool struct { sync.RWMutex - cache map[types.BlockID]*OrphanBlock + cache map[types.BlockID]*OrphanBlock + lru *simplelru.LRU + maxCnt int curCnt int } -func NewOrphanPool() *OrphanPool { +func NewOrphanPool(size int) *OrphanPool { + lru, err := simplelru.NewLRU(DfltOrphanPoolSize, nil) + if err != nil { + logger.Fatal().Err(err).Msg("failed to init lru") + return nil + } + return &OrphanPool{ cache: map[types.BlockID]*OrphanBlock{}, - maxCnt: 1000, + lru: lru, + maxCnt: size, curCnt: 0, } } @@ -41,19 +57,22 @@ func (op *OrphanPool) addOrphan(block *types.Block) error { cachedblock, exists := op.cache[id] if exists { logger.Debug().Str("hash", block.ID()). 
- Str("cached", cachedblock.block.ID()).Msg("already exist") + Str("cached", cachedblock.ID()).Msg("already exist") return nil } - if op.maxCnt == op.curCnt { + if op.isFull() { logger.Debug().Msg("orphan block pool is full") // replace one - op.removeOldest() - } - op.cache[id] = &OrphanBlock{ - block: block, - expiretime: time.Now().Add(time.Hour), + if err := op.removeOldest(); err != nil { + return err + } } + + orpEntry := &OrphanBlock{Block: block} + + op.cache[id] = orpEntry + op.lru.Add(id, orpEntry) op.curCnt++ return nil @@ -69,40 +88,49 @@ func (op *OrphanPool) getRoot(block *types.Block) types.BlockID { break } orphanRoot = prevID - prevID = types.ToBlockID(orphan.block.Header.PrevBlockHash) + prevID = types.ToBlockID(orphan.Header.PrevBlockHash) } return orphanRoot } +func (op *OrphanPool) isFull() bool { + return op.maxCnt == op.curCnt +} + // remove oldest block, but also remove expired -func (op *OrphanPool) removeOldest() { - // remove all expired - var oldest *OrphanBlock - for key, orphan := range op.cache { - if time.Now().After(orphan.expiretime) { - logger.Debug().Str("hash", key.String()).Msg("orphan block removed(expired)") - op.removeOrphan(key) - } +func (op *OrphanPool) removeOldest() error { + var ( + id types.BlockID + ) - // choose at least one victim - if oldest == nil || orphan.expiretime.Before(oldest.expiretime) { - oldest = orphan - } + if !op.isFull() { + return nil } - // remove oldest one - if op.curCnt == op.maxCnt { - id := types.ToBlockID(oldest.block.Header.PrevBlockHash) - logger.Debug().Str("hash", id.String()).Msg("orphan block removed(oldest)") - op.removeOrphan(id) + key, _, ok := op.lru.GetOldest() + if !ok { + return ErrRemoveOldestOrphan } + + id = key.(types.BlockID) + if err := op.removeOrphan(id); err != nil { + return err + } + + logger.Debug().Str("hash", id.String()).Msg("orphan block removed(oldest)") + + return nil } // remove one single element by id (must succeed) -func (op *OrphanPool) removeOrphan(id types.BlockID) { - delete(op.cache, id) +func (op *OrphanPool) removeOrphan(id types.BlockID) error { op.curCnt-- + delete(op.cache, id) + if exist := op.lru.Remove(id); !exist { + return ErrNotExistOrphanLRU + } + return nil } func (op *OrphanPool) getOrphan(hash []byte) *types.Block { @@ -112,6 +140,6 @@ func (op *OrphanPool) getOrphan(hash []byte) *types.Block { if !exists { return nil } else { - return orphan.block + return orphan.Block } } diff --git a/chain/orphanpool_test.go b/chain/orphanpool_test.go new file mode 100644 index 000000000..b7cf5c88e --- /dev/null +++ b/chain/orphanpool_test.go @@ -0,0 +1,117 @@ +package chain + +import ( + "github.com/aergoio/aergo/types" + "github.com/stretchr/testify/assert" + "testing" +) + +func checkExist(t *testing.T, orp *OrphanPool, blk *types.Block) { + var orphan *types.Block + orphan = orp.getOrphan(blk.Header.GetPrevBlockHash()) + + assert.NotNil(t, orphan) + assert.Equal(t, orphan.BlockHash(), blk.BlockHash()) +} + +func TestOrphanPool(t *testing.T) { + // measure time to add block + var stubChain *StubBlockChain + var orp *OrphanPool + var orphan *types.Block + + orp = NewOrphanPool(5) + + _, stubChain = testAddBlockNoTest(10) + + start := 1 + for i := start; i <= 5; i++ { + blk := stubChain.GetBlockByNo(uint64(i)) + + err := orp.addOrphan(blk) + assert.NoError(t, err) + + checkExist(t, orp, blk) + } + + // check pool is full + assert.True(t, orp.isFull()) + + // remove oldest and put new one + blk := stubChain.GetBlockByNo(6) + err := orp.addOrphan(blk) + assert.NoError(t, err) 
+ + checkExist(t, orp, blk) + + // first block is removed + startBlock := stubChain.GetBlockByNo(uint64(start)) + orphan = orp.getOrphan(startBlock.Header.GetPrevBlockHash()) + assert.Nil(t, orphan) + + assert.True(t, orp.isFull()) +} + +func TestOrphanSamePrev(t *testing.T) { + var orp *OrphanPool + + mainChainBest := 3 + _, mainChain := testAddBlock(t, mainChainBest) + + // make branch + sideChain := InitStubBlockChain(mainChain.Blocks[0:mainChainBest+1], 1) + + // make fork + mainChain.GenAddBlock() + + mBest := mainChain.BestBlock + sBest := sideChain.BestBlock + + assert.Equal(t, mBest.PrevBlockID(), sBest.PrevBlockID()) + + // No.4 blocks of mainchain and sidechain have same previous hash + orp = NewOrphanPool(5) + + err := orp.addOrphan(mBest) + assert.NoError(t, err) + + err = orp.addOrphan(sBest) + assert.NoError(t, err) + + checkExist(t, orp, mBest) + + orphan := orp.getOrphan(sBest.Header.GetPrevBlockHash()) + assert.Equal(t, orphan.BlockHash(), mBest.BlockHash()) +} + +func BenchmarkOrphanPoolWhenPool(b *testing.B) { + b.ResetTimer() + + // measure time to add block + start := 1001 + + var stubChain *StubBlockChain + var orp *OrphanPool + + b.StopTimer() + + orp = NewOrphanPool(300) + _, stubChain = testAddBlockNoTest(11000) + // make pool to be full + for i := 1; i <= 1000; i++ { + blk := stubChain.GetBlockByNo(uint64(i)) + + orp.addOrphan(blk) + } + + b.StartTimer() + + for i := 0; i < b.N; i++ { + + idx := start + (i % 10000) + + blk := stubChain.GetBlockByNo(uint64(idx)) + + orp.addOrphan(blk) + } +} diff --git a/chain/recover.go b/chain/recover.go new file mode 100644 index 000000000..d74060bae --- /dev/null +++ b/chain/recover.go @@ -0,0 +1,240 @@ +package chain + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "github.com/aergoio/aergo/internal/enc" + "github.com/aergoio/aergo/types" + "os" + "runtime" + "runtime/debug" +) + +var ( + ErrInvalidPrevHash = errors.New("no of previous hash block is invalid") + ErrRecoInvalidBest = errors.New("best block is not equal to old chain") +) + +func RecoverExit() { + if r := recover(); r != nil { + logger.Error().Str("callstack", string(debug.Stack())).Msg("panic occurred in chain manager") + os.Exit(10) + } +} + +// Recover has 2 situation +// 1. normal recovery +// normal recovery recovers error that has occures while adding single block +// 2. reorg recovery +// reorg recovery recovers error that has occures while executing reorg +func (cs *ChainService) Recover() error { + defer RecoverExit() + + cs.setRecovered(true) + + // check if reorg marker exists + marker, err := cs.cdb.getReorgMarker() + if err != nil { + return err + } + + if marker == nil { + // normal recover + // TODO check state root maker of bestblock + if err := cs.recoverNormal(); err != nil { + return err + } + return nil + } + + logger.Info().Str("reorg marker", marker.toString()).Msg("chain recovery started") + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + best, err := cs.GetBestBlock() + if err != nil { + return err + } + + // check status of chain + if !bytes.Equal(best.BlockHash(), marker.BrBestHash) { + logger.Error().Str("best", best.ID()).Str("markerbest", enc.ToString(marker.BrBestHash)).Msg("best block is not equal to old chain") + return ErrRecoInvalidBest + } + + if err = cs.recoverReorg(marker); err != nil { + return err + } + + return nil +} + +// recover from normal +// set stateroot for bestblock +// when panic occured, memory state of server may not be consistent. 
+// so restart server when panic in chainservice +func (cs *ChainService) recoverNormal() error { + best, err := cs.GetBestBlock() + if err != nil { + return err + } + + logger.Info().Msg("check for crash recovery") + + stateDB := cs.sdb.GetStateDB() + if !stateDB.HasMarker(best.GetHeader().GetBlocksRootHash()) { + logger.Warn().Str("besthash", best.ID()).Uint64("no", best.GetHeader().GetBlockNo()).Msg("marker of state root does not exist") + } + + if !bytes.Equal(cs.sdb.GetStateDB().GetRoot(), best.GetHeader().GetBlocksRootHash()) { + return ErrRecoInvalidSdbRoot + } + + return nil +} + +// recoverReorg redo task that need to be performed after swapping chain meta +// 1. delete receipts of rollbacked blocks +// 2. swap tx mapping +func (cs *ChainService) recoverReorg(marker *ReorgMarker) error { + // build reorgnizer from reorg marker + topBlock, err := cs.GetBlock(marker.BrTopHash) + if err != nil { + return err + } + + if err = cs.reorg(topBlock, marker); err != nil { + logger.Error().Err(err).Msg("failed to retry reorg") + return err + } + + logger.Info().Msg("recovery end") + return nil +} + +type ReorgMarker struct { + cdb *ChainDB + BrStartHash []byte + BrStartNo types.BlockNo + BrBestHash []byte + BrBestNo types.BlockNo + BrTopHash []byte + BrTopNo types.BlockNo +} + +func NewReorgMarker(reorg *reorganizer) *ReorgMarker { + return &ReorgMarker{ + cdb: reorg.cs.cdb, + BrStartHash: reorg.brStartBlock.BlockHash(), + BrStartNo: reorg.brStartBlock.GetHeader().GetBlockNo(), + BrBestHash: reorg.bestBlock.BlockHash(), + BrBestNo: reorg.bestBlock.GetHeader().GetBlockNo(), + BrTopHash: reorg.brTopBlock.BlockHash(), + BrTopNo: reorg.brTopBlock.GetHeader().GetBlockNo(), + } +} + +// RecoverChainMapping rollback chain (no/hash) mapping to old chain of reorg. 
+// it is required for LIB loading +func (rm *ReorgMarker) RecoverChainMapping(cdb *ChainDB) error { + best, err := cdb.GetBestBlock() + if err != nil { + return err + } + + if bytes.Equal(best.BlockHash(), rm.BrBestHash) { + return nil + } + + logger.Info().Str("marker", rm.toString()).Str("curbest", best.ID()).Uint64("curbestno", best.GetHeader().GetBlockNo()).Msg("start to recover chain mapping") + + bestBlock, err := cdb.getBlock(rm.BrBestHash) + if err != nil { + return err + } + + bulk := cdb.store.NewBulk() + defer bulk.DiscardLast() + + var tmpBlkNo types.BlockNo + var tmpBlk *types.Block + + // remove unnecessary chain mapping of new chain + for tmpBlkNo = rm.BrTopNo; tmpBlkNo > rm.BrBestNo; tmpBlkNo-- { + logger.Debug().Uint64("no", tmpBlkNo).Msg("delete chain mapping of new chain") + bulk.Delete(types.BlockNoToBytes(tmpBlkNo)) + } + + tmpBlk = bestBlock + tmpBlkNo = tmpBlk.GetHeader().GetBlockNo() + + for tmpBlkNo > rm.BrStartNo { + logger.Debug().Str("hash", tmpBlk.ID()).Uint64("no", tmpBlkNo).Msg("update chain mapping to old chain") + + bulk.Set(types.BlockNoToBytes(tmpBlkNo), tmpBlk.BlockHash()) + + if tmpBlk, err = cdb.getBlock(tmpBlk.GetHeader().GetPrevBlockHash()); err != nil { + return err + } + + if tmpBlkNo != tmpBlk.GetHeader().GetBlockNo()+1 { + return ErrInvalidPrevHash + } + tmpBlkNo = tmpBlk.GetHeader().GetBlockNo() + } + + logger.Info().Uint64("bestno", rm.BrBestNo).Msg("update best block") + + bulk.Set(latestKey, types.BlockNoToBytes(rm.BrBestNo)) + bulk.Flush() + + cdb.setLatest(bestBlock) + + logger.Info().Msg("succeed to recover chain mapping") + return nil +} + +func (rm *ReorgMarker) setCDB(cdb *ChainDB) { + rm.cdb = cdb +} + +func (rm *ReorgMarker) write() error { + if err := rm.cdb.writeReorgMarker(rm); err != nil { + return err + } + + return nil +} + +func (rm *ReorgMarker) delete() { + rm.cdb.deleteReorgMarker() +} + +func (rm *ReorgMarker) toBytes() ([]byte, error) { + var val bytes.Buffer + encoder := gob.NewEncoder(&val) + if err := encoder.Encode(rm); err != nil { + return nil, err + } + + return val.Bytes(), nil +} + +func (rm *ReorgMarker) toString() string { + buf := "" + + if len(rm.BrStartHash) != 0 { + buf = buf + fmt.Sprintf("branch root=(%d, %s).", rm.BrStartNo, enc.ToString(rm.BrStartHash)) + } + if len(rm.BrTopHash) != 0 { + buf = buf + fmt.Sprintf("branch top=(%d, %s).", rm.BrTopNo, enc.ToString(rm.BrTopHash)) + } + if len(rm.BrBestHash) != 0 { + buf = buf + fmt.Sprintf("org best=(%d, %s).", rm.BrBestNo, enc.ToString(rm.BrBestHash)) + } + + return buf +} diff --git a/chain/reorg.go b/chain/reorg.go index 1ac3fad24..6a63e62bc 100644 --- a/chain/reorg.go +++ b/chain/reorg.go @@ -4,10 +4,12 @@ import ( "bytes" "errors" "fmt" - "github.com/aergoio/aergo-lib/db" + "time" + "github.com/aergoio/aergo/consensus" "github.com/aergoio/aergo/internal/enc" "github.com/aergoio/aergo/message" + "github.com/aergoio/aergo/state" "github.com/aergoio/aergo/types" ) @@ -15,16 +17,34 @@ const ( initBlkCount = 20 ) +var ( + reorgKeyStr = "_reorg_marker_" + reorgKey = []byte(reorgKeyStr) +) + +var ( + ErrInvalidReorgMarker = errors.New("reorg marker is invalid") + ErrMarkerNil = errors.New("reorg marker is nil") +) + type reorganizer struct { //input info cs *ChainService - dbtx *db.Transaction + bestBlock *types.Block brTopBlock *types.Block //branch top block //collected info from chain brStartBlock *types.Block newBlocks []*types.Block //roll forward target blocks oldBlocks []*types.Block //roll back target blocks + + marker *ReorgMarker + + recover bool + + 
gatherFn func() error + gatherPostFn func() + executeBlockFn func(bstate *state.BlockState, block *types.Block) error } type ErrReorgBlock struct { @@ -72,63 +92,192 @@ func (cs *ChainService) needReorg(block *types.Block) bool { return isNeed } -//TODO: on booting, retry reorganizing -//TODO: on booting, delete played tx of block. because deleting txs from mempool is done after commit -//TODO: gather delete request of played tx (1 msg) -func (cs *ChainService) reorg(topBlock *types.Block) error { - reorgtx := cs.cdb.store.NewTx() - - logger.Info().Uint64("blockNo", topBlock.GetHeader().GetBlockNo()).Str("hash", topBlock.ID()). - Msg("reorg started") +func newReorganizer(cs *ChainService, topBlock *types.Block, marker *ReorgMarker) (*reorganizer, error) { + isReco := (marker != nil) reorg := &reorganizer{ cs: cs, - dbtx: &reorgtx, brTopBlock: topBlock, newBlocks: make([]*types.Block, 0, initBlkCount), oldBlocks: make([]*types.Block, 0, initBlkCount), + recover: isReco, + marker: marker, + } + + if isReco { + marker.setCDB(reorg.cs.cdb) + + if err := reorg.initRecovery(marker); err != nil { + return nil, err + } + + reorg.gatherFn = reorg.gatherReco + reorg.gatherPostFn = nil + reorg.executeBlockFn = cs.executeBlockReco + } else { + reorg.gatherFn = reorg.gather + reorg.gatherPostFn = reorg.newMarker + reorg.executeBlockFn = cs.executeBlock } - err := reorg.gatherChainInfo() + TestDebugger.Check(DEBUG_CHAIN_RANDOM_STOP, 0, nil) + + return reorg, nil +} + +//TODO: gather delete request of played tx (1 msg) +func (cs *ChainService) reorg(topBlock *types.Block, marker *ReorgMarker) error { + logger.Info().Uint64("blockNo", topBlock.GetHeader().GetBlockNo()).Str("hash", topBlock.ID()). + Bool("recovery", (marker != nil)).Msg("reorg started") + + begT := time.Now() + + reorg, err := newReorganizer(cs, topBlock, marker) if err != nil { + logger.Error().Err(err).Msg("new reorganazier failed") return err } + err = reorg.gatherFn() + if err != nil { + return err + } + + if reorg.gatherPostFn != nil { + reorg.gatherPostFn() + } + if !cs.NeedReorganization(reorg.brStartBlock.BlockNo()) { return consensus.ErrorConsensus{Msg: "reorganization rejected by consensus"} } - err = reorg.rollbackChain() + err = reorg.rollback() if err != nil { return err } //it's possible to occur error while executing branch block (forgery) - if err := reorg.rollforwardChain(); err != nil { + if err := reorg.rollforward(); err != nil { return err } if err := reorg.swapChain(); err != nil { + switch ec := err.(type) { + case *ErrDebug: + return ec + } + logger.Fatal().Err(err).Msg("reorg failed while swapping chain, it can't recover") return err } + cs.stat.updateEvent(ReorgStat, time.Since(begT), reorg.oldBlocks[0], reorg.newBlocks[0], reorg.brStartBlock) logger.Info().Msg("reorg end") return nil } +func (reorg *reorganizer) initRecovery(marker *ReorgMarker) error { + var startBlock, bestBlock, topBlock *types.Block + var err error + + if marker == nil { + return ErrMarkerNil + } + + topBlock = reorg.brTopBlock + + cdb := reorg.cs.cdb + + logger.Info().Str("marker", marker.toString()).Msg("new reorganizer") + + if startBlock, err = cdb.getBlock(marker.BrStartHash); err != nil { + return err + } + + if bestBlock, err = cdb.getBlock(marker.BrBestHash); err != nil { + return err + } + + if bestBlock.GetHeader().GetBlockNo() >= topBlock.GetHeader().GetBlockNo() || + startBlock.GetHeader().GetBlockNo() >= bestBlock.GetHeader().GetBlockNo() || + startBlock.GetHeader().GetBlockNo() >= topBlock.GetHeader().GetBlockNo() { + return 
ErrInvalidReorgMarker + } + + reorg.brStartBlock = startBlock + reorg.bestBlock = bestBlock + + return nil +} + +func (reorg *reorganizer) newMarker() { + if reorg.marker != nil { + return + } + + reorg.marker = NewReorgMarker(reorg) +} + // swap oldchain to newchain oneshot (best effort) // - chain height mapping // - tx mapping // - best block func (reorg *reorganizer) swapChain() error { - cs := reorg.cs - logger.Info().Msg("swap chain to new branch") - cs.cdb.swapChain(reorg.newBlocks) + if err := TestDebugger.Check(DEBUG_CHAIN_STOP, 1, nil); err != nil { + return err + } + + if err := reorg.marker.write(); err != nil { + return err + } + + if err := TestDebugger.Check(DEBUG_CHAIN_STOP, 2, nil); err != nil { + return err + } + + reorg.deleteOldReceipts() - reorg.swapTxMapping() + //TODO batch notification of rollforward blocks + + if err := reorg.swapTxMapping(); err != nil { + return err + } + + if err := reorg.swapChainMapping(); err != nil { + return err + } + + if err := TestDebugger.Check(DEBUG_CHAIN_STOP, 3, nil); err != nil { + return err + } + + reorg.marker.delete() + + return nil +} + +// swapChainMapping swaps chain meta from org chain to side chain and deleting reorg marker. +// it should be executed by 1 tx to be atomic. +func (reorg *reorganizer) swapChainMapping() error { + cdb := reorg.cs.cdb + + logger.Info().Msg("swap chain mapping for new branch") + + best, err := cdb.GetBestBlock() + if err != nil { + return err + } + + if reorg.recover && bytes.Equal(best.GetHash(), reorg.brTopBlock.GetHash()) { + logger.Warn().Msg("swap of chain mapping has already finished") + return nil + } + + if err := cdb.swapChainMapping(reorg.newBlocks); err != nil { + return err + } return nil } @@ -147,18 +296,23 @@ func (reorg *reorganizer) swapTxMapping() error { } } + var overwrap int + // insert new tx mapping for i := len(reorg.newBlocks) - 1; i >= 0; i-- { newBlock := reorg.newBlocks[i] for _, tx := range newBlock.GetBody().GetTxs() { - delete(oldTxs, types.ToTxID(tx.GetHash())) + if _, ok := oldTxs[types.ToTxID(tx.GetHash())]; ok { + overwrap++ + delete(oldTxs, types.ToTxID(tx.GetHash())) + } } dbTx := cs.cdb.store.NewTx() - defer dbTx.Discard() if err := cdb.addTxsOfBlock(&dbTx, newBlock.GetBody().GetTxs(), newBlock.BlockHash()); err != nil { + dbTx.Discard() return err } @@ -166,29 +320,18 @@ func (reorg *reorganizer) swapTxMapping() error { } // delete old tx mapping - txCnt := 0 - var dbTx db.Transaction + bulk := cdb.store.NewBulk() + defer bulk.DiscardLast() for _, oldTx := range oldTxs { - if dbTx == nil { - dbTx = cs.cdb.store.NewTx() - } - defer dbTx.Discard() - - cdb.deleteTx(&dbTx, oldTx) - - txCnt++ - - if txCnt >= TxBatchMax { - dbTx.Commit() - dbTx = nil - txCnt = 0 - } + bulk.Delete(oldTx.Hash) } + bulk.Flush() + //add rollbacked Tx to mempool (except played tx in roll forward) count := len(oldTxs) - logger.Debug().Int("tx count", count).Msg("tx add to mempool") + logger.Debug().Int("tx count", count).Int("overwrapped count", overwrap).Msg("tx add to mempool") if count > 0 { //txs := make([]*types.Tx, 0, count) @@ -215,11 +358,17 @@ func (reorg *reorganizer) dumpOldBlocks() { } // Find branch root and gather rollforard/rollback target blocks -func (reorg *reorganizer) gatherChainInfo() error { +func (reorg *reorganizer) gather() error { //find branch root block , gather rollforward Target block var err error cdb := reorg.cs.cdb + bestBlock, err := cdb.GetBestBlock() + if err != nil { + return err + } + reorg.bestBlock = bestBlock + brBlock := reorg.brTopBlock 
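Both `gather()` and `gatherReco()` rely on the same backward walk: start from a branch tip, follow the previous-block hashes until the branch-start height is reached, and sanity-check that each step decreases the height by exactly one. A minimal, self-contained sketch of that pattern follows; `Block` and the map store are hypothetical stand-ins for `types.Block` and `ChainDB`, not the real types.

```go
package main

import "fmt"

// Block is a hypothetical stand-in for types.Block with only the fields the walk needs.
type Block struct {
	No       uint64
	Hash     string
	PrevHash string
}

// gatherToStart collects blocks from top down to, but not including, the block at
// startNo, newest first — the same order the reorganizer keeps its rollback and
// rollforward lists in. The map stands in for ChainDB block lookups.
func gatherToStart(store map[string]*Block, top *Block, startNo uint64) ([]*Block, error) {
	var out []*Block
	blk := top
	for blk.No > startNo {
		out = append(out, blk)
		prev, ok := store[blk.PrevHash]
		if !ok {
			return nil, fmt.Errorf("missing block %s", blk.PrevHash)
		}
		if prev.No+1 != blk.No {
			// Same consistency check as the ErrInvalidPrevHash path in RecoverChainMapping.
			return nil, fmt.Errorf("inconsistent height at %s", prev.Hash)
		}
		blk = prev
	}
	return out, nil
}

func main() {
	store := map[string]*Block{
		"a": {No: 1, Hash: "a", PrevHash: "genesis"},
		"b": {No: 2, Hash: "b", PrevHash: "a"},
		"c": {No: 3, Hash: "c", PrevHash: "b"},
	}
	blocks, _ := gatherToStart(store, store["c"], 1)
	for _, b := range blocks {
		fmt.Println(b.No, b.Hash) // prints 3 c, then 2 b; block 1 is the branch start and is excluded
	}
}
```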
brBlockNo := brBlock.BlockNo() @@ -280,26 +429,71 @@ func (reorg *reorganizer) gatherChainInfo() error { return ErrNotExistBranchRoot } -func (reorg *reorganizer) rollbackChain() error { +// build reorg chain info from marker +func (reorg *reorganizer) gatherReco() error { + var err error + + cdb := reorg.cs.cdb + + startBlock := reorg.brStartBlock + bestBlock := reorg.bestBlock + topBlock := reorg.brTopBlock + + reorg.brStartBlock = startBlock + reorg.bestBlock = bestBlock + + gatherBlocksToStart := func(top *types.Block, stage string) ([]*types.Block, error) { + blocks := make([]*types.Block, 0) + + for tmpBlk := top; tmpBlk.GetHeader().GetBlockNo() > startBlock.GetHeader().GetBlockNo(); { + blocks = append(blocks, tmpBlk) + + logger.Debug().Str("stage", stage).Str("hash", tmpBlk.ID()).Uint64("blockNo", tmpBlk.GetHeader().GetBlockNo()). + Msg("gather target for reco") + + if tmpBlk, err = cdb.getBlock(tmpBlk.GetHeader().GetPrevBlockHash()); err != nil { + return blocks, err + } + } + + return blocks, nil + } + + reorg.oldBlocks, err = gatherBlocksToStart(bestBlock, "rollback") + if err != nil { + return err + } + + reorg.newBlocks, err = gatherBlocksToStart(topBlock, "rollforward") + if err != nil { + return err + } + + return nil +} + +func (reorg *reorganizer) rollback() error { brStartBlock := reorg.brStartBlock brStartBlockNo := brStartBlock.GetHeader().GetBlockNo() logger.Info().Str("hash", brStartBlock.ID()).Uint64("no", brStartBlockNo).Msg("rollback chain to branch start block") - if err := reorg.cs.sdb.Rollback(brStartBlock.GetHeader().GetBlocksRootHash()); err != nil { + if err := reorg.cs.sdb.SetRoot(brStartBlock.GetHeader().GetBlocksRootHash()); err != nil { return fmt.Errorf("failed to rollback sdb(branchRoot:no=%d,hash=%v)", brStartBlockNo, brStartBlock.ID()) } reorg.cs.Update(brStartBlock) + return nil +} + +func (reorg *reorganizer) deleteOldReceipts() { dbTx := reorg.cs.cdb.NewTx() for _, blk := range reorg.oldBlocks { reorg.cs.cdb.deleteReceipts(&dbTx, blk.GetHash(), blk.BlockNo()) } dbTx.Commit() - - return nil } /* @@ -307,17 +501,17 @@ func (reorg *reorganizer) rollbackChain() error { rollforwardBlock add oldTxs to mempool */ -func (reorg *reorganizer) rollforwardChain() error { - cs := reorg.cs +func (reorg *reorganizer) rollforward() error { + //cs := reorg.cs - logger.Info().Msg("rollforward chain started") + logger.Info().Bool("recover", reorg.recover).Msg("rollforward chain started") for i := len(reorg.newBlocks) - 1; i >= 0; i-- { newBlock := reorg.newBlocks[i] newBlockNo := newBlock.GetHeader().GetBlockNo() - if err := cs.executeBlock(nil, newBlock); err != nil { - logger.Error().Str("hash", newBlock.ID()).Uint64("no", newBlockNo). + if err := reorg.executeBlockFn(nil, newBlock); err != nil { + logger.Error().Bool("recover", reorg.recover).Str("hash", newBlock.ID()).Uint64("no", newBlockNo). Msg("failed to execute block in reorg") return err } diff --git a/chain/stat.go b/chain/stat.go new file mode 100644 index 000000000..5425a6002 --- /dev/null +++ b/chain/stat.go @@ -0,0 +1,161 @@ +package chain + +import ( + "encoding/json" + "sync" + "time" + + "github.com/aergoio/aergo/types" +) + +//go:generate stringer -type=statIndex +type statIndex int + +const ( + // Warning: Each statIndex contant has a String method, which is + // automically generated by 'stringer' with 'go generate' command. For the + // detail, check https://blog.golang.org/generate + + // ReorgStat is a constant representing a stat about reorganization. 
+ ReorgStat statIndex = iota + // MaxStat is a constant representing a value less than which all the + // constants corresponding chain stats must be. + MaxStat +) + +var ( + // To add a new one to chain stats, implements statItem interface and add + // its constructor here. Additionally you need to add a constant + // corresponding to its index like statReorg above. + statItemCtors = map[statIndex]func() statItem{ + ReorgStat: newStReorg, + } +) + +type stats []*stat + +func newStats() stats { + s := make(stats, MaxStat) + for i := statIndex(0); i < MaxStat; i++ { + s[i] = newStat(statItemCtors[i]()) + } + return s +} + +func (s stats) JSON() string { + r := make(map[string]json.RawMessage) + for i := statIndex(0); i < MaxStat; i++ { + if b, err := json.Marshal(s.clone(i)); err == nil { + r[i.String()] = json.RawMessage(b) + } + } + if m, err := json.Marshal(r); err == nil { + return string(m) + } + return "" +} + +func (s stats) get(idx statIndex) *stat { + return []*stat(s)[idx] +} + +func (s stats) clone(idx statIndex) interface{} { + i := s.get(idx) + i.RLock() + defer i.RUnlock() + return i.clone() +} + +func (s stats) updateEvent(idx statIndex, args ...interface{}) { + i := s.get(idx) + i.Lock() + defer i.Unlock() + + i.updateEvent(args...) +} + +type stat struct { + sync.RWMutex + statItem +} + +func newStat(i statItem) *stat { + return &stat{statItem: i} +} + +type statItem interface { + updateEvent(args ...interface{}) + clone() interface{} +} + +type stReorg struct { + totalElapsed time.Duration + Count int64 + AverageElapsed float64 `json:"Average Elapsed Time,omitempty"` + Latest *evReorg `json:",omitempty"` +} + +func newStReorg() statItem { + return &stReorg{} +} + +type evReorg struct { + OldBest *blockInfo `json:"Old Best,omitempty"` + Fork *blockInfo `json:"Fork At,omitempty"` + NewBest *blockInfo `json:"New Best,omitempty"` + Time time.Time +} + +type blockInfo struct { + Hash string + Height types.BlockNo +} + +func (sr *stReorg) getCount() int64 { + return sr.Count +} + +func (sr *stReorg) getLatestEvent() interface{} { + return sr.Latest +} + +func (sr *stReorg) updateEvent(args ...interface{}) { + if len(args) != 4 { + logger.Info().Int("len", len(args)).Msg("invalid # of arguments for the reorg stat update") + return + } + + et := args[0].(time.Duration) + + bi := make([]*blockInfo, len(args)) + for i, a := range args[1:] { + var block *types.Block + ok := false + if block, ok = a.(*types.Block); !ok { + logger.Info().Int("arg idx", i).Msg("invalid type of argument") + return + } + bi[i] = &blockInfo{Hash: block.ID(), Height: block.BlockNo()} + } + + sr.Latest = &evReorg{ + OldBest: bi[0], + NewBest: bi[1], + Fork: bi[2], + Time: time.Now(), + } + + sr.totalElapsed += et + sr.Count++ + sr.AverageElapsed = (sr.totalElapsed / time.Duration(sr.Count)).Seconds() +} + +func (sr *stReorg) clone() interface{} { + c := *sr + if sr.Latest != nil { + l := *sr.Latest + c.Latest = &l + } + + return &c +} diff --git a/chain/stat_test.go b/chain/stat_test.go new file mode 100644 index 000000000..3dd2bc52d --- /dev/null +++ b/chain/stat_test.go @@ -0,0 +1,49 @@ +package chain + +import ( + "encoding/json" + "fmt" + "testing" + "time" + + "github.com/aergoio/aergo/types" + "github.com/stretchr/testify/assert" +) + +var block = types.NewBlock(nil, nil, nil, nil, nil, 0) + +func TestChainStatReorgClone(t *testing.T) { + var chk = assert.New(t) + + stats := newStats() + i := ReorgStat + + r := stats.clone(i) + chk.NotNil(r) + b, err := json.Marshal(r) + chk.Nil(err) + 
fmt.Println(string(b)) + + stats.updateEvent(i, time.Second*10, block, block, block) + stats.updateEvent(i, time.Second*10, block, block, block) + stats.updateEvent(i, time.Second*10, block, block, block) + r = stats.clone(i) + chk.NotNil(r) + b, err = json.Marshal(r) + chk.Nil(err) + fmt.Println(string(b)) +} + +func TestChainStatJSON(t *testing.T) { + var chk = assert.New(t) + + stats := newStats() + i := ReorgStat + stats.updateEvent(i, time.Second*10, block, block, block) + stats.updateEvent(i, time.Second*10, block, block, block) + stats.updateEvent(i, time.Second*10, block, block, block) + + s := stats.JSON() + chk.NotZero(len(s)) + fmt.Println(s) +} diff --git a/chain/statindex_string.go b/chain/statindex_string.go new file mode 100644 index 000000000..544b45e35 --- /dev/null +++ b/chain/statindex_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=statIndex"; DO NOT EDIT. + +package chain + +import "strconv" + +const _statIndex_name = "ReorgStatMaxStat" + +var _statIndex_index = [...]uint8{0, 9, 16} + +func (i statIndex) String() string { + if i < 0 || i >= statIndex(len(_statIndex_index)-1) { + return "statIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _statIndex_name[_statIndex_index[i]:_statIndex_index[i+1]] +} diff --git a/chain/stubchain.go b/chain/stubchain.go index b6bdfa179..6cd7184e6 100644 --- a/chain/stubchain.go +++ b/chain/stubchain.go @@ -23,11 +23,15 @@ var ( ErrNotExistBlock = errors.New("not exist block of the hash") ) -func NewStubBlockChain() *StubBlockChain { +func NewStubBlockChain(size int) *StubBlockChain { + if size < 10000 { + size = 10000 + } + tchain := &StubBlockChain{Best: -1} - tchain.Hashes = make([][]byte, 10240) - tchain.Blocks = make([]*types.Block, 10240) + tchain.Hashes = make([][]byte, size+1) + tchain.Blocks = make([]*types.Block, size+1) return tchain } @@ -118,6 +122,10 @@ func (tchain *StubBlockChain) GetConsensusInfo() string { return "" } +func (tchain *StubBlockChain) GetChainStats() string { + return "" +} + func (tchain *StubBlockChain) GetBestBlock() (*types.Block, error) { return tchain.BestBlock, nil } @@ -199,7 +207,7 @@ func (tchain *StubBlockChain) Rollback(ancestor *types.BlockInfo) { } func InitStubBlockChain(prefixChain []*types.Block, genCount int) *StubBlockChain { - newChain := NewStubBlockChain() + newChain := NewStubBlockChain(genCount + len(prefixChain) + 1) //load initial Blocks for _, block := range prefixChain { diff --git a/cmd/aergocli/cmd/chainstat.go b/cmd/aergocli/cmd/chainstat.go new file mode 100644 index 000000000..b94f25ff7 --- /dev/null +++ b/cmd/aergocli/cmd/chainstat.go @@ -0,0 +1,29 @@ +/** + * @file + * @copyright defined in aergo/LICENSE.txt + */ +package cmd + +import ( + "context" + + aergorpc "github.com/aergoio/aergo/types" + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(chainstatCmd) +} + +var chainstatCmd = &cobra.Command{ + Use: "chainstat", + Short: "Print chain statistics", + Run: func(cmd *cobra.Command, args []string) { + msg, err := client.ChainStat(context.Background(), &aergorpc.Empty{}) + if err != nil { + cmd.Printf("Failed: %s\n", err.Error()) + return + } + cmd.Println(msg.Report) + }, +} diff --git a/cmd/aergocli/cmd/cluster.go b/cmd/aergocli/cmd/cluster.go new file mode 100644 index 000000000..f12fadd9e --- /dev/null +++ b/cmd/aergocli/cmd/cluster.go @@ -0,0 +1,88 @@ +package cmd + +import ( + "context" + aergorpc "github.com/aergoio/aergo/types" + "github.com/spf13/cobra" + "strconv" +) + +var ( + nodename string + nodeidStr string + url 
string + peerid string +) + +func init() { + clusterCmd := &cobra.Command{ + Use: "cluster [flags] subcommand", + Short: "Cluster command for raft consensus", + } + + addCmd.Flags().StringVar(&nodename, "name", "", "node name to add to the cluster") + addCmd.MarkFlagRequired("name") + addCmd.Flags().StringVar(&url, "url", "", "node url to add to the cluster") + addCmd.MarkFlagRequired("url") + addCmd.Flags().StringVar(&peerid, "peerid", "", "peer id of node to add to the cluster") + addCmd.MarkFlagRequired("peerid") + + removeCmd.Flags().StringVar(&nodeidStr, "nodeid", "", "node id to remove to the cluster") + removeCmd.MarkFlagRequired("nodeid") + + clusterCmd.AddCommand(addCmd, removeCmd) + rootCmd.AddCommand(clusterCmd) +} + +var addCmd = &cobra.Command{ + Use: "add [flags]", + Short: "Add new member node to cluster. This command can only be used for raft consensus.", + Run: func(cmd *cobra.Command, args []string) { + if len(nodename) == 0 || len(url) == 0 || len(peerid) == 0 { + cmd.Printf("Failed: name, len, peerid flag must have value\n") + return + } + + var changeReq = &aergorpc.MembershipChange{ + Type: aergorpc.MembershipChangeType_ADD_MEMBER, + Attr: &aergorpc.MemberAttr{Name: nodename, Url: url, PeerID: []byte(peerid)}, + } + reply, err := client.ChangeMembership(context.Background(), changeReq) + if err != nil { + cmd.Printf("Failed to add member: %s\n", err.Error()) + return + } + + cmd.Printf("added member to cluster: %s\n", reply.Attr.ToString()) + return + }, +} + +var removeCmd = &cobra.Command{ + Use: "remove [flags]", + Short: "Remove raft node with given node id from cluster. This command can only be used for raft consensus.", + Run: func(cmd *cobra.Command, args []string) { + if len(nodeidStr) == 0 { + cmd.Printf("Failed: nodeid flag must be string of hex format\n") + return + } + + nodeid, err := strconv.ParseUint(nodeidStr, 16, 64) + if err != nil { + cmd.Printf("Failed to add member: %s\n", err.Error()) + return + } + + changeReq := &aergorpc.MembershipChange{ + Type: aergorpc.MembershipChangeType_REMOVE_MEMBER, + Attr: &aergorpc.MemberAttr{ID: nodeid}, + } + reply, err := client.ChangeMembership(context.Background(), changeReq) + if err != nil { + cmd.Printf("Failed to remove member: %s\n", err.Error()) + } + + cmd.Printf("removed member from cluster: %s\n", reply.Attr.ToString()) + return + }, +} diff --git a/cmd/aergocli/cmd/contract.go b/cmd/aergocli/cmd/contract.go index c381ac6cd..7ea7b17df 100644 --- a/cmd/aergocli/cmd/contract.go +++ b/cmd/aergocli/cmd/contract.go @@ -12,6 +12,7 @@ import ( "os" "github.com/aergoio/aergo/cmd/aergocli/util" + luacEncoding "github.com/aergoio/aergo/cmd/aergoluac/encoding" "github.com/aergoio/aergo/types" "github.com/mr-tron/base58/base58" "github.com/spf13/cobra" @@ -32,10 +33,10 @@ func init() { } deployCmd := &cobra.Command{ - Use: "deploy [flags] --payload 'payload string' creator\n aergocli contract deploy [flags] creator bcfile abifile", - Short: "Deploy a compiled contract to the server", - Args: cobra.MinimumNArgs(1), - Run: runDeployCmd, + Use: "deploy [flags] --payload 'payload string' creator\n aergocli contract deploy [flags] creator bcfile abifile", + Short: "Deploy a compiled contract to the server", + Args: cobra.MinimumNArgs(1), + Run: runDeployCmd, DisableFlagsInUseLine: true, } deployCmd.PersistentFlags().StringVar(&data, "payload", "", "result of compiling a contract") @@ -95,7 +96,7 @@ func runDeployCmd(cmd *cobra.Command, args []string) { var payload []byte if len(data) == 0 { if len(args) < 3 { - 
fmt.Fprint(os.Stderr, "Usage: aergocli contract deploy [args]") + _, _ = fmt.Fprint(os.Stderr, "Usage: aergocli contract deploy [args]") os.Exit(1) } var code []byte @@ -136,7 +137,7 @@ func runDeployCmd(cmd *cobra.Command, args []string) { } argLen = len(args[1]) } - code, err := util.DecodeCode(data) + code, err := luacEncoding.DecodeCode(data) payload = make([]byte, 4+len(code)+argLen) binary.LittleEndian.PutUint32(payload[0:], uint32(len(code)+4)) codeLen := copy(payload[4:], code) @@ -145,13 +146,13 @@ func runDeployCmd(cmd *cobra.Command, args []string) { } if err != nil { - fmt.Fprint(os.Stderr, err) + _, _ = fmt.Fprint(os.Stderr, err) os.Exit(1) } } amountBigInt, ok := new(big.Int).SetString(amount, 10) if !ok { - fmt.Fprint(os.Stderr, "failed to parse --amount flags") + _, _ = fmt.Fprint(os.Stderr, "failed to parse --amount flags") os.Exit(1) } tx := &types.Tx{ @@ -219,7 +220,7 @@ func runCallCmd(cmd *cobra.Command, args []string) { amountBigInt, ok := new(big.Int).SetString(amount, 10) if !ok { - fmt.Fprint(os.Stderr, "failed to parse --amount flags") + _, _ = fmt.Fprint(os.Stderr, "failed to parse --amount flags") os.Exit(1) } txType := types.TxType_NORMAL @@ -241,7 +242,7 @@ func runCallCmd(cmd *cobra.Command, args []string) { if chainIdHash != "" { rawCidHash, err := base58.Decode(chainIdHash) if err != nil { - fmt.Fprint(os.Stderr, "failed to parse --chainidhash flags\n") + _, _ = fmt.Fprint(os.Stderr, "failed to parse --chainidhash flags\n") os.Exit(1) } tx.Body.ChainIdHash = rawCidHash diff --git a/cmd/aergocli/cmd/getblock.go b/cmd/aergocli/cmd/getblock.go index 2402da6da..433d3dac1 100644 --- a/cmd/aergocli/cmd/getblock.go +++ b/cmd/aergocli/cmd/getblock.go @@ -22,6 +22,7 @@ var getblockCmd = &cobra.Command{ Run: execGetBlock, } +var stream bool var number uint64 var hash string @@ -29,9 +30,30 @@ func init() { rootCmd.AddCommand(getblockCmd) getblockCmd.Flags().Uint64VarP(&number, "number", "n", 0, "Block height") getblockCmd.Flags().StringVarP(&hash, "hash", "", "", "Block hash") + getblockCmd.Flags().BoolVar(&stream, "stream", false, "Get the block information by streamming") } func execGetBlock(cmd *cobra.Command, args []string) { + if stream { + bs, err := client.ListBlockStream(context.Background(), &aergorpc.Empty{}) + if err != nil { + cmd.Printf("Failed: %s\n", err.Error()) + return + } + if err != nil { + cmd.Printf("Failed: %s", err.Error()) + return + } + for { + b, err := bs.Recv() + if err != nil { + cmd.Printf("Failed: %s\n", err.Error()) + return + } + cmd.Println(util.BlockConvBase58Addr(b)) + } + return + } fflags := cmd.Flags() if fflags.Changed("number") == false && fflags.Changed("hash") == false { cmd.Println("no block --hash or --number specified") diff --git a/cmd/aergocli/cmd/getpeers.go b/cmd/aergocli/cmd/getpeers.go index f35ccf4d8..70292f367 100644 --- a/cmd/aergocli/cmd/getpeers.go +++ b/cmd/aergocli/cmd/getpeers.go @@ -9,8 +9,8 @@ import ( "bytes" "context" "os" - "strings" "sort" + "strings" "github.com/aergoio/aergo/cmd/aergocli/util" "github.com/aergoio/aergo/types" @@ -28,11 +28,12 @@ var showself bool var sortFlag string const ( - sortAddr = "addr" - sortID = "id" - sortHeight = "height" + sortAddr = "addr" + sortID = "id" + sortHeight = "height" sortDefault = "no" ) + func init() { rootCmd.AddCommand(getpeersCmd) getpeersCmd.Flags().BoolVar(&nohidden, "nohidden", false, "exclude hidden peers") @@ -67,7 +68,7 @@ func GetSorter(cmd *cobra.Command, flag string) peerSorter { case sortDefault: return noSorter{} default: - 
cmd.Println("Invalid sort type",flag) + cmd.Println("Invalid sort type", flag) os.Exit(1) return noSorter{} } @@ -76,20 +77,27 @@ func GetSorter(cmd *cobra.Command, flag string) peerSorter { type peerSorter interface { Sort([]*types.Peer) } -type addrSorter struct {} -func (addrSorter)Sort(peerArr []*types.Peer) { +type addrSorter struct{} + +func (addrSorter) Sort(peerArr []*types.Peer) { sort.Sort(byAddr(peerArr)) } + type idSorter struct{} -func (idSorter)Sort(peerArr []*types.Peer) { + +func (idSorter) Sort(peerArr []*types.Peer) { sort.Sort(byID(peerArr)) } + type heightSorter struct{} -func (heightSorter)Sort(peerArr []*types.Peer) { + +func (heightSorter) Sort(peerArr []*types.Peer) { sort.Sort(byHeight(peerArr)) } + type noSorter struct{} -func (noSorter)Sort(peerArr []*types.Peer) { + +func (noSorter) Sort(peerArr []*types.Peer) { // Do nothing. no sort } diff --git a/cmd/aergocli/cmd/mock_types/mock_types.go b/cmd/aergocli/cmd/mock_types/mock_types.go index 4eeb9c25d..5a50104f0 100644 --- a/cmd/aergocli/cmd/mock_types/mock_types.go +++ b/cmd/aergocli/cmd/mock_types/mock_types.go @@ -53,6 +53,42 @@ func (mr *MockAergoRPCServiceClientMockRecorder) Blockchain(arg0, arg1 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Blockchain", reflect.TypeOf((*MockAergoRPCServiceClient)(nil).Blockchain), varargs...) } +// ChainStat mocks base method +func (m *MockAergoRPCServiceClient) ChainStat(arg0 context.Context, arg1 *types.Empty, arg2 ...grpc.CallOption) (*types.ChainStats, error) { + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ChainStat", varargs...) + ret0, _ := ret[0].(*types.ChainStats) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainStat indicates an expected call of ChainStat +func (mr *MockAergoRPCServiceClientMockRecorder) ChainStat(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainStat", reflect.TypeOf((*MockAergoRPCServiceClient)(nil).ChainStat), varargs...) +} + +// ChangeMembership mocks base method +func (m *MockAergoRPCServiceClient) ChangeMembership(arg0 context.Context, arg1 *types.MembershipChange, arg2 ...grpc.CallOption) (*types.MembershipChangeReply, error) { + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ChangeMembership", varargs...) + ret0, _ := ret[0].(*types.MembershipChangeReply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChangeMembership indicates an expected call of ChangeMembership +func (mr *MockAergoRPCServiceClientMockRecorder) ChangeMembership(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChangeMembership", reflect.TypeOf((*MockAergoRPCServiceClient)(nil).ChangeMembership), varargs...) 
+} + // CommitTX mocks base method func (m *MockAergoRPCServiceClient) CommitTX(arg0 context.Context, arg1 *types.TxList, arg2 ...grpc.CallOption) (*types.CommitResultList, error) { varargs := []interface{}{arg0, arg1} diff --git a/cmd/aergocli/cmd/name.go b/cmd/aergocli/cmd/name.go index 1ef0180b1..acb57f69c 100644 --- a/cmd/aergocli/cmd/name.go +++ b/cmd/aergocli/cmd/name.go @@ -22,6 +22,7 @@ var nameCmd = &cobra.Command{ Short: "Name command", } var spending string +var blockNo uint64 func init() { rootCmd.AddCommand(nameCmd) @@ -59,6 +60,7 @@ func init() { } ownerCmd.Flags().StringVar(&name, "name", "", "Name of account to create") ownerCmd.MarkFlagRequired("name") + ownerCmd.Flags().Uint64VarP(&blockNo, "blockno", "n", 0, "Block height") nameCmd.AddCommand(newCmd, updateCmd, ownerCmd) } @@ -160,7 +162,7 @@ func execNameUpdate(cmd *cobra.Command, args []string) error { } func execNameOwner(cmd *cobra.Command, args []string) { - msg, err := client.GetNameInfo(context.Background(), &types.Name{Name: name}) + msg, err := client.GetNameInfo(context.Background(), &types.Name{Name: name, BlockNo: blockNo}) if err != nil { cmd.Println(err.Error()) return diff --git a/cmd/aergocli/cmd/serverinfo.go b/cmd/aergocli/cmd/serverinfo.go index 4904d0056..8ad010c1f 100644 --- a/cmd/aergocli/cmd/serverinfo.go +++ b/cmd/aergocli/cmd/serverinfo.go @@ -21,6 +21,7 @@ var ( Run: execServerInfo, } ) + func init() { rootCmd.AddCommand(serverinfoCmd) } @@ -29,7 +30,6 @@ func execServerInfo(cmd *cobra.Command, args []string) { var b []byte var params types.KeyParams - b = make([]byte, 8) binary.LittleEndian.PutUint64(b, uint64(number)) @@ -38,7 +38,7 @@ func execServerInfo(cmd *cobra.Command, args []string) { cmd.Printf("Failed: %s\n", err.Error()) return } - buf, err := json.MarshalIndent(msg, ""," ") + buf, err := json.MarshalIndent(msg, "", " ") if err != nil { cmd.Printf("Failed: invalid server response %s\n", err.Error()) return diff --git a/cmd/aergocli/util/base58addr.go b/cmd/aergocli/util/base58addr.go index 95344fdce..0bd2fe9aa 100644 --- a/cmd/aergocli/util/base58addr.go +++ b/cmd/aergocli/util/base58addr.go @@ -1,16 +1,13 @@ package util import ( - "encoding/hex" "encoding/json" "errors" - "fmt" "math/big" "strconv" "time" "github.com/aergoio/aergo/types" - "github.com/anaskhan96/base58check" "github.com/mr-tron/base58/base58" ) @@ -84,6 +81,7 @@ type InOutPeer struct { State string Hidden bool Self bool + Version string } func FillTxBody(source *InOutTxBody, target *types.TxBody) error { @@ -254,6 +252,11 @@ func ConvPeer(p *types.Peer) *InOutPeer { out.State = types.PeerState(p.State).String() out.Hidden = p.Hidden out.Self = p.Selfpeer + if p.Version != "" { + out.Version = p.Version + } else { + out.Version = "(old)" + } return out } @@ -264,11 +267,18 @@ func ConvBlockchainStatus(in *types.BlockchainStatus) string { } out.Hash = base58.Encode(in.BestBlockHash) out.Height = in.BestHeight - if len(in.ConsensusInfo) > 0 { - ci := json.RawMessage(in.ConsensusInfo) - out.ConsensusInfo = &ci - } + out.ChainIdHash = base58.Encode(in.BestChainIdHash) + + toJRM := func(s string) *json.RawMessage { + if len(s) > 0 { + m := json.RawMessage(s) + return &m + } + return nil + } + out.ConsensusInfo = toJRM(in.ConsensusInfo) + jsonout, err := json.Marshal(out) if err != nil { return "" @@ -303,27 +313,3 @@ func toString(out interface{}) string { } return string(jsonout) } - -const CodeVersion = 0xC0 - -func EncodeCode(code []byte) string { - encoded, _ := base58check.Encode(fmt.Sprintf("%x", CodeVersion), 
hex.EncodeToString(code)) - return encoded -} - -func DecodeCode(encodedCode string) ([]byte, error) { - decodedString, err := base58check.Decode(encodedCode) - if err != nil { - return nil, err - } - decodedBytes, err := hex.DecodeString(decodedString) - if err != nil { - return nil, err - } - version := decodedBytes[0] - if version != CodeVersion { - return nil, errors.New("Invalid code version") - } - decoded := decodedBytes[1:] - return decoded, nil -} diff --git a/cmd/aergocli/util/base64ToHex.go b/cmd/aergocli/util/base64ToHex.go index 66cb56b09..0df44e04f 100644 --- a/cmd/aergocli/util/base64ToHex.go +++ b/cmd/aergocli/util/base64ToHex.go @@ -12,6 +12,7 @@ type InOutBlockchainStatus struct { Height uint64 ConsensusInfo *json.RawMessage `json:",omitempty"` ChainIdHash string + ChainStat *json.RawMessage `json:",omitempty"` } func ConvHexBlockchainStatus(in *types.BlockchainStatus) string { diff --git a/cmd/aergoluac/encoding/codeEncoding.go b/cmd/aergoluac/encoding/codeEncoding.go new file mode 100644 index 000000000..cdc0af287 --- /dev/null +++ b/cmd/aergoluac/encoding/codeEncoding.go @@ -0,0 +1,32 @@ +package encoding + +import ( + "encoding/hex" + "errors" + "fmt" + "github.com/anaskhan96/base58check" +) + +const CodeVersion = 0xC0 + +func EncodeCode(code []byte) string { + encoded, _ := base58check.Encode(fmt.Sprintf("%x", CodeVersion), hex.EncodeToString(code)) + return encoded +} + +func DecodeCode(encodedCode string) ([]byte, error) { + decodedString, err := base58check.Decode(encodedCode) + if err != nil { + return nil, err + } + decodedBytes, err := hex.DecodeString(decodedString) + if err != nil { + return nil, err + } + version := decodedBytes[0] + if version != CodeVersion { + return nil, errors.New("Invalid code version") + } + decoded := decodedBytes[1:] + return decoded, nil +} diff --git a/cmd/aergoluac/main.go b/cmd/aergoluac/main.go index 8a0169a0d..c331a4e0e 100644 --- a/cmd/aergoluac/main.go +++ b/cmd/aergoluac/main.go @@ -18,8 +18,11 @@ var ( rootCmd *cobra.Command abiFile string payload bool + version bool ) +var githash = "No git hash provided" + func init() { rootCmd = &cobra.Command{ Use: "aergoluac --payload srcfile\n aergoluac --abi abifile srcfile bcfile", @@ -27,6 +30,11 @@ func init() { Long: "Compile a lua contract. 
This command makes a bytecode file and a ABI file or prints a payload data.", RunE: func(cmd *cobra.Command, args []string) error { var err error + + if version { + cmd.Printf("Aergoluac %s\n", githash) + return nil + } if payload { if len(args) == 0 { err = util.DumpFromStdin() @@ -47,6 +55,7 @@ func init() { } rootCmd.PersistentFlags().StringVarP(&abiFile, "abi", "a", "", "abi filename") rootCmd.PersistentFlags().BoolVar(&payload, "payload", false, "print the compilation result consisting of bytecode and abi") + rootCmd.PersistentFlags().BoolVar(&version, "version", false, "print the version number of aergoluac") } func main() { diff --git a/cmd/aergoluac/util/luac_util.go b/cmd/aergoluac/util/luac_util.go index 5eba57a80..c61404d22 100644 --- a/cmd/aergoluac/util/luac_util.go +++ b/cmd/aergoluac/util/luac_util.go @@ -14,12 +14,11 @@ import ( "encoding/binary" "errors" "fmt" + "github.com/aergoio/aergo/cmd/aergoluac/encoding" "io/ioutil" "os" "runtime" "unsafe" - - "github.com/aergoio/aergo/cmd/aergocli/util" ) var ( @@ -83,7 +82,7 @@ func DumpFromFile(srcFileName string) error { return errors.New(C.GoString(errMsg)) } - fmt.Println(util.EncodeCode(b.Bytes())) + fmt.Println(encoding.EncodeCode(b.Bytes())) return nil } @@ -120,7 +119,7 @@ func DumpFromStdin() error { if errMsg := C.vm_stringdump(L); errMsg != nil { return errors.New(C.GoString(errMsg)) } - fmt.Println(util.EncodeCode(b.Bytes())) + fmt.Println(encoding.EncodeCode(b.Bytes())) return nil } diff --git a/cmd/aergoluac/util/state_module.c b/cmd/aergoluac/util/state_module.c index da7db5379..62058b9c4 100644 --- a/cmd/aergoluac/util/state_module.c +++ b/cmd/aergoluac/util/state_module.c @@ -3,6 +3,7 @@ * @copyright defined in aergo/LICENSE.txt */ +#include #include #include #include @@ -21,9 +22,12 @@ static int state_map(lua_State *L) static int state_array(lua_State *L) { - int len = 0; + int32_t len = 0; int is_fixed = lua_gettop(L) != 0; if (is_fixed) { + if (!luaL_isinteger(L, 1)) { + luaL_typerror(L, 1, "integer"); + } len = luaL_checkint(L, 1); /* size */ luaL_argcheck(L, (len > 0), 1, "the array length must be greater than zero"); } diff --git a/cmd/aergosvr/aergosvr.go b/cmd/aergosvr/aergosvr.go index 4f7e0df41..af6e5c494 100644 --- a/cmd/aergosvr/aergosvr.go +++ b/cmd/aergosvr/aergosvr.go @@ -6,6 +6,7 @@ package main import ( "fmt" + "github.com/aergoio/aergo/p2p/p2pkey" "net/http" _ "net/http/pprof" "os" @@ -22,8 +23,8 @@ import ( "github.com/aergoio/aergo/internal/common" "github.com/aergoio/aergo/mempool" "github.com/aergoio/aergo/p2p" - "github.com/aergoio/aergo/p2p/pmap" "github.com/aergoio/aergo/pkg/component" + polarisclient "github.com/aergoio/aergo/polaris/client" "github.com/aergoio/aergo/rpc" "github.com/aergoio/aergo/syncer" "github.com/opentracing/opentracing-go" @@ -148,7 +149,7 @@ func rootRun(cmd *cobra.Command, args []string) { svrlog.Warn().Msgf("Running with unsafe test mode. 
Turn off test mode for production use!") } - p2p.InitNodeInfo(&cfg.BaseConfig, cfg.P2P, svrlog) + p2pkey.InitNodeInfo(&cfg.BaseConfig, cfg.P2P, githash, svrlog) compMng := component.NewComponentHub() @@ -158,7 +159,7 @@ func rootRun(cmd *cobra.Command, args []string) { rpcSvc := rpc.NewRPC(cfg, chainSvc, githash) syncSvc := syncer.NewSyncer(cfg, chainSvc, nil) p2pSvc := p2p.NewP2P(cfg, chainSvc) - pmapSvc := pmap.NewPolarisConnectSvc(cfg.P2P, p2pSvc) + pmapSvc := polarisclient.NewPolarisConnectSvc(cfg.P2P, p2pSvc) var accountSvc component.IComponent if cfg.Personal { @@ -169,7 +170,7 @@ func rootRun(cmd *cobra.Command, args []string) { // function skips nil parameters. compMng.Register(chainSvc, mpoolSvc, rpcSvc, syncSvc, p2pSvc, accountSvc, pmapSvc) - consensusSvc, err := impl.New(cfg, compMng, chainSvc, p2pSvc.GetPeerAccessor(), rpcSvc) + consensusSvc, err := impl.New(cfg, compMng, chainSvc, p2pSvc, rpcSvc) if err != nil { svrlog.Error().Err(err).Msg("Failed to start consensus service.") os.Exit(1) diff --git a/cmd/brick/README.md b/cmd/brick/README.md index 0320f396c..7ce136301 100644 --- a/cmd/brick/README.md +++ b/cmd/brick/README.md @@ -76,6 +76,13 @@ deploy a smart contract. `deploy deploy tester 0 helloContract https://raw.githubusercontent.com/aergoio/aergo-contract-ex/master/contracts/helloworld/test/test-helloworld.brick + INF deploy a smart contract successfully cmd=deploy module=brick +``` + ### call call to execute a smart contract. `call [expected_error]` @@ -105,7 +112,7 @@ query to a smart contract. `query [ ### batch -keeps commands in a text file and use at later. `batch ` +keeps commands in a text file (local or http) and use at later. `batch ` ``` lua 4> batch ./example/hello.brick @@ -122,12 +129,21 @@ cancels the last tx (inject, send, deploy, call). `undo` INF Undo, Succesfully cmd=undo module=brick ``` +### forward + +skip blocks. `forward [height_to_skip]` + +``` lua +7> forward 100 + INF fast forward blocks successfully cmd=forward module=brick +``` + ### reset clear all txs and reset the chain. `reset` ``` lua -7> reset +107> reset INF reset a dummy chain successfully cmd=reset module=brick 0> ``` @@ -177,11 +193,11 @@ Or user can set the option `-w` to display the batch execution results continuou ## Debugging -If you build in debug mode, you can use `os, io, debug` modules which is not allowed in release mode. There is no limit to which debugger to use, but brick provides built-in debugger using customized [clidebugger](https://github.com/ToddWegner/clidebugger). For debugging purpose, brick has extended commands. +If you build in debug mode (`make debug`), you can use `os, io, debug` modules which is not allowed in release mode. There is no limit to which debugger to use, but brick provides built-in debugger using customized [clidebugger](https://github.com/ToddWegner/clidebugger). For debugging purpose, brick has extended commands. ### setb (brick / debugmode) -Set a breakpoint to the contract. When vm reach the line of breakpoint during a call or query of a contract, it enters debugmode. contract_name is optional in debugmode. `setb [contract_name]` +Set a breakpoint to the contract. When vm reach the line of breakpoint during a call or query of a contract, it enters debugmode only. contract_name is optional in debugmode. `setb [contract_name]` ### delb (brick / debugmode) @@ -195,7 +211,24 @@ Prints all breakpoints. `listb` Clear all breakpoints. `resetb` +### setw (brick / debugmode) + +Set an watchpoint expression. 
If one of watchpoint expressions is satisfied without regard to which contract is being executed, debug mode is activated. `setw ` + +### delw (brick / debugmode) + +Delete an existing watchpoint. `delw ` + +### listw (brick / debugmode) + +Prints all watchpoints. `listw` + +### resetw (brick / debugmode) + +Clear all watchpoints. `resetw` + ### in debugmode + When vm enters debugmode, prompt changes to `[DEBUG]>`. In debugmode, command set is changed for debugging purpose, like `run`, `exit`, `show`, `vars`. For more detail, type `help`. ## Debug using Zerobrane Studio diff --git a/cmd/brick/exec/batch.go b/cmd/brick/exec/batch.go index d1b63c3cc..5c0d53929 100644 --- a/cmd/brick/exec/batch.go +++ b/cmd/brick/exec/batch.go @@ -3,8 +3,10 @@ package exec import ( "bufio" "fmt" + "net/http" "os" "path/filepath" + "strings" "github.com/aergoio/aergo/cmd/brick/context" "github.com/fsnotify/fsnotify" @@ -64,23 +66,60 @@ func (c *batch) Validate(args string) error { return err } +func (c *batch) readBatchFile(batchFilePath string) ([]string, error) { + if strings.HasPrefix(batchFilePath, "http") { + // search in the web + req, err := http.NewRequest("GET", batchFilePath, nil) + if err != nil { + return nil, err + } + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var cmdLines []string + scanner := bufio.NewScanner(resp.Body) + for scanner.Scan() { + cmdLines = append(cmdLines, scanner.Text()) + } + + return cmdLines, nil + } + + batchFile, err := os.Open(batchFilePath) + if err != nil { + return nil, err + } + defer batchFile.Close() + + var cmdLines []string + scanner := bufio.NewScanner(batchFile) + for scanner.Scan() { + cmdLines = append(cmdLines, scanner.Text()) + } + + return cmdLines, nil +} + func (c *batch) parse(args string) (string, error) { splitArgs := context.SplitSpaceAndAccent(args, false) if len(splitArgs) != 1 { return "", fmt.Errorf("invalid format. 
usage: %s", c.Usage()) } - batchFilePath := splitArgs[0] + batchFilePath := splitArgs[0].Text - if _, err := os.Stat(batchFilePath.Text); os.IsNotExist(err) { - return "", fmt.Errorf("fail to read a brick batch file %s: %s", batchFilePath.Text, err.Error()) + if _, err := c.readBatchFile(batchFilePath); err != nil { + return "", fmt.Errorf("fail to read a brick batch file %s: %s", batchFilePath, err.Error()) } - return batchFilePath.Text, nil + return batchFilePath, nil } func (c *batch) Run(args string) (string, error) { - batchFilePath, _ := c.parse(args) stdOut := colorable.NewColorableStdout() var err error @@ -97,28 +136,21 @@ func (c *batch) Run(args string) (string, error) { prefix := "" - batchFile, err := os.Open(batchFilePath) + batchFilePath, _ := c.parse(args) + cmdLines, err := c.readBatchFile(batchFilePath) if err != nil { return "", err } - var cmdLines []string - scanner := bufio.NewScanner(batchFile) - for scanner.Scan() { - cmdLines = append(cmdLines, scanner.Text()) - } - - batchFile.Close() - c.level++ // set highest log level to turn off verbose if false == verboseBatch { zerolog.SetGlobalLevel(zerolog.ErrorLevel) - fmt.Fprintf(stdOut, "> %s\n", batchFile.Name()) + fmt.Fprintf(stdOut, "> %s\n", batchFilePath) } else if verboseBatch && c.level != 1 { prefix = fmt.Sprintf("%d-", c.level-1) - fmt.Fprintf(stdOut, "\n<<<<<<< %s\n", batchFile.Name()) + fmt.Fprintf(stdOut, "\n<<<<<<< %s\n", batchFilePath) } for i, line := range cmdLines { @@ -144,13 +176,13 @@ func (c *batch) Run(args string) (string, error) { if letBatchKnowErr != nil { // if there is error during execution, then print line for error trace - fmt.Fprintf(stdOut, "\x1B[0;37m%s:%d \x1B[34;1m%s \x1B[0m%s\n\n", batchFile.Name(), lineNum, cmd, args) + fmt.Fprintf(stdOut, "\x1B[0;37m%s:%d \x1B[34;1m%s \x1B[0m%s\n\n", batchFilePath, lineNum, cmd, args) letBatchKnowErr = nil } } if c.level != 1 && verboseBatch { - fmt.Fprintf(stdOut, ">>>>>>> %s\n", batchFile.Name()) + fmt.Fprintf(stdOut, ">>>>>>> %s\n", batchFilePath) } c.level-- @@ -168,13 +200,12 @@ func (c *batch) Run(args string) (string, error) { } // add file to watch list - if enableWatch { - absPath, _ := filepath.Abs(batchFile.Name()) + if enableWatch && !strings.HasPrefix(batchFilePath, "http") { + absPath, _ := filepath.Abs(batchFilePath) watcher.Add(absPath) } if c.level == 0 && enableWatch { - defer watcher.Close() // wait and check file changes fileWatching: for { @@ -183,11 +214,12 @@ func (c *batch) Run(args string) (string, error) { break fileWatching case err, _ := <-watcher.Errors: if err != nil { - fmt.Fprintf(stdOut, "\x1B[0;37mWatching File %s Error: %s\x1B[0m\n", batchFile.Name(), err.Error()) + fmt.Fprintf(stdOut, "\x1B[0;37mWatching File %s Error: %s\x1B[0m\n", batchFilePath, err.Error()) } break fileWatching } } + watcher.Close() continue } break diff --git a/cmd/brick/exec/callContract.go b/cmd/brick/exec/callContract.go index 6eaef19d9..0968757e1 100644 --- a/cmd/brick/exec/callContract.go +++ b/cmd/brick/exec/callContract.go @@ -65,6 +65,8 @@ func (c *callContract) parse(args string) (string, *big.Int, string, string, str expectedResult := "" if len(splitArgs) == 6 { expectedResult = splitArgs[5].Text + } else if len(splitArgs) > 6 { + return "", nil, "", "", "", "", fmt.Errorf("too many arguments. 
usage: %s", c.Usage()) } return splitArgs[0].Text, //accountName diff --git a/cmd/brick/exec/debug.go b/cmd/brick/exec/debug.go index 26c39412a..cb85a1086 100644 --- a/cmd/brick/exec/debug.go +++ b/cmd/brick/exec/debug.go @@ -15,8 +15,17 @@ func init() { registerExec(&delb{}) registerExec(&listb{}) registerExec(&resetb{}) + registerExec(&setw{}) + registerExec(&delw{}) + registerExec(&listw{}) + registerExec(&resetw{}) } +// ===================================== +// Breakpoint +// ===================================== + +// =========== setb ============== type setb struct{} func (c *setb) Command() string { @@ -191,7 +200,177 @@ func (c *resetb) Run(args string) (string, error) { return "reset breakpoints", nil } -// =========== interfaces ============== +// ===================================== +// Watchpoint +// ===================================== + +// =========== setw ============== +type setw struct{} + +func (c *setw) Command() string { + return "setw" +} + +func (c *setw) Syntax() string { + return fmt.Sprintf("%s", "") +} + +func (c *setw) Usage() string { + return "setw ``" +} + +func (c *setw) Describe() string { + return "set watchpoint" +} + +func (c *setw) Validate(args string) error { + + _, err := c.parse(args) + + return err +} + +func (c *setw) parse(args string) (string, error) { + splitArgs := context.SplitSpaceAndAccent(args, false) + if len(splitArgs) < 1 { + return "", fmt.Errorf("need an arguments. usage: %s", c.Usage()) + } + + return splitArgs[0].Text, nil +} + +func (c *setw) Run(args string) (string, error) { + watch_expr, _ := c.parse(args) + + err := contract.SetWatchPoint(watch_expr) + if err != nil { + return "", err + } + + return "set watchpoint: " + watch_expr, nil +} + +// =========== delw ============== + +type delw struct{} + +func (c *delw) Command() string { + return "delw" +} + +func (c *delw) Syntax() string { + return fmt.Sprintf("%s", "") +} + +func (c *delw) Usage() string { + return "delw " +} + +func (c *delw) Describe() string { + return "delete watchpoint" +} + +func (c *delw) Validate(args string) error { + + _, err := c.parse(args) + + return err +} + +func (c *delw) parse(args string) (uint64, error) { + splitArgs := context.SplitSpaceAndAccent(args, false) + if len(splitArgs) < 1 { + return 0, fmt.Errorf("need an arguments. 
usage: %s", c.Usage()) + } + + idx, err := strconv.ParseUint(splitArgs[0].Text, 10, 64) + if err != nil { + return 0, fmt.Errorf("fail to parse number %s: %s", splitArgs[0].Text, err.Error()) + } + + return idx, nil +} + +func (c *delw) Run(args string) (string, error) { + idx, _ := c.parse(args) + + err := contract.DelWatchPoint(idx) + if err != nil { + return "", err + } + + return "del watchpoint: " + fmt.Sprintf("%d", idx), nil +} + +// =========== listw ============== + +type listw struct{} + +func (c *listw) Command() string { + return "listw" +} + +func (c *listw) Syntax() string { + return "" +} + +func (c *listw) Usage() string { + return "listw" +} + +func (c *listw) Describe() string { + return "list all watchpoints" +} + +func (c *listw) Validate(args string) error { + return nil +} + +func (c *listw) Run(args string) (string, error) { + watchpoints := contract.ListWatchPoints() + i := 0 + for e := watchpoints.Front(); e != nil; e = e.Next() { + i++ + fmt.Printf("%d: %s\n", i, e.Value) + } + + return "list watchpoints", nil +} + +// =========== resetb ============== + +type resetw struct{} + +func (c *resetw) Command() string { + return "resetw" +} + +func (c *resetw) Syntax() string { + return "" +} + +func (c *resetw) Usage() string { + return "resetw" +} + +func (c *resetw) Describe() string { + return "reset all watchpoints" +} + +func (c *resetw) Validate(args string) error { + return nil +} + +func (c *resetw) Run(args string) (string, error) { + contract.ResetWatchPoints() + + return "reset watchpoints", nil +} + +// ===================================== +// interfaces +// ===================================== + func resetContractInfoInterface() { contract.ResetContractInfo() } diff --git a/cmd/brick/exec/deployContract.go b/cmd/brick/exec/deployContract.go index 174a79fd1..f74d03e50 100644 --- a/cmd/brick/exec/deployContract.go +++ b/cmd/brick/exec/deployContract.go @@ -3,8 +3,11 @@ package exec import ( "fmt" "io/ioutil" + "math/big" + "net/http" "os" - "strconv" + "path/filepath" + "strings" "github.com/aergoio/aergo/cmd/brick/context" "github.com/aergoio/aergo/contract" @@ -45,23 +48,58 @@ func (c *deployContract) Validate(args string) error { return err } -func (c *deployContract) parse(args string) (string, uint64, string, string, string, error) { +func (c *deployContract) readDefFile(defPath string) ([]byte, error) { + if strings.HasPrefix(defPath, "http") { + // search in the web + req, err := http.NewRequest("GET", defPath, nil) + if err != nil { + return nil, err + } + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + defByte, _ := ioutil.ReadAll(resp.Body) + + return defByte, nil + } + + // search in a local file system + if _, err := os.Stat(defPath); os.IsNotExist(err) { + return nil, err + } + defByte, err := ioutil.ReadFile(defPath) + if err != nil { + return nil, err + } + + return defByte, nil + +} + +func (c *deployContract) parse(args string) (string, *big.Int, string, string, string, error) { splitArgs := context.SplitSpaceAndAccent(args, false) if len(splitArgs) < 4 { - return "", 0, "", "", "", fmt.Errorf("need 4 arguments. usage: %s", c.Usage()) + return "", nil, "", "", "", fmt.Errorf("need 4 arguments. 
usage: %s", c.Usage()) } - amount, err := strconv.ParseUint(splitArgs[1].Text, 10, 64) - if err != nil { - return "", 0, "", "", "", fmt.Errorf("fail to parse number %s: %s", splitArgs[1].Text, err.Error()) + + amount, success := new(big.Int).SetString(splitArgs[1].Text, 10) + if success == false { + return "", nil, "", "", "", fmt.Errorf("fail to parse number %s", splitArgs[1].Text) } + defPath := splitArgs[3].Text - if _, err := os.Stat(defPath); os.IsNotExist(err) { - return "", 0, "", "", "", fmt.Errorf("fail to read a contrat def file %s: %s", splitArgs[3].Text, err.Error()) + if _, err := c.readDefFile(defPath); err != nil { + return "", nil, "", "", "", fmt.Errorf("fail to read a contrat def file %s: %s", splitArgs[3].Text, err.Error()) } constuctorArg := "[]" if len(splitArgs) == 5 { constuctorArg = splitArgs[4].Text + } else if len(splitArgs) > 5 { + return "", nil, "", "", "", fmt.Errorf("too many arguments. usage: %s", c.Usage()) } return splitArgs[0].Text, //accountName @@ -75,7 +113,7 @@ func (c *deployContract) parse(args string) (string, uint64, string, string, str func (c *deployContract) Run(args string) (string, error) { accountName, amount, contractName, defPath, constuctorArg, _ := c.parse(args) - defByte, err := ioutil.ReadFile(defPath) + defByte, err := c.readDefFile(defPath) if err != nil { return "", err } @@ -83,9 +121,18 @@ func (c *deployContract) Run(args string) (string, error) { updateContractInfoInterface(contractName, defPath) err = context.Get().ConnectBlock( - contract.NewRawLuaTxDef(accountName, contractName, amount, string(defByte)).Constructor(constuctorArg), + contract.NewRawLuaTxDefBig(accountName, contractName, amount, string(defByte)).Constructor(constuctorArg), ) + if enableWatch && !strings.HasPrefix(defPath, "http") { + absPath, _ := filepath.Abs(defPath) + watcher.Add(absPath) + } + + if err != nil { + return "", err + } + Index(context.ContractSymbol, contractName) Index(context.AccountSymbol, contractName) diff --git a/cmd/brick/exec/forward.go b/cmd/brick/exec/forward.go new file mode 100644 index 000000000..11be6bfb4 --- /dev/null +++ b/cmd/brick/exec/forward.go @@ -0,0 +1,70 @@ +package exec + +import ( + "fmt" + "strconv" + + "github.com/aergoio/aergo/cmd/brick/context" +) + +func init() { + registerExec(&forward{}) +} + +type forward struct{} + +func (c *forward) Command() string { + return "forward" +} + +func (c *forward) Syntax() string { + return context.AmountSymbol +} + +func (c *forward) Usage() string { + return "forward [height_to_skip]" +} + +func (c *forward) Describe() string { + return "fast forward blocks n times (default = 1)" +} + +func (c *forward) Validate(args string) error { + // is chain is loaded? + if context.Get() == nil { + return fmt.Errorf("load chain first") + } + + _, err := c.parse(args) + + return err +} + +func (c *forward) parse(args string) (int, error) { + splitArgs := context.SplitSpaceAndAccent(args, false) + if len(splitArgs) == 0 { + height, _ := strconv.Atoi("1") + return height, nil + } else if len(splitArgs) > 1 { + return 0, fmt.Errorf("need 1 or 0 arguments. 
usage: %s", c.Usage()) + } + + amount, err := strconv.Atoi(splitArgs[0].Text) + if err != nil { + return 0, fmt.Errorf("fail to parse number %s: %s", splitArgs[0].Text, err.Error()) + } + + return amount, nil +} + +func (c *forward) Run(args string) (string, error) { + amount, _ := c.parse(args) + + for i := 0; i < amount; i++ { + if err := context.Get().ConnectBlock(); err != nil { + return "", err + } + } + + return "fast forward blocks successfully", nil +} diff --git a/cmd/brick/exec/getstateAccount.go b/cmd/brick/exec/getstateAccount.go index c898c502d..6a6e266fb 100644 --- a/cmd/brick/exec/getstateAccount.go +++ b/cmd/brick/exec/getstateAccount.go @@ -49,6 +49,8 @@ func (c *getStateAccount) parse(args string) (string, string, error) { expectedResult := "" if len(splitArgs) == 2 { expectedResult = splitArgs[1].Text + } else if len(splitArgs) > 2 { + return "", "", fmt.Errorf("too many arguments. usage: %s", c.Usage()) } return splitArgs[0].Text, expectedResult, nil diff --git a/cmd/brick/exec/queryContract.go b/cmd/brick/exec/queryContract.go index 9978d3f19..658704f68 100644 --- a/cmd/brick/exec/queryContract.go +++ b/cmd/brick/exec/queryContract.go @@ -57,6 +57,8 @@ func (c *queryContract) parse(args string) (string, string, string, string, erro expectedResult := "" if len(splitArgs) == 4 { expectedResult = splitArgs[3].Text + } else if len(splitArgs) > 4 { + return "", "", "", "", fmt.Errorf("too many arguments. usage: %s", c.Usage()) } return splitArgs[0].Text, // contractName diff --git a/cmd/colaris/cmd/config.go b/cmd/colaris/cmd/config.go index e7ea6d7a6..50a2423e4 100644 --- a/cmd/colaris/cmd/config.go +++ b/cmd/colaris/cmd/config.go @@ -7,7 +7,7 @@ package cmd import ( "github.com/aergoio/aergo-lib/config" - "github.com/aergoio/aergo/p2p/pmap" + "github.com/aergoio/aergo/polaris/common" ) const ( @@ -37,7 +37,7 @@ type CliConfig struct { func (ctx *CliContext) GetDefaultConfig() interface{} { return CliConfig{ Host: "localhost", - Port: pmap.DefaultRPCPort, + Port: common.DefaultRPCPort, } } diff --git a/cmd/colaris/cmd/current.go b/cmd/colaris/cmd/current.go index 942ab569a..566b1596f 100644 --- a/cmd/colaris/cmd/current.go +++ b/cmd/colaris/cmd/current.go @@ -56,5 +56,11 @@ func execCurrentPeers(cmd *cobra.Command, args []string) { cmd.Printf("Failed: %s", err.Error()) return } + // TODO decorate other props also;e.g. uint64 timestamp to human readable time format! 
+ for _, p := range msg.Peers { + if p.Verion == "" { + p.Verion = "(old)" + } + } cmd.Println(util.JSON(msg)) } diff --git a/cmd/colaris/cmd/root.go b/cmd/colaris/cmd/root.go index eeb3aebf0..8ed66e628 100644 --- a/cmd/colaris/cmd/root.go +++ b/cmd/colaris/cmd/root.go @@ -51,7 +51,7 @@ func init() { rootCmd.PersistentFlags().StringVar(&home, "home", "", "aergo cli home path") rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is cliconfig.toml)") rootCmd.PersistentFlags().StringVarP(&host, "host", "H", "localhost", "Host address to aergo server") - rootCmd.PersistentFlags().Int32VarP(&port, "port", "p", 7845, "Port number to aergo server") + rootCmd.PersistentFlags().Int32VarP(&port, "port", "p", 8915, "Port number to polaris server") } func initConfig() { diff --git a/cmd/polaris/polaris.go b/cmd/polaris/polaris.go index f055a65e8..e633a44db 100644 --- a/cmd/polaris/polaris.go +++ b/cmd/polaris/polaris.go @@ -7,7 +7,9 @@ package main import ( "fmt" "github.com/aergoio/aergo-actor/actor" - "github.com/aergoio/aergo/p2p/pmap" + "github.com/aergoio/aergo/p2p/p2pkey" + common2 "github.com/aergoio/aergo/polaris/common" + "github.com/aergoio/aergo/polaris/server" "net/http" _ "net/http/pprof" "os" @@ -16,7 +18,6 @@ import ( "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/config" "github.com/aergoio/aergo/internal/common" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/pkg/component" "github.com/spf13/cobra" ) @@ -67,8 +68,8 @@ func initConfig() { } func arrangeDefaultCfgForPolaris(cfg *config.Config) { - cfg.RPC.NetServicePort = pmap.DefaultRPCPort - cfg.P2P.NetProtocolPort = pmap.DefaultSrvPort + cfg.RPC.NetServicePort = common2.DefaultRPCPort + cfg.P2P.NetProtocolPort = common2.DefaultSrvPort } func rootRun(cmd *cobra.Command, args []string) { @@ -88,13 +89,13 @@ func rootRun(cmd *cobra.Command, args []string) { svrlog.Warn().Msgf("Running with unsafe test mode. Turn off test mode for production use!") } - p2p.InitNodeInfo(&cfg.BaseConfig, cfg.P2P, svrlog) + p2pkey.InitNodeInfo(&cfg.BaseConfig, cfg.P2P, "TODO", svrlog) compMng := component.NewComponentHub() - lntc := pmap.NewNTContainer(cfg) - pmapSvc := pmap.NewPolarisService(cfg, lntc) - rpcSvc := pmap.NewPolarisRPC(cfg) + lntc := server.NewNTContainer(cfg) + pmapSvc := server.NewPolarisService(cfg, lntc) + rpcSvc := server.NewPolarisRPC(cfg) // Register services to Hub. Don't need to do nil-check since Register // function skips nil parameters. @@ -110,7 +111,6 @@ func rootRun(cmd *cobra.Command, args []string) { // actors are started. 
compMng.Start() - common.HandleKillSig(func() { //consensus.Stop(consensusSvc) compMng.Stop() @@ -127,7 +127,7 @@ type RedirectService struct { } func NewRedirectService(cfg *config.Config, svcPid string) *RedirectService { - logger := log.NewLogger(svcPid) + logger := log.NewLogger(svcPid) rs := &RedirectService{} rs.BaseComponent = component.NewBaseComponent(svcPid, rs, logger) @@ -138,10 +138,9 @@ func (rs *RedirectService) Receive(context actor.Context) { // ignore for now } -func (rs *RedirectService) BeforeStart() {} -func (rs *RedirectService) AfterStart() {} -func (rs *RedirectService) BeforeStop() {} - +func (rs *RedirectService) BeforeStart() {} +func (rs *RedirectService) AfterStart() {} +func (rs *RedirectService) BeforeStop() {} func (rs *RedirectService) Statistics() *map[string]interface{} { dummy := make(map[string]interface{}) diff --git a/cmd/polaris/polaris_test.go b/cmd/polaris/polaris_test.go index bfa4c89e7..a0e1e5f57 100644 --- a/cmd/polaris/polaris_test.go +++ b/cmd/polaris/polaris_test.go @@ -1,7 +1,7 @@ package main import ( - "github.com/aergoio/aergo/p2p/pmap" + "github.com/aergoio/aergo/polaris/common" _ "net/http/pprof" "testing" @@ -24,11 +24,11 @@ func Test_arrangeDefaultCfgForPolaris(t *testing.T) { t.Errorf("Assumption failure: default cfg.P2P.NetProtocolPort = %d, want %d",cfg.P2P.NetProtocolPort, 7846) } arrangeDefaultCfgForPolaris(cfg) - if cfg.RPC.NetServicePort != pmap.DefaultRPCPort { - t.Errorf("cfg.RPC.NetServicePort = %d, want %d",cfg.RPC.NetServicePort, pmap.DefaultRPCPort) + if cfg.RPC.NetServicePort != common.DefaultRPCPort { + t.Errorf("cfg.RPC.NetServicePort = %d, want %d",cfg.RPC.NetServicePort, common.DefaultRPCPort) } - if cfg.P2P.NetProtocolPort != pmap.DefaultSrvPort { - t.Errorf("cfg.P2P.NetProtocolPort = %d, want %d",cfg.P2P.NetProtocolPort, pmap.DefaultSrvPort) + if cfg.P2P.NetProtocolPort != common.DefaultSrvPort { + t.Errorf("cfg.P2P.NetProtocolPort = %d, want %d",cfg.P2P.NetProtocolPort, common.DefaultSrvPort) } }) } diff --git a/config/types.go b/config/types.go index 297e800b5..0397c59f3 100644 --- a/config/types.go +++ b/config/types.go @@ -106,17 +106,19 @@ type ConsensusConfig struct { } type RaftConfig struct { - RaftID uint64 `mapstructure:"raftid" description:"raft bp id. this value should be index of raftbpurls(1 <= raftid <= length of raftbpruls)"` - RaftListenUrl string `mapstructure:"raftlistenurl" description:"raft http bind address. If it was set, it only accept connection to this addresse only"` - RaftBPs []RaftBPConfig `mapstructure:"raftbps"` - RaftSkipEmpty bool `mapstructure:"raftskipempty" description:"skip producing block if there is no tx in block"` - RaftKeyFile string `mapstructure:"raftkeyfile" description:"Private Key file for raft https server"` - RaftCertFile string `mapstructure:"raftcertfile" description:"Certificate file for raft https server"` - RaftTick uint `mapstructure:"rafttick" description:"tick of raft server (millisec)"` + Name string `mapstructure:"name" description:"raft node name. this value must be unique in cluster"` + ListenUrl string `mapstructure:"listenurl" description:"raft http bind address. 
If it was set, it only accept connection to this addresse only"` + BPs []RaftBPConfig `mapstructure:"bps"` + SkipEmpty bool `mapstructure:"skipempty" description:"skip producing block if there is no tx in block"` + KeyFile string `mapstructure:"keyfile" description:"Private Key file for raft https server"` + CertFile string `mapstructure:"certfile" description:"Certificate file for raft https server"` + Tick uint `mapstructure:"tick" description:"tick of raft server (millisec)"` + NewCluster bool `mapstructure:"newcluster" description:"create a new raft cluster if it doesn't already exist"` + SnapFrequency uint64 `mapstructure:"snapfrequency" description:"frequency which raft make snapshot with log"` } type RaftBPConfig struct { - ID uint64 `mapstructure:"id" description:"raft ID"` + Name string `mapstructure:"name" description:"raft node name"` Url string `mapstructure:"url" description:"raft url"` P2pID string `mapstructure:"p2pid" description:"p2p ID of this bp"` } diff --git a/consensus/chain/block.go b/consensus/chain/block.go index 461874d90..c743907a1 100644 --- a/consensus/chain/block.go +++ b/consensus/chain/block.go @@ -3,6 +3,9 @@ package chain import ( "errors" "fmt" + "github.com/aergoio/aergo/internal/enc" + "github.com/aergoio/aergo/p2p/p2putil" + "github.com/libp2p/go-libp2p-peer" "time" "github.com/aergoio/aergo/chain" @@ -17,6 +20,7 @@ var ( ErrQuit = errors.New("shutdown initiated") errBlockSizeLimit = errors.New("the transactions included exceeded the block size limit") ErrBlockEmpty = errors.New("no transactions in block") + ErrSyncChain = errors.New("failed to sync request") ) // ErrTimeout can be used to indicatefor any kind of timeout. @@ -89,11 +93,11 @@ func GenerateBlock(hs component.ICompSyncRequester, prevBlock *types.Block, bSta } // ConnectBlock send an AddBlock request to the chain service. -func ConnectBlock(hs component.ICompSyncRequester, block *types.Block, blockState *state.BlockState) error { +func ConnectBlock(hs component.ICompSyncRequester, block *types.Block, blockState *state.BlockState, timeout time.Duration) error { // blockState does not include a valid BlockHash since it is constructed // from an incomplete block. So set it here. _, err := hs.RequestFuture(message.ChainSvc, &message.AddBlock{PeerID: "", Block: block, Bstate: blockState}, - time.Second, "consensus/chain/info.ConnectBlock").Result() + timeout, "consensus/chain/info.ConnectBlock").Result() if err != nil { logger.Error().Err(err).Uint64("no", block.Header.BlockNo). Str("hash", block.ID()). @@ -105,3 +109,27 @@ func ConnectBlock(hs component.ICompSyncRequester, block *types.Block, blockStat return nil } + +func SyncChain(hs *component.ComponentHub, targetHash []byte, targetNo types.BlockNo, peerID peer.ID) error { + logger.Info().Str("peer", p2putil.ShortForm(peerID)).Uint64("no", targetNo). + Str("hash", enc.ToString(targetHash)).Msg("request to sync for consensus") + + notiC := make(chan error) + hs.Tell(message.SyncerSvc, &message.SyncStart{PeerID: peerID, TargetNo: targetNo, NotifyC: notiC}) + + // wait end of sync every 1sec + select { + case err := <-notiC: + if err != nil { + logger.Error().Err(err).Uint64("no", targetNo). + Str("hash", enc.ToString(targetHash)). 
+ Msg("failed to sync") + + return err + } + } + + logger.Info().Str("peer", p2putil.ShortForm(peerID)).Msg("succeeded to sync for consensus") + // TODO check best block is equal to target Hash/no + return nil +} diff --git a/consensus/consensus.go b/consensus/consensus.go index 7594d164a..4b5cafc8f 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -6,13 +6,16 @@ package consensus import ( + "context" "encoding/json" + "errors" "fmt" "time" "github.com/aergoio/aergo-lib/db" "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/types" + "github.com/aergoio/etcd/raft/raftpb" ) // DefaultBlockIntervalSec is the default block generation interval in seconds. @@ -28,6 +31,10 @@ var ( logger = log.NewLogger("consensus") ) +var ( + ErrNotSupportedMethod = errors.New("not supported metehod in this consensus") +) + // InitBlockInterval initializes block interval parameters. func InitBlockInterval(blockIntervalSec int64) { if blockIntervalSec > 0 { @@ -66,6 +73,8 @@ type Consensus interface { type ConsensusAccessor interface { ConsensusInfo() *types.ConsensusInfo + ConfChange(req *types.MembershipChange) (*Member, error) + ClusterInfo() ([]*types.MemberAttr, []byte, error) } // ChainDB is a reader interface for the ChainDB. @@ -95,11 +104,29 @@ type ChainConsensus interface { VerifySign(block *types.Block) error IsBlockValid(block *types.Block, bestBlock *types.Block) error Update(block *types.Block) - Save(tx db.Transaction) error + Save(tx TxWriter) error NeedReorganization(rootNo types.BlockNo) bool + NeedNotify() bool + HasWAL() bool // if consensus has WAL, block has already written in db Info() string } +type TxWriter interface { + Set(key, value []byte) +} + +type ConfChangePropose struct { + Ctx context.Context + Cc *raftpb.ConfChange + + ReplyC chan *ConfChangeReply +} + +type ConfChangeReply struct { + Member *Member + Err error +} + // Info represents an information for a consensus implementation. 
type Info struct { Type string diff --git a/consensus/impl/dpos/blockfactory.go b/consensus/impl/dpos/blockfactory.go index efc41e2e1..5c3887c4a 100644 --- a/consensus/impl/dpos/blockfactory.go +++ b/consensus/impl/dpos/blockfactory.go @@ -7,6 +7,7 @@ package dpos import ( "fmt" + "github.com/aergoio/aergo/p2p/p2pkey" "runtime" "time" @@ -15,7 +16,6 @@ import ( "github.com/aergoio/aergo/consensus/chain" "github.com/aergoio/aergo/contract" "github.com/aergoio/aergo/internal/enc" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/pkg/component" "github.com/aergoio/aergo/state" "github.com/aergoio/aergo/types" @@ -31,10 +31,10 @@ type txExec struct { execTx bc.TxExecFn } -func newTxExec(blockNo types.BlockNo, ts int64, prevHash []byte, chainID []byte) chain.TxOp { +func newTxExec(cdb contract.ChainAccessor, blockNo types.BlockNo, ts int64, prevHash []byte, chainID []byte) chain.TxOp { // Block hash not determined yet return &txExec{ - execTx: bc.NewTxExecutor(blockNo, ts, prevHash, contract.BlockFactory, chainID), + execTx: bc.NewTxExecutor(cdb, blockNo, ts, prevHash, contract.BlockFactory, chainID), } } @@ -66,8 +66,8 @@ func NewBlockFactory(hub *component.ComponentHub, sdb *state.ChainStateDB, quitC bpTimeoutC: make(chan interface{}, 1), maxBlockBodySize: chain.MaxBlockBodySize(), quit: quitC, - ID: p2p.NodeSID(), - privKey: p2p.NodePrivKey(), + ID: p2pkey.NodeSID(), + privKey: p2pkey.NodePrivKey(), sdb: sdb, } @@ -189,7 +189,7 @@ func (bf *BlockFactory) worker() { continue } - err = chain.ConnectBlock(bf, block, blockState) + err = chain.ConnectBlock(bf, block, blockState, time.Second) if err == nil { lpbNo = block.BlockNo() } else { @@ -217,7 +217,7 @@ func (bf *BlockFactory) generateBlock(bpi *bpInfo, lpbNo types.BlockNo) (block * txOp := chain.NewCompTxOp( bf.txOp, - newTxExec(bpi.bestBlock.GetHeader().GetBlockNo()+1, ts, bpi.bestBlock.BlockHash(), bpi.bestBlock.GetHeader().ChainID), + newTxExec(contract.ChainAccessor(bpi.ChainDB), bpi.bestBlock.GetHeader().GetBlockNo()+1, ts, bpi.bestBlock.BlockHash(), bpi.bestBlock.GetHeader().ChainID), ) block, err = chain.GenerateBlock(bf, bpi.bestBlock, bs, txOp, ts, false) diff --git a/consensus/impl/dpos/dpos.go b/consensus/impl/dpos/dpos.go index 3a5642e9e..1d70c9cb2 100644 --- a/consensus/impl/dpos/dpos.go +++ b/consensus/impl/dpos/dpos.go @@ -8,6 +8,7 @@ package dpos import ( "encoding/json" "fmt" + "github.com/aergoio/aergo/p2p/p2pkey" "time" "github.com/aergoio/aergo-lib/log" @@ -15,7 +16,6 @@ import ( "github.com/aergoio/aergo/consensus" "github.com/aergoio/aergo/consensus/impl/dpos/bp" "github.com/aergoio/aergo/consensus/impl/dpos/slot" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/pkg/component" "github.com/aergoio/aergo/state" "github.com/aergoio/aergo/types" @@ -164,7 +164,7 @@ func (dpos *DPoS) QuitChan() chan interface{} { } func (dpos *DPoS) bpid() peer.ID { - return p2p.NodeID() + return p2pkey.NodeID() } // VerifyTimestamp checks the validity of the block timestamp. 
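Easy to miss in the consensus.go hunk above is that Save now takes the new consensus.TxWriter instead of db.Transaction. The sketch below is not code from this changeset (the key name and memWriter type are made up); it only illustrates why the narrowing is compatible: anything that can Set a key/value pair, whether a DB transaction or raftv2's WAL writer, satisfies the interface.

```go
package main

import "fmt"

// TxWriter mirrors the new interface in consensus/consensus.go.
type TxWriter interface {
	Set(key, value []byte)
}

// memWriter is a hypothetical stand-in for db.Transaction or a raft WAL batch.
type memWriter struct{ kv map[string][]byte }

func (w *memWriter) Set(key, value []byte) { w.kv[string(key)] = value }

// saveStatus has the shape of a Save(tx TxWriter) implementation: it only
// needs Set, so callers may hand it whatever writer backs their consensus.
func saveStatus(tx TxWriter, encodedStatus []byte) error {
	tx.Set([]byte("consensus.status"), encodedStatus) // illustrative key
	return nil
}

func main() {
	w := &memWriter{kv: make(map[string][]byte)}
	if err := saveStatus(w, []byte("lib-state")); err != nil {
		fmt.Println(err)
	}
	fmt.Printf("%d key(s) written\n", len(w.kv))
}
```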
@@ -317,3 +317,19 @@ func isBpTiming(block *types.Block, s *slot.Slot) bool { return true } + +func (dpos *DPoS) NeedNotify() bool { + return true +} + +func (dpos *DPoS) HasWAL() bool { + return false +} + +func (dpos *DPoS) ConfChange(req *types.MembershipChange) (*consensus.Member, error) { + return nil, consensus.ErrNotSupportedMethod +} + +func (dpos *DPoS) ClusterInfo() ([]*types.MemberAttr, []byte, error) { + return nil, nil, consensus.ErrNotSupportedMethod +} diff --git a/consensus/impl/dpos/lib.go b/consensus/impl/dpos/lib.go index 4207699e8..dac38fd60 100644 --- a/consensus/impl/dpos/lib.go +++ b/consensus/impl/dpos/lib.go @@ -3,11 +3,12 @@ package dpos import ( "container/list" "fmt" + "github.com/aergoio/aergo/p2p/p2pkey" "sort" "github.com/aergoio/aergo-lib/db" + "github.com/aergoio/aergo/consensus" "github.com/aergoio/aergo/internal/common" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/types" "github.com/davecgh/go-spew/spew" ) @@ -52,7 +53,7 @@ func newLibStatus(confirmsRequired uint16) *libStatus { Prpsd: make(proposed), Lib: &blockInfo{}, confirms: list.New(), - bpid: p2p.NodeSID(), + bpid: p2pkey.NodeSID(), confirmsRequired: confirmsRequired, } } @@ -211,7 +212,7 @@ func (ls *libStatus) load(endBlockNo types.BlockNo) { } } -func (ls *libStatus) save(tx db.Transaction) error { +func (ls *libStatus) save(tx consensus.TxWriter) error { b, err := common.GobEncode(ls) if err != nil { return err diff --git a/consensus/impl/dpos/status.go b/consensus/impl/dpos/status.go index e47b2c4cf..73934d914 100644 --- a/consensus/impl/dpos/status.go +++ b/consensus/impl/dpos/status.go @@ -4,7 +4,6 @@ import ( "encoding/json" "sync" - "github.com/aergoio/aergo-lib/db" "github.com/aergoio/aergo/consensus" "github.com/aergoio/aergo/consensus/impl/dpos/bp" "github.com/aergoio/aergo/state" @@ -148,7 +147,7 @@ func (s *Status) updateLIB(lib *blockInfo) { } // Save saves the consensus status information for the later recovery. -func (s *Status) Save(tx db.Transaction) error { +func (s *Status) Save(tx consensus.TxWriter) error { s.Lock() defer s.Unlock() diff --git a/consensus/impl/impl.go b/consensus/impl/impl.go index 5be5c9153..2a4404130 100644 --- a/consensus/impl/impl.go +++ b/consensus/impl/impl.go @@ -10,15 +10,16 @@ import ( "github.com/aergoio/aergo/config" "github.com/aergoio/aergo/consensus" "github.com/aergoio/aergo/consensus/impl/dpos" - "github.com/aergoio/aergo/consensus/impl/raft" + "github.com/aergoio/aergo/consensus/impl/raftv2" "github.com/aergoio/aergo/consensus/impl/sbp" + "github.com/aergoio/aergo/p2p" + "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/pkg/component" "github.com/aergoio/aergo/rpc" - "github.com/aergoio/aergo/types" ) // New returns consensus.Consensus based on the configuration parameters. -func New(cfg *config.Config, hub *component.ComponentHub, cs *chain.ChainService, pa types.PeerAccessor, rpcSvc *rpc.RPC) (consensus.Consensus, error) { +func New(cfg *config.Config, hub *component.ComponentHub, cs *chain.ChainService, p2psvc *p2p.P2P, rpcSvc *rpc.RPC) (consensus.Consensus, error) { var ( c consensus.Consensus err error @@ -34,24 +35,25 @@ func New(cfg *config.Config, hub *component.ComponentHub, cs *chain.ChainService consensus.InitBlockInterval(blockInterval) - if c, err = newConsensus(cfg, hub, cs); err == nil { + if c, err = newConsensus(cfg, hub, cs, p2psvc.GetPeerAccessor()); err == nil { // Link mutual references. 
cs.SetChainConsensus(c) rpcSvc.SetConsensusAccessor(c) + p2psvc.SetConsensusAccessor(c) } return c, err } func newConsensus(cfg *config.Config, hub *component.ComponentHub, - cs *chain.ChainService) (consensus.Consensus, error) { + cs *chain.ChainService, pa p2pcommon.PeerAccessor) (consensus.Consensus, error) { cdb := cs.CDB() sdb := cs.SDB() impl := map[string]consensus.Constructor{ - dpos.GetName(): dpos.GetConstructor(cfg, hub, cdb, sdb), // DPoS - sbp.GetName(): sbp.GetConstructor(cfg, hub, cdb, sdb), // Simple BP - raft.GetName(): raft.GetConstructor(cfg, hub, cdb, sdb), // Raft BP + dpos.GetName(): dpos.GetConstructor(cfg, hub, cdb, sdb), // DPoS + sbp.GetName(): sbp.GetConstructor(cfg, hub, cdb, sdb), // Simple BP + raftv2.GetName(): raftv2.GetConstructor(cfg, hub, cs.WalDB(), sdb, pa), // Raft BP } return impl[cdb.GetGenesisInfo().ConsensusType()]() diff --git a/consensus/impl/raft/blockfactory.go b/consensus/impl/raft/blockfactory.go deleted file mode 100644 index bd8f4f3a6..000000000 --- a/consensus/impl/raft/blockfactory.go +++ /dev/null @@ -1,365 +0,0 @@ -package raft - -import ( - "encoding/json" - "errors" - "fmt" - "runtime" - "time" - - "github.com/aergoio/aergo/internal/enc" - "github.com/aergoio/aergo/p2p" - crypto "github.com/libp2p/go-libp2p-crypto" - - "github.com/aergoio/aergo-lib/db" - "github.com/aergoio/aergo-lib/log" - bc "github.com/aergoio/aergo/chain" - "github.com/aergoio/aergo/config" - "github.com/aergoio/aergo/consensus" - "github.com/aergoio/aergo/consensus/chain" - "github.com/aergoio/aergo/contract" - "github.com/aergoio/aergo/pkg/component" - "github.com/aergoio/aergo/state" - "github.com/aergoio/aergo/types" - - "github.com/aergoio/etcd/raft/raftpb" -) - -const ( - slotQueueMax = 100 -) - -var ( - logger *log.Logger - httpLogger *log.Logger - RaftTick = DefaultTickMS - RaftSkipEmptyBlock = false - peerCheckInterval = time.Second * 3 -) - -var ( - ErrBFQuit = errors.New("block factory quit") -) - -func init() { - logger = log.NewLogger("raft") - httpLogger = log.NewLogger("rafthttp") -} - -type txExec struct { - execTx bc.TxExecFn -} - -func newTxExec(blockNo types.BlockNo, ts int64, prevHash []byte, chainID []byte) chain.TxOp { - // Block hash not determined yet - return &txExec{ - execTx: bc.NewTxExecutor(blockNo, ts, prevHash, contract.BlockFactory, chainID), - } -} - -func (te *txExec) Apply(bState *state.BlockState, tx types.Transaction) error { - err := te.execTx(bState, tx) - return err -} - -// BlockFactory implments a raft block factory which generate block each cfg.Consensus.BlockInterval if this node is leader of raft -// -// This can be used for testing purpose. -type BlockFactory struct { - *component.ComponentHub - consensus.ChainDB - - bpc *Cluster - jobQueue chan interface{} - quit chan interface{} - blockInterval time.Duration - maxBlockBodySize uint32 - ID string - privKey crypto.PrivKey - txOp chain.TxOp - sdb *state.ChainStateDB - prevBlock *types.Block // best block of last job - - raftServer *raftServer -} - -// GetName returns the name of the consensus. -func GetName() string { - return consensus.ConsensusName[consensus.ConsensusRAFT] -} - -// GetConstructor build and returns consensus.Constructor from New function. -func GetConstructor(cfg *config.Config, hub *component.ComponentHub, cdb consensus.ChainDB, - sdb *state.ChainStateDB) consensus.Constructor { - return func() (consensus.Consensus, error) { - return New(cfg, hub, cdb, sdb) - } -} - -// New returns a BlockFactory. 
-func New(cfg *config.Config, hub *component.ComponentHub, cdb consensus.ChainDB, - sdb *state.ChainStateDB) (*BlockFactory, error) { - - bf := &BlockFactory{ - ComponentHub: hub, - ChainDB: cdb, - jobQueue: make(chan interface{}, slotQueueMax), - blockInterval: time.Second * time.Duration(cfg.Consensus.BlockInterval), - maxBlockBodySize: chain.MaxBlockBodySize(), - quit: make(chan interface{}), - ID: p2p.NodeSID(), - privKey: p2p.NodePrivKey(), - sdb: sdb, - } - - if cfg.Consensus.EnableBp { - if err := bf.newRaftServer(cfg); err != nil { - logger.Error().Err(err).Msg("failed to init raft server") - return bf, err - } - } - - bf.txOp = chain.NewCompTxOp( - chain.TxOpFn(func(bState *state.BlockState, txIn types.Transaction) error { - select { - case <-bf.quit: - return chain.ErrQuit - default: - return nil - } - }), - ) - - return bf, nil -} - -func (bf *BlockFactory) newRaftServer(cfg *config.Config) error { - if err := bf.InitCluster(cfg); err != nil { - return err - } - - proposeC := make(chan string, 1) - confChangeC := make(chan raftpb.ConfChange, 1) - - waldir := fmt.Sprintf("%s/raft/wal", cfg.DataDir) - snapdir := fmt.Sprintf("%s/raft/snap", cfg.DataDir) - - logger.Info().Uint64("RaftID", bf.bpc.ID).Str("waldir", waldir).Str("snapdir", snapdir).Msg("raft server start") - - bf.raftServer = newRaftServer(bf.bpc.ID, cfg.Consensus.Raft.RaftListenUrl, bf.bpc.BPUrls, false, waldir, snapdir, - cfg.Consensus.Raft.RaftCertFile, cfg.Consensus.Raft.RaftKeyFile, - nil, RaftTick, proposeC, confChangeC, true) - - bf.bpc.rs = bf.raftServer - - return nil -} - -// Ticker returns a time.Ticker for the main consensus loop. -func (bf *BlockFactory) Ticker() *time.Ticker { - return time.NewTicker(bf.blockInterval) -} - -// QueueJob send a block triggering information to jq. -func (bf *BlockFactory) QueueJob(now time.Time, jq chan<- interface{}) { - if !bf.raftServer.IsLeader() { - logger.Debug().Msg("skip producing block because this bp is not leader") - return - } - - if b, _ := bf.GetBestBlock(); b != nil { - //TODO is it ok if last job was failed? - if bf.prevBlock != nil && bf.prevBlock.BlockNo() == b.BlockNo() { - logger.Debug().Msg("previous block not connected. skip to generate block") - return - } - bf.prevBlock = b - jq <- b - } -} - -func (bf *BlockFactory) GetType() consensus.ConsensusType { - return consensus.ConsensusRAFT -} - -// IsTransactionValid checks the onsensus level validity of a transaction -func (bf *BlockFactory) IsTransactionValid(tx *types.Tx) bool { - // BlockFactory has no tx valid check. - return true -} - -// VerifyTimestamp checks the validity of the block timestamp. -func (bf *BlockFactory) VerifyTimestamp(*types.Block) bool { - // BlockFactory don't need to check timestamp. - return true -} - -// VerifySign checks the consensus level validity of a block. -func (bf *BlockFactory) VerifySign(block *types.Block) error { - valid, err := block.VerifySign() - if !valid || err != nil { - return &consensus.ErrorConsensus{Msg: "bad block signature", Err: err} - } - return nil -} - -// IsBlockValid checks the consensus level validity of a block. -func (bf *BlockFactory) IsBlockValid(block *types.Block, bestBlock *types.Block) error { - // BlockFactory has no block valid check. - _, err := block.BPID() - if err != nil { - return &consensus.ErrorConsensus{Msg: "bad public key in block", Err: err} - } - return nil -} - -// QuitChan returns the channel from which consensus-related goroutines check -// when shutdown is initiated. 
-func (bf *BlockFactory) QuitChan() chan interface{} { - return bf.quit -} - -// Update has nothging to do. -func (bf *BlockFactory) Update(block *types.Block) { -} - -// Save has nothging to do. -func (bf *BlockFactory) Save(tx db.Transaction) error { - return nil -} - -// BlockFactory returns r itself. -func (bf *BlockFactory) BlockFactory() consensus.BlockFactory { - return bf -} - -// NeedReorganization has nothing to do. -func (bf *BlockFactory) NeedReorganization(rootNo types.BlockNo) bool { - return true -} - -// Start run a raft block factory service. -func (bf *BlockFactory) Start() { - defer logger.Info().Msg("shutdown initiated. stop the service") - - bf.raftServer.Start() - - runtime.LockOSThread() - - // 1. sync blockchain - if err := bf.waitSyncWithMajority(); err != nil { - logger.Error().Err(err).Msg("wait sync with majority failed") - return - } - - // 2. raft can be candidate - // if this node hasn't been synchronized, it must not be candidate. - // otherwise producing block will be stop until synchronization complete - bf.raftServer.SetPromotable(true) - - // 3. wait to commit all uncommited log in WAL, and start - bf.raftServer.WaitStartup() - - for { - select { - case e := <-bf.jobQueue: - if prevBlock, ok := e.(*types.Block); ok { - blockState := bf.sdb.NewBlockState(prevBlock.GetHeader().GetBlocksRootHash()) - - ts := time.Now().UnixNano() - - txOp := chain.NewCompTxOp( - bf.txOp, - newTxExec(prevBlock.GetHeader().GetBlockNo()+1, ts, prevBlock.GetHash(), prevBlock.GetHeader().GetChainID()), - ) - - block, err := chain.GenerateBlock(bf, prevBlock, blockState, txOp, ts, RaftSkipEmptyBlock) - if err == chain.ErrQuit { - return - } else if err == chain.ErrBlockEmpty { - continue - } else if err != nil { - logger.Info().Err(err).Msg("failed to produce block") - continue - } - - if err = block.Sign(bf.privKey); err != nil { - logger.Error().Err(err).Msg("failed to sign in block") - continue - } - - logger.Info().Str("blockProducer", bf.ID).Str("raftID", block.ID()). - Str("sroot", enc.ToString(block.GetHeader().GetBlocksRootHash())). - Uint64("no", block.GetHeader().GetBlockNo()). - Str("hash", block.ID()). - Msg("block produced") - - if !bf.raftServer.IsLeader() { - logger.Info().Msg("skip producing block because this bp is not leader") - continue - } - - //if bestblock is changed, connecting block failed. new block is generated in next tick - if err := chain.ConnectBlock(bf, block, blockState); err != nil { - logger.Error().Msg(err.Error()) - } - } - case <-bf.quit: - return - } - } -} - -// waitUntilStartable wait until this chain synchronizes with more than half of all peers -func (bf *BlockFactory) waitSyncWithMajority() error { - ticker := time.NewTicker(peerCheckInterval) - - for { - select { - case <-ticker.C: - if synced, err := bf.bpc.hasSynced(); err != nil { - logger.Error().Err(err).Msg("failed to check sync with a majority of peers") - return err - } else if synced { - return nil - } - - case <-bf.QuitChan(): - logger.Info().Msg("quit while wait sync") - return ErrBFQuit - default: - } - } -} - -// JobQueue returns the queue for block production triggering. -func (bf *BlockFactory) JobQueue() chan<- interface{} { - return bf.jobQueue -} - -// Info retuns an empty string. -func (bf *BlockFactory) Info() string { - // TODO: Returns a appropriate information inx json format like current - // leader, etc. 
- info := consensus.NewInfo(GetName()) - if bf.raftServer == nil { - return info.AsJSON() - } - - b, err := json.Marshal(bf.bpc.getRaftInfo(false)) - if err != nil { - logger.Error().Err(err).Msg("failed to marshal raft consensus") - } else { - m := json.RawMessage(b) - info.Status = &m - } - - return info.AsJSON() -} - -func (bf *BlockFactory) ConsensusInfo() *types.ConsensusInfo { - if bf.bpc == nil { - return &types.ConsensusInfo{Type: GetName()} - } - return bf.bpc.toConsensusInfo() -} diff --git a/consensus/impl/raft/cluster.go b/consensus/impl/raft/cluster.go deleted file mode 100644 index ca9b2ac62..000000000 --- a/consensus/impl/raft/cluster.go +++ /dev/null @@ -1,249 +0,0 @@ -package raft - -import ( - "encoding/json" - "fmt" - "github.com/aergoio/aergo/consensus" - "github.com/aergoio/aergo/message" - "github.com/aergoio/aergo/p2p" - "github.com/aergoio/aergo/p2p/p2putil" - "github.com/aergoio/aergo/pkg/component" - "github.com/aergoio/aergo/types" - "github.com/libp2p/go-libp2p-peer" - "strconv" - "sync" - "time" -) - -// raft cluster membership -// copy from dpos/bp -// TODO refactoring -// Cluster represents a cluster of block producers. -type Cluster struct { - component.ICompSyncRequester - rs *raftServer - sync.Mutex - - ID uint64 - Size uint16 - Member map[uint64]*blockProducer - Index map[peer.ID]uint64 // peer ID to raft ID mapping - - BPUrls []string //for raft server - - cdb consensus.ChainDB -} - -type RaftInfo struct { - Leader string - Total string - RaftId string - Status *json.RawMessage -} - -type blockProducer struct { - RaftID uint64 - Url string - PeerID peer.ID -} - -func (bp *blockProducer) isDifferent(x *blockProducer) bool { - if bp.RaftID == x.RaftID || bp.Url == x.Url || bp.PeerID == x.PeerID { - return false - } - - return true -} - -func NewCluster(bf *BlockFactory, raftID uint64, size uint16) *Cluster { - cl := &Cluster{ - ICompSyncRequester: bf, - ID: raftID, - Size: size, - Member: make(map[uint64]*blockProducer), - Index: make(map[peer.ID]uint64), - BPUrls: make([]string, size), - cdb: bf.ChainDB, - } - - return cl -} - -func (cl *Cluster) Quorum() uint16 { - return cl.Size/2 + 1 -} - -func (cc *Cluster) addMember(id uint64, url string, peerID peer.ID) error { - //check unique - bp := &blockProducer{RaftID: id, Url: url, PeerID: peerID} - - for prevID, prevBP := range cc.Member { - if prevID == id { - return ErrDupBP - } - - if !prevBP.isDifferent(bp) { - return ErrDupBP - } - } - - // check if mapping between raft id and PeerID is valid - if cc.ID == id && peerID != p2p.NodeID() { - return ErrInvalidRaftPeerID - } - - cc.Member[id] = bp - cc.Index[bp.PeerID] = id - cc.BPUrls[id-1] = bp.Url - - return nil -} - -func MaxUint64(x, y uint64) uint64 { - if x < y { - return y - } - return x -} - -// hasSynced get result of GetPeers request from P2P service and check if chain of this node is synchronized with majority of members -func (cc *Cluster) hasSynced() (bool, error) { - var peers map[peer.ID]*message.PeerInfo - var err error - var peerBestNo uint64 = 0 - - if cc.Size == 1 { - return true, nil - } - - // request GetPeers to p2p - getBPPeers := func() (map[peer.ID]*message.PeerInfo, error) { - peers := make(map[peer.ID]*message.PeerInfo) - - result, err := cc.RequestFuture(message.P2PSvc, &message.GetPeers{}, time.Second, "raft cluster sync test").Result() - if err != nil { - return nil, err - } - - msg := result.(*message.GetPeersRsp) - - for _, peerElem := range msg.Peers { - peerID := peer.ID(peerElem.Addr.PeerID) - state := peerElem.State - - 
if peerElem.Self { - continue - } - - if state.Get() != types.RUNNING { - logger.Debug().Str("peer", p2putil.ShortForm(peerID)).Msg("peer is not running") - continue - - } - - // check if peer is not bp - if _, ok := cc.Index[peerID]; !ok { - continue - } - - peers[peerID] = peerElem - - peerBestNo = MaxUint64(peerElem.LastBlockNumber, peerBestNo) - } - - return peers, nil - } - - if peers, err = getBPPeers(); err != nil { - return false, err - } - - if uint16(len(peers)) < (cc.Quorum() - 1) { - logger.Debug().Msg("a majority of peers are not connected") - return false, nil - } - - var best *types.Block - if best, err = cc.cdb.GetBestBlock(); err != nil { - return false, err - } - - if best.BlockNo()+DefaultMarginChainDiff < peerBestNo { - logger.Debug().Uint64("best", best.BlockNo()).Uint64("peerbest", peerBestNo).Msg("chain was not synced with majority of peers") - return false, nil - } - - logger.Debug().Uint64("best", best.BlockNo()).Uint64("peerbest", peerBestNo).Int("margin", DefaultMarginChainDiff).Msg("chain has been synced with majority of peers") - - return true, nil -} - -func (cc *Cluster) toString() string { - var buf string - - buf = fmt.Sprintf("raft cluster configure: total=%d, RaftID=%d, bps=[", cc.Size, cc.ID) - for _, bp := range cc.Member { - bpbuf := fmt.Sprintf("{ id:%d, Url:%s, PeerID:%s }", bp.RaftID, bp.Url, bp.PeerID) - buf += bpbuf - } - fmt.Sprintf("]") - - return buf -} - -func (cl *Cluster) getRaftInfo(withStatus bool) *RaftInfo { - var leader uint64 - if cl.rs != nil { - leader = cl.rs.GetLeader() - } - - rinfo := &RaftInfo{Leader: strconv.FormatUint(uint64(leader), 10), Total: strconv.FormatUint(uint64(cl.Size), 10), RaftId: strconv.FormatUint(uint64(cl.ID), 10)} - - if withStatus && cl.rs != nil { - b, err := cl.rs.Status().MarshalJSON() - if err != nil { - logger.Error().Err(err).Msg("failed to marshal raft consensus") - } else { - m := json.RawMessage(b) - rinfo.Status = &m - } - } - return rinfo -} - -func (cl *Cluster) toConsensusInfo() *types.ConsensusInfo { - emptyCons := types.ConsensusInfo{ - Type: GetName(), - } - - type PeerInfo struct { - RaftID string - PeerID string - } - - b, err := json.Marshal(cl.getRaftInfo(true)) - if err != nil { - logger.Error().Err(err).Msg("failed to marshal raft consensus") - return &emptyCons - } - - cons := emptyCons - cons.Info = string(b) - - var i int = 0 - bps := make([]string, cl.Size) - for id, m := range cl.Member { - bp := &PeerInfo{RaftID: strconv.FormatUint(uint64(m.RaftID), 10), PeerID: m.PeerID.Pretty()} - b, err = json.Marshal(bp) - if err != nil { - logger.Error().Err(err).Uint64("raftid", id).Msg("failed to marshal raft consensus bp") - return &emptyCons - } - bps[i] = string(b) - - i++ - } - cons.Bps = bps - - return &cons -} diff --git a/consensus/impl/raft/config.go b/consensus/impl/raft/config.go deleted file mode 100644 index e4ca3238e..000000000 --- a/consensus/impl/raft/config.go +++ /dev/null @@ -1,196 +0,0 @@ -package raft - -import ( - "errors" - "fmt" - "github.com/aergoio/aergo/config" - "github.com/libp2p/go-libp2p-peer" - "net" - "net/url" - "os" - "strings" - "time" -) - -var ( - ErrInvalidRaftID = errors.New("invalid raft raftID") - ErrDupRaftUrl = errors.New("duplicated raft bp urls") - ErrRaftEmptyTLSFile = errors.New("cert or key file name is empty") - ErrNotHttpsURL = errors.New("url scheme is not https") - ErrURLInvalidScheme = errors.New("url has invalid scheme") - ErrURLInvalidPort = errors.New("url must have host:port style") - ErrInvalidRaftBPID = errors.New("raft bp raftID is 
not ordered. raftID must start with 1 and be sorted") - ErrDupBP = errors.New("raft bp description is duplicated") - ErrInvalidRaftPeerID = errors.New("peerID of current raft bp is not equals to p2p configure") -) - -const ( - DefaultMarginChainDiff = 1 - DefaultTickMS = time.Millisecond * 30 -) - -func (bf *BlockFactory) InitCluster(cfg *config.Config) error { - useTls := true - var err error - - raftConfig := cfg.Consensus.Raft - if raftConfig == nil { - panic("raftconfig is not set. please set raftID, raftBPs.") - } - - //set default - if raftConfig.RaftTick != 0 { - RaftTick = time.Duration(raftConfig.RaftTick * 1000000) - } - - lenBPs := len(raftConfig.RaftBPs) - raftID := raftConfig.RaftID - - bf.bpc = NewCluster(bf, raftID, uint16(lenBPs)) - - if raftID <= 0 || raftID > uint64(lenBPs) { - logger.Error().Err(err).Msg("raft raftID has the following values: 1 <= raft raftID <= len(bpcount)") - - return ErrInvalidRaftID - } - - if useTls, err = validateTLS(raftConfig); err != nil { - logger.Error().Err(err). - Str("key", raftConfig.RaftKeyFile). - Str("cert", raftConfig.RaftCertFile). - Msg("failed to validate tls config for raft") - return err - } - - if raftConfig.RaftListenUrl != "" { - if err := isValidURL(raftConfig.RaftListenUrl, useTls); err != nil { - logger.Error().Err(err).Msg("failed to validate listen url for raft") - return err - } - } - - if err = bf.bpc.addMembers(raftConfig, useTls); err != nil { - logger.Error().Err(err).Msg("failed to validate bpurls, bpid config for raft") - return err - } - - RaftSkipEmptyBlock = raftConfig.RaftSkipEmpty - - logger.Info().Bool("skipempty", RaftSkipEmptyBlock).Int64("rafttick(nanosec)", RaftTick.Nanoseconds()).Float64("interval(sec)", bf.blockInterval.Seconds()).Msg(bf.bpc.toString()) - - return nil -} - -func validateTLS(raftCfg *config.RaftConfig) (bool, error) { - if len(raftCfg.RaftCertFile) == 0 && len(raftCfg.RaftKeyFile) == 0 { - return false, nil - } - - //check that both the cert and key files are set - //check that the files actually exist - if len(raftCfg.RaftCertFile) == 0 || len(raftCfg.RaftKeyFile) == 0 { - logger.Error().Str("raftcertfile", raftCfg.RaftCertFile).Str("raftkeyfile", raftCfg.RaftKeyFile). 
- Msg(ErrRaftEmptyTLSFile.Error()) - return false, ErrRaftEmptyTLSFile - } - - if len(raftCfg.RaftCertFile) != 0 { - if _, err := os.Stat(raftCfg.RaftCertFile); err != nil { - logger.Error().Err(err).Msg("not exist certificate file for raft") - return false, err - } - } - - if len(raftCfg.RaftKeyFile) != 0 { - if _, err := os.Stat(raftCfg.RaftKeyFile); err != nil { - logger.Error().Err(err).Msg("not exist Key file for raft") - return false, err - } - } - - return true, nil -} - -func isValidURL(urlstr string, useTls bool) error { - var urlobj *url.URL - var err error - - if urlobj, err = parseToUrl(urlstr); err != nil { - logger.Error().Str("url", urlstr).Err(err).Msg("raft bp urlstr is not vaild form") - return err - } - - if useTls && urlobj.Scheme != "https" { - logger.Error().Str("urlstr", urlstr).Msg("raft bp urlstr shoud use https protocol") - return ErrNotHttpsURL - } - - return nil -} - -func isValidID(raftID uint64, lenBps int) error { - if raftID <= 0 || raftID > uint64(lenBps) { - logger.Error().Msg("raft raftID has the following values: 1 <= raft raftID <= len(bpcount)") - - return ErrInvalidRaftID - } - - return nil -} - -func (cc *Cluster) addMembers(raftCfg *config.RaftConfig, useTls bool) error { - lenBPs := len(raftCfg.RaftBPs) - if lenBPs == 0 { - return fmt.Errorf("config of raft bp is empty") - } - - // validate each bp - for i, raftBP := range raftCfg.RaftBPs { - if uint64(i+1) != raftBP.ID { - return ErrInvalidRaftBPID - } - - urlstr := raftBP.Url - trimUrl := strings.TrimSpace(urlstr) - - if err := isValidURL(urlstr, useTls); err != nil { - return err - } - - if err := isValidID(raftBP.ID, lenBPs); err != nil { - return err - } - - peerID, err := peer.IDB58Decode(raftBP.P2pID) - if err != nil { - return fmt.Errorf("invalid raft peerID %s", raftBP.P2pID) - } - - if err := cc.addMember(raftBP.ID, trimUrl, peerID); err != nil { - return err - } - } - - // TODO check my node pubkey from p2p - - return nil -} - -func parseToUrl(urlstr string) (*url.URL, error) { - var urlObj *url.URL - var err error - - if urlObj, err = url.Parse(urlstr); err != nil { - return nil, err - } - - if urlObj.Scheme != "http" && urlObj.Scheme != "https" { - return nil, ErrURLInvalidScheme - } - - if _, _, err := net.SplitHostPort(urlObj.Host); err != nil { - return nil, ErrURLInvalidPort - } - - return urlObj, nil -} diff --git a/consensus/impl/raft/raftserver.go b/consensus/impl/raft/raftserver.go deleted file mode 100644 index fbb5f5195..000000000 --- a/consensus/impl/raft/raftserver.go +++ /dev/null @@ -1,643 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package raft - -import ( - "context" - "net/http" - "net/url" - "os" - "strconv" - "sync" - "sync/atomic" - "time" - - "github.com/aergoio/etcd/etcdserver/stats" - "github.com/aergoio/etcd/pkg/fileutil" - "github.com/aergoio/etcd/pkg/types" - raftlib "github.com/aergoio/etcd/raft" - "github.com/aergoio/etcd/raft/raftpb" - "github.com/aergoio/etcd/rafthttp" - "github.com/aergoio/etcd/snap" - "github.com/aergoio/etcd/wal" - "github.com/aergoio/etcd/wal/walpb" -) - -//noinspection ALL -var raftLogger raftlib.Logger - -func init() { - raftLogger = NewRaftLogger(logger) -} - -// A key-value stream backed by raft -type raftServer struct { - proposeC <-chan string // proposed messages (k,v) - confChangeC <-chan raftpb.ConfChange // proposed cluster config changes - commitC chan *string // entries committed to log (k,v) - errorC chan error // errors from raft session - - id uint64 // client ID for raft session - peers []string // raft peer URLs - listenUrl string - join bool // node is joining an existing cluster - waldir string // path to WAL directory - snapdir string // path to snapshot directory - getSnapshot func() ([]byte, error) - lastIndex uint64 // index of log at start - - confState raftpb.ConfState - snapshotIndex uint64 - appliedIndex uint64 - - // raft backing for the commit/error channel - node raftlib.Node - raftStorage *raftlib.MemoryStorage - wal *wal.WAL - - snapshotter *snap.Snapshotter - snapshotterReady chan *snap.Snapshotter // signals when snapshotter is ready - - snapCount uint64 - transport *rafthttp.Transport - stopc chan struct{} // signals proposal channel closed - httpstopc chan struct{} // signals http server to shutdown - httpdonec chan struct{} // signals http server shutdown complete - - leaderStatus LeaderStatus - - certFile string - keyFile string - - startSync bool // maybe this flag is unnecessary - lock sync.RWMutex - promotable bool - - tickMS time.Duration -} - -type LeaderStatus struct { - leader uint64 - leaderChanged uint64 -} - -var defaultSnapCount uint64 = 10000 -var snapshotCatchUpEntriesN uint64 = 10000 - -// newRaftServer initiates a raft instance and returns a committed log entry -// channel and error channel. Proposals for log updates are sent over the -// provided the proposal channel. All log entries are replayed over the -// commit channel, followed by a nil message (to indicate the channel is -// current), then new log entries. To shutdown, close proposeC and read errorC. 
-func newRaftServer(id uint64, listenUrl string, peers []string, join bool, waldir string, snapdir string, - certFile string, keyFile string, - getSnapshot func() ([]byte, error), tickMS time.Duration, - proposeC <-chan string, - confChangeC <-chan raftpb.ConfChange, - delayPromote bool) *raftServer { - - commitC := make(chan *string) - errorC := make(chan error) - - rs := &raftServer{ - proposeC: proposeC, - confChangeC: confChangeC, - commitC: commitC, - errorC: errorC, - id: id, - listenUrl: listenUrl, - peers: peers, - join: join, - waldir: waldir, - snapdir: snapdir, - getSnapshot: getSnapshot, - snapCount: defaultSnapCount, - stopc: make(chan struct{}), - httpstopc: make(chan struct{}), - httpdonec: make(chan struct{}), - - snapshotterReady: make(chan *snap.Snapshotter, 1), - // rest of structure populated after WAL replay - - certFile: certFile, - keyFile: keyFile, - - lock: sync.RWMutex{}, - promotable: true, - tickMS: tickMS, - } - - if listenUrl == "" { - rs.listenUrl = peers[rs.id-1] - } - - if delayPromote { - rs.SetPromotable(false) - } - - return rs -} - -func (rs *raftServer) SetPromotable(val bool) { - defer rs.lock.Unlock() - rs.lock.Lock() - rs.promotable = val -} - -func (rs *raftServer) GetPromotable() bool { - defer rs.lock.RUnlock() - - rs.lock.RLock() - val := rs.promotable - - return val -} - -func (rs *raftServer) Start() { - go rs.startRaft() -} - -func (rs *raftServer) startRaft() { - if !fileutil.Exist(rs.snapdir) { - if err := os.MkdirAll(rs.snapdir, 0750); err != nil { - logger.Error().Err(err).Msg("cannot create dir for snapshot") - } - } - rs.snapshotter = snap.New(rs.snapdir) - rs.snapshotterReady <- rs.snapshotter - - oldwal := wal.Exist(rs.waldir) - rs.wal = rs.replayWAL() - - rpeers := make([]raftlib.Peer, len(rs.peers)) - for i := range rpeers { - rpeers[i] = raftlib.Peer{ID: uint64(i + 1)} - } - c := &raftlib.Config{ - ID: uint64(rs.id), - ElectionTick: 10, - HeartbeatTick: 1, - Storage: rs.raftStorage, - MaxSizePerMsg: 1024 * 1024, - MaxInflightMsgs: 256, - Logger: raftLogger, - CheckQuorum: true, - PreVote: true, - } - - var node raftlib.Node - if oldwal { - node = raftlib.RestartNode(c) - } else { - startPeers := rpeers - if rs.join { - startPeers = nil - } - node = raftlib.StartNode(c, startPeers) - } - - logger.Debug().Msg("raft core node is started") - - // need locking for sync with consensusAccessor - rs.setNodeSync(node) - - rs.transport = &rafthttp.Transport{ - ID: types.ID(rs.id), - ClusterID: 0x1000, - Raft: rs, - ServerStats: stats.NewServerStats("", ""), - LeaderStats: stats.NewLeaderStats(strconv.FormatUint(rs.id, 10)), - ErrorC: make(chan error), - } - - rs.transport.SetLogger(httpLogger) - - rs.transport.Start() - for i := range rs.peers { - if uint64(i+1) != rs.id { - rs.transport.AddPeer(types.ID(i+1), []string{rs.peers[i]}) - } - } - - go rs.serveRaft() - go rs.serveChannels() -} - -func (rs *raftServer) setNodeSync(node raftlib.Node) { - defer rs.lock.Unlock() - - rs.lock.Lock() - rs.node = node -} - -func (rs *raftServer) getNodeSync() raftlib.Node { - defer rs.lock.RUnlock() - - var node raftlib.Node - rs.lock.RLock() - node = rs.node - - return node -} - -// stop closes http, closes all channels, and stops raft. 
-func (rs *raftServer) stop() { - rs.stopHTTP() - close(rs.commitC) - close(rs.errorC) - rs.node.Stop() -} - -func (rs *raftServer) stopHTTP() { - rs.transport.Stop() - close(rs.httpstopc) - <-rs.httpdonec -} - -func (rs *raftServer) writeError(err error) { - rs.stopHTTP() - close(rs.commitC) - rs.errorC <- err - close(rs.errorC) - rs.node.Stop() -} - -func (rs *raftServer) serveChannels() { - snapshot, err := rs.raftStorage.Snapshot() - if err != nil { - panic(err) - } - rs.confState = snapshot.Metadata.ConfState - rs.snapshotIndex = snapshot.Metadata.Index - rs.appliedIndex = snapshot.Metadata.Index - - defer rs.wal.Close() - - ticker := time.NewTicker(rs.tickMS) - defer ticker.Stop() - - // send proposals over raft - go func() { - var confChangeCount uint64 = 0 - - for rs.proposeC != nil && rs.confChangeC != nil { - select { - case prop, ok := <-rs.proposeC: - if !ok { - rs.proposeC = nil - } else { - // blocks until accepted by raft state machine - rs.node.Propose(context.TODO(), []byte(prop)) - } - - case cc, ok := <-rs.confChangeC: - if !ok { - rs.confChangeC = nil - } else { - confChangeCount += 1 - cc.ID = confChangeCount - rs.node.ProposeConfChange(context.TODO(), cc) - } - } - } - // client closed channel; shutdown raft if not already - close(rs.stopc) - }() - - // event loop on raft state machine updates - for { - select { - case <-ticker.C: - if rs.GetPromotable() { - rs.node.Tick() - } - - // store raft entries to wal, then publish over commit channel - case rd := <-rs.node.Ready(): - if len(rd.Entries) > 0 { - logger.Debug().Int("entries", len(rd.Entries)).Uint64("first", rd.Entries[0].Index).Int("commitentries", len(rd.CommittedEntries)).Msg("raft job ready") - } - if rd.SoftState != nil { - rs.updateLeader(rd.SoftState) - } - - rs.wal.Save(rd.HardState, rd.Entries) - if !raftlib.IsEmptySnap(rd.Snapshot) { - panic("snapshot occurred!!") - rs.saveSnap(rd.Snapshot) - rs.raftStorage.ApplySnapshot(rd.Snapshot) - rs.publishSnapshot(rd.Snapshot) - } - rs.raftStorage.Append(rd.Entries) - rs.transport.Send(rd.Messages) - if ok := rs.publishEntries(rs.entriesToApply(rd.CommittedEntries)); !ok { - rs.stop() - return - } - rs.maybeTriggerSnapshot() - rs.node.Advance() - case err := <-rs.transport.ErrorC: - rs.writeError(err) - return - - case <-rs.stopc: - rs.stop() - return - } - } -} - -func (rs *raftServer) serveRaft() { - urlstr := rs.listenUrl - urlData, err := url.Parse(urlstr) - if err != nil { - logger.Fatal().Err(err).Str("url", urlstr).Msg("Failed parsing URL") - } - - ln, err := newStoppableListener(urlData.Host, rs.httpstopc) - if err != nil { - logger.Fatal().Err(err).Str("url", urlstr).Msg("Failed to listen rafthttp") - } - - if len(rs.certFile) != 0 && len(rs.keyFile) != 0 { - logger.Info().Str("url", urlstr).Str("certfile", rs.certFile).Str("keyfile", rs.keyFile). - Msg("raft http server(tls) started") - - err = (&http.Server{Handler: rs.transport.Handler()}).ServeTLS(ln, rs.certFile, rs.keyFile) - } else { - logger.Info().Str("url", urlstr).Msg("raft http server started") - - err = (&http.Server{Handler: rs.transport.Handler()}).Serve(ln) - } - - select { - case <-rs.httpstopc: - default: - logger.Fatal().Err(err).Msg("Failed to serve rafthttp") - } - close(rs.httpdonec) -} - -func (rs *raftServer) loadSnapshot() *raftpb.Snapshot { - snapshot, err := rs.snapshotter.Load() - if err != nil && err != snap.ErrNoSnapshot { - logger.Fatal().Err(err).Msg("error loading snapshot") - } - return snapshot -} - -// openWAL returns a WAL ready for reading. 
-func (rs *raftServer) openWAL(snapshot *raftpb.Snapshot) *wal.WAL { - if !wal.Exist(rs.waldir) { - if err := os.MkdirAll(rs.waldir, 0750); err != nil { - logger.Fatal().Err(err).Msg("cannot create dir for wal") - } - - w, err := wal.Create(rs.waldir, nil) - if err != nil { - logger.Fatal().Err(err).Msg("create wal error") - } - - logger.Info().Str("dir", rs.waldir).Msg("create wal directory") - w.Close() - } - - walsnap := walpb.Snapshot{} - if snapshot != nil { - walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term - } - logger.Info().Uint64("term", walsnap.Term).Uint64("index", walsnap.Index).Msg("loading WAL at term %d and index") - w, err := wal.Open(rs.waldir, walsnap) - if err != nil { - logger.Fatal().Err(err).Msg("error loading wal") - } - - logger.Info().Msg("openwal done") - return w -} - -// replayWAL replays WAL entries into the raft instance. -func (rs *raftServer) replayWAL() *wal.WAL { - logger.Info().Uint64("raftid", rs.id).Msg("replaying WAL of member %d") - snapshot := rs.loadSnapshot() - w := rs.openWAL(snapshot) - _, st, ents, err := w.ReadAll() - if err != nil { - logger.Fatal().Err(err).Msg("failed to read WAL") - } - rs.raftStorage = raftlib.NewMemoryStorage() - if snapshot != nil { - rs.raftStorage.ApplySnapshot(*snapshot) - } - rs.raftStorage.SetHardState(st) - - // append to storage so raft starts at the right place in log - rs.raftStorage.Append(ents) - // send nil once lastIndex is published so client knows commit channel is current - if len(ents) > 0 { - rs.lastIndex = ents[len(ents)-1].Index - } else { - //commitChannel used for syncing startup - rs.commitC <- nil - rs.startSync = true - } - - logger.Info().Msg("replaying WAL done") - - return w -} - -func (rs *raftServer) maybeTriggerSnapshot() { - if rs.appliedIndex-rs.snapshotIndex <= rs.snapCount { - return - } - - logger.Info().Uint64("applied index", rs.appliedIndex).Uint64("last snapshot index", rs.snapshotIndex).Msg("start snapshot") - data, err := rs.getSnapshot() - if err != nil { - logger.Panic().Err(err).Msg("raft getsnapshot failed") - } - snapshot, err := rs.raftStorage.CreateSnapshot(rs.appliedIndex, &rs.confState, data) - if err != nil { - panic(err) - } - if err := rs.saveSnap(snapshot); err != nil { - panic(err) - } - - compactIndex := uint64(1) - if rs.appliedIndex > snapshotCatchUpEntriesN { - compactIndex = rs.appliedIndex - snapshotCatchUpEntriesN - } - if err := rs.raftStorage.Compact(compactIndex); err != nil { - panic(err) - } - - logger.Info().Uint64("index", compactIndex).Msg("compacted raftLog.at index") - rs.snapshotIndex = rs.appliedIndex -} - -func (rs *raftServer) publishSnapshot(snapshotToSave raftpb.Snapshot) { - if raftlib.IsEmptySnap(snapshotToSave) { - return - } - - logger.Info().Uint64("index", rs.snapshotIndex).Msg("publishing snapshot at index") - defer logger.Info().Uint64("index", rs.snapshotIndex).Msg("finished publishing snapshot at index") - - if snapshotToSave.Metadata.Index <= rs.appliedIndex { - logger.Fatal().Msgf("snapshot index [%d] should > progress.appliedIndex [%d] + 1", snapshotToSave.Metadata.Index, rs.appliedIndex) - } - rs.commitC <- nil // trigger kvstore to load snapshot - - rs.confState = snapshotToSave.Metadata.ConfState - rs.snapshotIndex = snapshotToSave.Metadata.Index - rs.appliedIndex = snapshotToSave.Metadata.Index -} - -func (rs *raftServer) saveSnap(snap raftpb.Snapshot) error { - // must save the snapshot index to the WAL before saving the - // snapshot to maintain the invariant that we only Open the - // wal at 
previously-saved snapshot indexes. - walSnap := walpb.Snapshot{ - Index: snap.Metadata.Index, - Term: snap.Metadata.Term, - } - if err := rs.wal.SaveSnapshot(walSnap); err != nil { - return err - } - if err := rs.snapshotter.SaveSnap(snap); err != nil { - return err - } - return rs.wal.ReleaseLockTo(snap.Metadata.Index) -} - -func (rs *raftServer) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) { - if len(ents) == 0 { - return - } - firstIdx := ents[0].Index - if firstIdx > rs.appliedIndex+1 { - logger.Fatal().Msgf("first index of committed entry[%d] should <= progress.appliedIndex[%d] 1", firstIdx, rs.appliedIndex) - } - if rs.appliedIndex-firstIdx+1 < uint64(len(ents)) { - nents = ents[rs.appliedIndex-firstIdx+1:] - } - return nents -} - -// publishEntries writes committed log entries to commit channel and returns -// whether all entries could be published. -func (rs *raftServer) publishEntries(ents []raftpb.Entry) bool { - for i := range ents { - switch ents[i].Type { - case raftpb.EntryNormal: - logger.Debug().Int("idx", i).Uint64("term", ents[i].Term).Msg("publish normal entry") - - if len(ents[i].Data) != 0 { - // it's only for unittest - s := string(ents[i].Data) - select { - case rs.commitC <- &s: - case <-rs.stopc: - return false - } - } - - case raftpb.EntryConfChange: - var cc raftpb.ConfChange - - cc.Unmarshal(ents[i].Data) - rs.confState = *rs.node.ApplyConfChange(cc) - - logger.Debug().Int("idx", i).Int32("type", int32(cc.Type)).Int("ctx", len(cc.Context)).Msg("publish confchange entry") - - switch cc.Type { - case raftpb.ConfChangeAddNode: - if len(cc.Context) > 0 { - rs.transport.AddPeer(types.ID(cc.NodeID), []string{string(cc.Context)}) - } - case raftpb.ConfChangeRemoveNode: - if cc.NodeID == uint64(rs.id) { - logger.Info().Msg("I've been removed from the cluster! 
Shutting down.") - return false - } - rs.transport.RemovePeer(types.ID(cc.NodeID)) - } - } - - // after commit, update appliedIndex - rs.appliedIndex = ents[i].Index - - // special nil commit to signal replay has finished - if ents[i].Index == rs.lastIndex { - if !rs.startSync { - logger.Debug().Uint64("idx", rs.lastIndex).Msg("published all entries of WAL") - - select { - case rs.commitC <- nil: - rs.startSync = true - case <-rs.stopc: - return false - } - } - } - } - return true -} - -func (rs *raftServer) Process(ctx context.Context, m raftpb.Message) error { - return rs.node.Step(ctx, m) -} -func (rs *raftServer) IsIDRemoved(id uint64) bool { return false } -func (rs *raftServer) ReportUnreachable(id uint64) {} -func (rs *raftServer) ReportSnapshot(id uint64, status raftlib.SnapshotStatus) {} - -func (rs *raftServer) WaitStartup() { - logger.Debug().Msg("raft start wait") - for s := range rs.commitC { - if s == nil { - break - } - } - logger.Debug().Msg("raft start succeed") -} - -func (rs *raftServer) updateLeader(softState *raftlib.SoftState) { - if softState.Lead != rs.GetLeader() { - atomic.StoreUint64(&rs.leaderStatus.leader, softState.Lead) - - rs.leaderStatus.leaderChanged++ - - logger.Info().Uint64("ID", rs.id).Uint64("leader", softState.Lead).Msg("leader changed") - } -} - -func (rs *raftServer) GetLeader() uint64 { - return atomic.LoadUint64(&rs.leaderStatus.leader) -} - -func (rs *raftServer) IsLeader() bool { - return rs.id == rs.GetLeader() -} - -func (rs *raftServer) Status() raftlib.Status { - node := rs.getNodeSync() - if node == nil { - return raftlib.Status{} - } - - return node.Status() -} diff --git a/consensus/impl/raft/raftserver_test.go b/consensus/impl/raft/raftserver_test.go deleted file mode 100644 index f8b03e5c5..000000000 --- a/consensus/impl/raft/raftserver_test.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package raft - -import ( - "fmt" - "github.com/stretchr/testify/assert" - "os" - "testing" - "time" - - "github.com/aergoio/etcd/raft/raftpb" -) - -type cluster struct { - peers []string - commitC []<-chan *string - errorC []<-chan error - proposeC []chan string - confChangeC []chan raftpb.ConfChange - - rs []*raftServer -} - -var dataDirBase = "./rafttest" - -// newCluster creates a cluster of n nodes -func newCluster(n int, delayPromote bool) *cluster { - peers := make([]string, n) - for i := range peers { - peers[i] = fmt.Sprintf("http://127.0.0.1:%d", 10000+i) - } - - clus := &cluster{ - peers: peers, - commitC: make([]<-chan *string, len(peers)), - errorC: make([]<-chan error, len(peers)), - proposeC: make([]chan string, len(peers)), - confChangeC: make([]chan raftpb.ConfChange, len(peers)), - rs: make([]*raftServer, len(peers)), - } - - os.RemoveAll(dataDirBase) - - for i := range clus.peers { - waldir := fmt.Sprintf("%s/%d/wal", dataDirBase, i+1) - snapdir := fmt.Sprintf("%s/%d/snap", dataDirBase, i+1) - - clus.proposeC[i] = make(chan string, 1) - clus.confChangeC[i] = make(chan raftpb.ConfChange, 1) - - rs := newRaftServer(uint64(i+1), "", clus.peers, false, waldir, snapdir, "", "", nil, RaftTick, clus.proposeC[i], clus.confChangeC[i], delayPromote) - clus.rs[i] = rs - clus.commitC[i] = rs.commitC - clus.errorC[i] = rs.errorC - rs.Start() - } - - return clus -} - -// sinkReplay reads all commits in each node's local log. -func (clus *cluster) sinkReplay() { - for i := range clus.peers { - for s := range clus.commitC[i] { - if s == nil { - break - } - } - } -} - -// Close closes all cluster nodes and returns an error if any failed. -func (clus *cluster) Close() (err error) { - for i := range clus.peers { - close(clus.proposeC[i]) - for range clus.commitC[i] { - // drain pending commits - } - // wait for channel to close - if erri := <-clus.errorC[i]; erri != nil { - err = erri - } - } - - os.RemoveAll(dataDirBase) - - return err -} - -func (clus *cluster) closeNoErrors(t *testing.T) { - if err := clus.Close(); err != nil { - t.Fatal(err) - } -} - -// TestProposeOnCommit starts three nodes and feeds commits back into the proposal -// channel. The intent is to ensure blocking on a proposal won't block raft progress. -func TestProposeOnCommit(t *testing.T) { - clus := newCluster(3, false) - defer clus.closeNoErrors(t) - - //wait creation of all Raft nodes - clus.sinkReplay() - - donec := make(chan struct{}) - for i := range clus.peers { - // feedback for "n" committed entries, then update donec - go func(i int, pC chan<- string, cC <-chan *string, eC <-chan error) { - for n := 0; n < 100; n++ { - s, ok := <-cC - if !ok { - pC = nil - } - //t.Logf("raft node [%d][%d] commit", i, n) - select { - case pC <- *s: - continue - case err := <-eC: - t.Fatalf("eC message (%v)", err) - } - } - t.Logf("raft node [%d] done", i) - donec <- struct{}{} - for range cC { - // acknowledge the commits from other nodes so - // raft continues to make progress - } - }(i, clus.proposeC[i], clus.commitC[i], clus.errorC[i]) - - // one message feedback per node - go func(i int) { clus.proposeC[i] <- "foo" }(i) - } - - for range clus.peers { - <-donec - } -} - -// TestCloseProposerBeforeReplay tests closing the producer before raft starts. 
-func TestCloseProposerBeforeReplay(t *testing.T) { - clus := newCluster(1, false) - // close before replay so raft never starts - defer clus.closeNoErrors(t) -} - -// TestCloseProposerInflight tests closing the producer while -// committed messages are being published to the client. -func TestCloseProposerInflight(t *testing.T) { - clus := newCluster(1, false) - defer clus.closeNoErrors(t) - - clus.sinkReplay() - - // some inflight ops - go func() { - clus.proposeC[0] <- "foo" - clus.proposeC[0] <- "bar" - }() - - // wait for one message - if c, ok := <-clus.commitC[0]; *c != "foo" || !ok { - t.Fatalf("Commit failed") - } -} - -func TestRaftDelayPromotable(t *testing.T) { - clus := newCluster(3, true) - - defer clus.closeNoErrors(t) - - //wait creation of all Raft nodes - clus.sinkReplay() - - //2ê°œ nodeê°€ promotable ë˜ë©´ leader ê²°ì • - t.Log("replay ready") - - checkHasLeader := func(has bool) { - res := false - for _, raftserver := range clus.rs { - if raftserver.IsLeader() { - res = true - } - } - - assert.Equal(t, has, res) - } - - time.Sleep(time.Second * 1) - checkHasLeader(false) - - clus.rs[0].SetPromotable(true) - - time.Sleep(time.Second * 5) - checkHasLeader(true) -} - -/* -func TestRaftIsLeader(t *testing.T) { - clus := newCluster(1) - defer clus.closeNoErrors(t) - - clus.sinkReplay() - - // check who is leader - -}*/ diff --git a/consensus/impl/raftv2/blockfactory.go b/consensus/impl/raftv2/blockfactory.go new file mode 100644 index 000000000..a088ada57 --- /dev/null +++ b/consensus/impl/raftv2/blockfactory.go @@ -0,0 +1,623 @@ +package raftv2 + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pkey" + "runtime" + "sync" + "time" + + "github.com/aergoio/aergo/internal/enc" + "github.com/libp2p/go-libp2p-crypto" + + "github.com/aergoio/aergo-lib/log" + bc "github.com/aergoio/aergo/chain" + "github.com/aergoio/aergo/config" + "github.com/aergoio/aergo/consensus" + "github.com/aergoio/aergo/consensus/chain" + "github.com/aergoio/aergo/contract" + "github.com/aergoio/aergo/pkg/component" + "github.com/aergoio/aergo/state" + "github.com/aergoio/aergo/types" +) + +const ( + slotQueueMax = 100 + DefaultCommitQueueLen = 10 +) + +var ( + logger *log.Logger + httpLogger *log.Logger + + // blockIntervalMs is the block genration interval in milli-seconds. 
+ RaftTick = DefaultTickMS + RaftSkipEmptyBlock = false + MaxCommitQueueLen = DefaultCommitQueueLen + + BlockTimeout time.Duration +) + +var ( + ErrClusterNotReady = errors.New("cluster is not ready") + ErrNotRaftLeader = errors.New("this node is not leader") +) + +func init() { + logger = log.NewLogger("raft") + httpLogger = log.NewLogger("rafthttp") +} + +type txExec struct { + execTx bc.TxExecFn +} + +func newTxExec(cdb consensus.ChainDB, blockNo types.BlockNo, ts int64, prevHash []byte, chainID []byte) chain.TxOp { + // Block hash not determined yet + return &txExec{ + execTx: bc.NewTxExecutor(contract.ChainAccessor(cdb), blockNo, ts, prevHash, contract.BlockFactory, chainID), + } +} + +func (te *txExec) Apply(bState *state.BlockState, tx types.Transaction) error { + err := te.execTx(bState, tx) + return err +} + +type Work struct { + *types.Block +} + +func (work *Work) GetTimeout() time.Duration { + return BlockTimeout +} + +func (work *Work) ToString() string { + return fmt.Sprintf("bestblock=%s", work.BlockID()) +} + +// BlockFactory implments a raft block factory which generate block each cfg.Consensus.BlockInterval if this node is leader of raft +// +// This can be used for testing purpose. +type BlockFactory struct { + *component.ComponentHub + consensus.ChainWAL + + bpc *Cluster + + workerQueue chan *Work + jobQueue chan interface{} + bpTimeoutC chan interface{} + quit chan interface{} + + maxBlockBodySize uint32 + ID string + privKey crypto.PrivKey + txOp chain.TxOp + sdb *state.ChainStateDB + prevBlock *types.Block // best block of last job + jobLock sync.RWMutex + + raftOp *RaftOperator + raftServer *raftServer +} + +// GetName returns the name of the consensus. +func GetName() string { + return consensus.ConsensusName[consensus.ConsensusRAFT] +} + +// GetConstructor build and returns consensus.Constructor from New function. +func GetConstructor(cfg *config.Config, hub *component.ComponentHub, cdb consensus.ChainWAL, + sdb *state.ChainStateDB, pa p2pcommon.PeerAccessor) consensus.Constructor { + return func() (consensus.Consensus, error) { + return New(cfg, hub, cdb, sdb, pa) + } +} + +// New returns a BlockFactory. 
+func New(cfg *config.Config, hub *component.ComponentHub, cdb consensus.ChainWAL, + sdb *state.ChainStateDB, pa p2pcommon.PeerAccessor) (*BlockFactory, error) { + + Init(consensus.BlockInterval) + + bf := &BlockFactory{ + ComponentHub: hub, + ChainWAL: cdb, + jobQueue: make(chan interface{}, slotQueueMax), + workerQueue: make(chan *Work), + bpTimeoutC: make(chan interface{}, 1), + quit: make(chan interface{}), + maxBlockBodySize: chain.MaxBlockBodySize(), + ID: p2pkey.NodeSID(), + privKey: p2pkey.NodePrivKey(), + sdb: sdb, + } + + if cfg.Consensus.EnableBp { + if err := bf.newRaftServer(cfg); err != nil { + logger.Error().Err(err).Msg("failed to init raft server") + return bf, err + } + + bf.raftServer.SetPeerAccessor(pa) + } + + bf.txOp = chain.NewCompTxOp( + // timeout check + chain.TxOpFn(func(bState *state.BlockState, txIn types.Transaction) error { + return bf.checkBpTimeout() + }), + ) + + return bf, nil +} + +func Init(blockInterval time.Duration) { + logger.Debug().Int64("timeout(ms)", BlockTimeout.Nanoseconds()/int64(time.Millisecond)).Msg("set block timeout") + + BlockTimeout = blockInterval +} + +func (bf *BlockFactory) newRaftServer(cfg *config.Config) error { + if err := bf.InitCluster(cfg); err != nil { + return err + } + + bf.raftOp = newRaftOperator(bf.raftServer) + + logger.Info().Str("name", bf.bpc.NodeName()).Msg("create raft server") + + bf.raftServer = newRaftServer(bf.ComponentHub, bf.bpc, cfg.Consensus.Raft.ListenUrl, !cfg.Consensus.Raft.NewCluster, + cfg.Consensus.Raft.CertFile, cfg.Consensus.Raft.KeyFile, nil, + RaftTick, bf.bpc.confChangeC, bf.raftOp.commitC, false, bf.ChainWAL) + + bf.bpc.rs = bf.raftServer + bf.raftOp.rs = bf.raftServer + + return nil +} + +// Ticker returns a time.Ticker for the main consensus loop. +func (bf *BlockFactory) Ticker() *time.Ticker { + return time.NewTicker(consensus.BlockInterval) +} + +// QueueJob send a block triggering information to jq. +func (bf *BlockFactory) QueueJob(now time.Time, jq chan<- interface{}) { + bf.jobLock.Lock() + defer bf.jobLock.Unlock() + + if !bf.raftServer.IsLeader() { + logger.Debug().Msg("skip producing block because this bp is not leader") + return + } + + if b, _ := bf.GetBestBlock(); b != nil { + if bf.prevBlock != nil && bf.prevBlock.BlockNo() == b.BlockNo() { + logger.Debug().Uint64("bestno", b.BlockNo()).Msg("previous block not connected. skip to generate block") + return + } + + // If requested block remains in commit channel, block factory must wait until all requests are completed. + // otherwise block of same height will be created and a fork will occur. + if !bf.raftServer.commitProgress.IsReadyToPropose() { + logger.Debug().Uint64("bestno", b.BlockNo()).Msg("pending request block not connected. skip to generate block") + return + } + + bf.prevBlock = b + jq <- &Work{b} + } +} + +func (bf *BlockFactory) GetType() consensus.ConsensusType { + return consensus.ConsensusRAFT +} + +// IsTransactionValid checks the onsensus level validity of a transaction +func (bf *BlockFactory) IsTransactionValid(tx *types.Tx) bool { + // BlockFactory has no tx valid check. + return true +} + +// VerifyTimestamp checks the validity of the block timestamp. +func (bf *BlockFactory) VerifyTimestamp(*types.Block) bool { + // BlockFactory don't need to check timestamp. + return true +} + +// VerifySign checks the consensus level validity of a block. 
+func (bf *BlockFactory) VerifySign(block *types.Block) error { + valid, err := block.VerifySign() + if !valid || err != nil { + return &consensus.ErrorConsensus{Msg: "bad block signature", Err: err} + } + return nil +} + +// IsBlockValid checks the consensus level validity of a block. +func (bf *BlockFactory) IsBlockValid(block *types.Block, bestBlock *types.Block) error { + // BlockFactory has no block valid check. + _, err := block.BPID() + if err != nil { + return &consensus.ErrorConsensus{Msg: "bad public key in block", Err: err} + } + return nil +} + +// QuitChan returns the channel from which consensus-related goroutines check +// when shutdown is initiated. +func (bf *BlockFactory) QuitChan() chan interface{} { + return bf.quit +} + +// Update has nothging to do. +func (bf *BlockFactory) Update(block *types.Block) { +} + +// Save has nothging to do. +func (bf *BlockFactory) Save(tx consensus.TxWriter) error { + return nil +} + +// BlockFactory returns r itself. +func (bf *BlockFactory) BlockFactory() consensus.BlockFactory { + return bf +} + +// NeedReorganization has nothing to do. +func (bf *BlockFactory) NeedReorganization(rootNo types.BlockNo) bool { + return true +} + +// Start run a raft block factory service. +func (bf *BlockFactory) Start() { + bf.raftServer.Start() + + go bf.worker() + go bf.controller() +} + +func (bf *BlockFactory) controller() { + defer shutdownMsg("block factory controller") + + beginBlock := func(work *Work) error { + // This is only for draining an unconsumed message, which means + // the previous block is generated within timeout. This code + // is needed since an empty block will be generated without it. + if err := bf.checkBpTimeout(); err == chain.ErrQuit { + return err + } + + select { + case bf.workerQueue <- work: + default: + logger.Error().Msgf( + "skip block production for %s due to a pending job", work.ToString()) + } + return nil + } + + notifyBpTimeout := func(work *Work) { + timeout := work.GetTimeout() + time.Sleep(timeout) + bf.bpTimeoutC <- struct{}{} + logger.Debug().Int64("timeout(ms)", timeout.Nanoseconds()/int64(time.Millisecond)).Msg("block production timeout signaled") + } + + for { + select { + case info := <-bf.jobQueue: + work := info.(*Work) + + logger.Debug().Msgf("received work: %s", + log.DoLazyEval(func() string { return work.ToString() })) + + err := beginBlock(work) + if err == chain.ErrQuit { + return + } else if err != nil { + logger.Debug().Err(err).Msg("skip block production") + continue + } + + notifyBpTimeout(work) + + case <-bf.quit: + return + } + } +} + +// worker() is different for each consensus +func (bf *BlockFactory) worker() { + defer logger.Info().Msg("shutdown initiated. 
stop the service") + + runtime.LockOSThread() + + for { + select { + case work := <-bf.workerQueue: + if err := bf.generateBlock(work.Block); err != nil { + if err == chain.ErrQuit { + logger.Info().Msg("quit worker of block factory") + return + } + + logger.Error().Err(err).Msg("failed to produce block") + } + + case cEntry, ok := <-bf.commitC(): + logger.Debug().Msg("received block to connect from raft") + + if !ok { + logger.Fatal().Msg("commit channel for raft is closed") + return + } + + if cEntry.block == nil { + bf.reset() + continue + } + + // add block that has produced by remote BP + if err := bf.connect(cEntry.block); err != nil { + logger.Error().Err(err).Msg("failed to connect block") + return + } + + bf.raftServer.commitProgress.UpdateConnect(cEntry) + case <-bf.quit: + return + } + } +} + +func (bf *BlockFactory) generateBlock(bestBlock *types.Block) (err error) { + defer func() { + if panicMsg := recover(); panicMsg != nil { + err = fmt.Errorf("panic ocurred during block generation - %v", panicMsg) + } + }() + + blockState := bf.sdb.NewBlockState(bestBlock.GetHeader().GetBlocksRootHash()) + + ts := time.Now().UnixNano() + + txOp := chain.NewCompTxOp( + bf.txOp, + newTxExec(bf.ChainWAL, bestBlock.GetHeader().GetBlockNo()+1, ts, bestBlock.GetHash(), bestBlock.GetHeader().GetChainID()), + ) + + block, err := chain.GenerateBlock(bf, bestBlock, blockState, txOp, ts, RaftSkipEmptyBlock) + if err == chain.ErrBlockEmpty { + return nil + } else if err != nil { + logger.Info().Err(err).Msg("failed to generate block") + return err + } + + if err = block.Sign(bf.privKey); err != nil { + logger.Error().Err(err).Msg("failed to sign in block") + return nil + } + + logger.Info().Str("blockProducer", bf.ID).Str("raftID", block.ID()). + Str("sroot", enc.ToString(block.GetHeader().GetBlocksRootHash())). + Uint64("no", block.GetHeader().GetBlockNo()). + Str("hash", block.ID()). + Msg("block produced") + + if !bf.raftServer.IsLeader() { + logger.Info().Msg("dropped produced block because this bp became no longer leader") + return nil + } + + bf.raftOp.propose(block, blockState) + + return nil +} + +func (bf *BlockFactory) commitC() chan *commitEntry { + return bf.raftOp.commitC +} + +func (bf *BlockFactory) reset() { + bf.jobLock.Lock() + defer bf.jobLock.Unlock() + + logger.Debug().Str("prev proposed", bf.raftOp.toString()).Msg("commit nil data, so reset block factory") + + bf.prevBlock = nil +} + +// save block/block state to connect after commit +func (bf *BlockFactory) connect(block *types.Block) error { + proposed := bf.raftOp.proposed + var blockState *state.BlockState + + if proposed != nil { + if !bytes.Equal(block.BlockHash(), proposed.block.BlockHash()) { + logger.Warn().Uint64("prop-no", proposed.block.GetHeader().GetBlockNo()).Str("prop", proposed.block.ID()).Uint64("commit-no", block.GetHeader().GetBlockNo()).Str("commit", block.ID()).Msg("commited block is not proposed by me. this node is probably not leader") + bf.raftOp.resetPropose() + } else { + blockState = proposed.blockState + } + } + + logger.Debug().Uint64("no", block.BlockNo()). + Str("hash", block.ID()). + Str("prev", block.PrevID()). + Bool("proposed", blockState != nil). + Msg("connect block") + + // if bestblock is changed, connecting block failed. new block is generated in next tick + // On a slow server, chain service takes too long to add block in blockchain. In this case, raft server waits to send new block to commit channel. 
+ if err := chain.ConnectBlock(bf, block, blockState, time.Second*300); err != nil { + logger.Error().Msg(err.Error()) + return err + } + + return nil +} + +/* +// waitUntilStartable wait until this chain synchronizes with more than half of all peers +func (bf *BlockFactory) waitSyncWithMajority() error { + ticker := time.NewTicker(peerCheckInterval) + + for { + select { + case <-ticker.C: + if synced, err := bf.bpc.hasSynced(); err != nil { + logger.Error().Err(err).Msg("failed to check sync with a majority of peers") + return err + } else if synced { + return nil + } + + case <-bf.QuitChan(): + logger.Info().Msg("quit while wait sync") + return ErrBFQuit + default: + } + } +} +*/ +// JobQueue returns the queue for block production triggering. +func (bf *BlockFactory) JobQueue() chan<- interface{} { + return bf.jobQueue +} + +// Info retuns an empty string. +func (bf *BlockFactory) Info() string { + // TODO: Returns a appropriate information inx json format like current + // leader, etc. + info := consensus.NewInfo(GetName()) + if bf.raftServer == nil { + return info.AsJSON() + } + + b, err := json.Marshal(bf.bpc.getRaftInfo(false)) + if err != nil { + logger.Error().Err(err).Msg("failed to marshalEntryData raft consensus") + } else { + m := json.RawMessage(b) + info.Status = &m + } + + return info.AsJSON() +} + +func (bf *BlockFactory) ConsensusInfo() *types.ConsensusInfo { + if bf.bpc == nil { + return &types.ConsensusInfo{Type: GetName()} + } + return bf.bpc.toConsensusInfo() +} + +func (bf *BlockFactory) NeedNotify() bool { + return false +} + +func (bf *BlockFactory) HasWAL() bool { + return true +} + +type ErrorMembershipChange struct { + Err error +} + +func (e ErrorMembershipChange) Error() string { + return fmt.Sprintf("failed to change membership: %s", e.Err.Error()) +} + +// ConfChange change membership of raft cluster and returns new membership +func (bf *BlockFactory) ConfChange(req *types.MembershipChange) (*consensus.Member, error) { + if bf.bpc == nil { + return nil, ErrorMembershipChange{ErrClusterNotReady} + } + + if !bf.raftServer.IsLeader() { + return nil, ErrorMembershipChange{ErrNotRaftLeader} + } + + var member *consensus.Member + var err error + if member, err = bf.bpc.ChangeMembership(req); err != nil { + return nil, ErrorMembershipChange{err} + } + + return member, nil +} + +func (bf *BlockFactory) ClusterInfo() ([]*types.MemberAttr, []byte, error) { + return bf.bpc.getMemberAttrs(), bf.bpc.chainID, nil +} + +func (bf *BlockFactory) checkBpTimeout() error { + select { + case <-bf.bpTimeoutC: + return chain.ErrTimeout{Kind: "block"} + case <-bf.quit: + return chain.ErrQuit + default: + return nil + } +} + +type Proposed struct { + block *types.Block + blockState *state.BlockState +} + +type RaftOperator struct { + confChangeC chan *types.MembershipChange + commitC chan *commitEntry + + rs *raftServer + + proposed *Proposed +} + +func newRaftOperator(rs *raftServer) *RaftOperator { + confChangeC := make(chan *types.MembershipChange, 1) + commitC := make(chan *commitEntry, MaxCommitQueueLen) + + return &RaftOperator{confChangeC: confChangeC, commitC: commitC, rs: rs} +} + +func (rop *RaftOperator) propose(block *types.Block, blockState *state.BlockState) { + rop.proposed = &Proposed{block: block, blockState: blockState} + + if err := rop.rs.Propose(block); err != nil { + logger.Error().Err(err).Msg("propose error to raft") + return + } + + logger.Info().Msg("block proposed by blockfactory") +} + +func (rop *RaftOperator) resetPropose() { + rop.proposed = nil + 
logger.Debug().Msg("reset proposed block") +} + +func (rop *RaftOperator) toString() string { + buf := "proposed:" + if rop.proposed != nil && rop.proposed.block != nil { + buf = buf + fmt.Sprintf("[no=%d, hash=%s]", rop.proposed.block.BlockNo(), rop.proposed.block.BlockID().String()) + } else { + buf = buf + "empty" + } + return buf +} + +func shutdownMsg(m string) { + logger.Info().Msgf("shutdown initiated. stop the %s", m) +} diff --git a/consensus/impl/raftv2/cluster.go b/consensus/impl/raftv2/cluster.go new file mode 100644 index 000000000..969ef4392 --- /dev/null +++ b/consensus/impl/raftv2/cluster.go @@ -0,0 +1,912 @@ +package raftv2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sort" + "strconv" + "sync" + "time" + + "github.com/aergoio/aergo/cmd/aergocli/util" + "github.com/aergoio/aergo/consensus" + "github.com/aergoio/aergo/p2p/p2pkey" + "github.com/aergoio/aergo/pkg/component" + "github.com/aergoio/aergo/types" + raftlib "github.com/aergoio/etcd/raft" + "github.com/aergoio/etcd/raft/raftpb" + "github.com/libp2p/go-libp2p-peer" +) + +var ( + MaxConfChangeTimeOut = time.Second * 10 + + ErrClusterHasNoMember = errors.New("cluster has no member") + ErrNotExistRaftMember = errors.New("not exist member of raft cluster") + ErrNoEnableSyncPeer = errors.New("no peer to sync chain") + ErrNotExistMembers = errors.New("not exist members of cluster") + ErrMemberAlreadyApplied = errors.New("member is already added") + + ErrInvalidMembershipReqType = errors.New("invalid type of membership change request") + ErrPendingConfChange = errors.New("pending membership change request is in progree. try again when it is finished") + ErrConChangeTimeOut = errors.New("timeouted membership change request") + ErrConfChangeChannelBusy = errors.New("channel of conf change propose is busy") + ErrCCMemberIsNil = errors.New("memeber is nil") + ErrNotMatchedRaftName = errors.New("mismatched name of raft identity") +) + +const ( + MembersNameInit = "init" + MembersNameApplied = "applied" + MembersNameRemoved = "removed" +) + +type RaftInfo struct { + Leader string + Total string + Name string + RaftId string + Status *json.RawMessage +} + +// raft cluster membership +// copy from dpos/bp +// TODO refactoring +// Cluster represents a cluster of block producers. +type Cluster struct { + component.ICompSyncRequester + sync.Mutex + cdb consensus.ChainDB + + chainID []byte + chainTimestamp int64 + rs *raftServer + + appliedIndex uint64 + appliedTerm uint64 + + identity consensus.RaftIdentity + + Size uint32 + + // @ MatchClusterAndConfState + // cluster members must match nodes of confstate. otherwise confchange may fail and be skipped by comparing with cluster members. + // Mismatch of cluster and confstate occures when node joins a exising cluster. Joined node starts from latest members, but confstate is empty. + // If snapshot is written before all confchange logs be applied, mismatched state is written to disk. + // After recovery from snapshot, problems will happen. + members *Members // using for 1. booting + // 2. send cluster info to remote + appliedMembers *Members // using for 1. verifying runtime confchange. + // 2. creating snapshot + // 3. 
recover from snapshot + + // raft http reject message from removed member + // TODO for p2p + removedMembers *Members + + changeSeq uint64 + confChangeC chan *consensus.ConfChangePropose + + savedChange *consensus.ConfChangePropose +} + +type Members struct { + name string + MapByID map[uint64]*consensus.Member // restore from DB or snapshot + MapByName map[string]*consensus.Member + + Index map[peer.ID]uint64 // peer ID to raft ID mapping + + BPUrls []string //for raft server TODO remove +} + +func newMembers(name string) *Members { + return &Members{ + name: name, + MapByID: make(map[uint64]*consensus.Member), + MapByName: make(map[string]*consensus.Member), + Index: make(map[peer.ID]uint64), + BPUrls: make([]string, 0), + } +} + +func (mbrs *Members) len() int { + return len(mbrs.MapByID) +} + +func (mbrs *Members) ToArray() []*consensus.Member { + count := len(mbrs.MapByID) + + var arrs = make([]*consensus.Member, count) + + i := 0 + for _, m := range mbrs.MapByID { + arrs[i] = m + i++ + } + + return arrs +} + +func (mbrs *Members) toString() string { + var buf string + + buf += fmt.Sprintf("%s", mbrs.name) + + if mbrs == nil { + return "[]" + } + + buf += fmt.Sprintf("[") + for _, bp := range mbrs.MapByID { + buf += fmt.Sprintf("%s", bp.ToString()) + } + buf += fmt.Sprintf("]") + + return buf +} + +func NewCluster(chainID []byte, bf *BlockFactory, raftName string, chainTimestamp int64) *Cluster { + cl := &Cluster{ + chainID: chainID, + chainTimestamp: chainTimestamp, + ICompSyncRequester: bf, + identity: consensus.RaftIdentity{Name: raftName}, + members: newMembers(MembersNameInit), + appliedMembers: newMembers(MembersNameApplied), + removedMembers: newMembers(MembersNameRemoved), + confChangeC: make(chan *consensus.ConfChangePropose), + } + if bf != nil { + cl.cdb = bf.ChainWAL + } + + return cl +} + +func NewClusterFromMemberAttrs(chainID []byte, memberAttrs []*types.MemberAttr) (*Cluster, error) { + cl := NewCluster(chainID, nil, "", 0) + + for _, mbrAttr := range memberAttrs { + var mbr consensus.Member + + mbr.SetAttr(mbrAttr) + + if err := cl.isValidMember(&mbr); err != nil { + logger.Error().Err(err).Str("mbr", mbr.ToString()).Msg("fail to add member") + return nil, err + } + + if err := cl.addMember(&mbr, false); err != nil { + logger.Error().Err(err).Str("mbr", mbr.ToString()).Msg("fail to add member") + return nil, err + } + } + + return cl, nil +} + +func (cl *Cluster) NodeName() string { + return cl.identity.Name +} + +func (cl *Cluster) NodeID() uint64 { + return cl.identity.ID +} + +func (cl *Cluster) SetNodeID(nodeid uint64) { + cl.identity.ID = nodeid +} + +// RecoverIdentity reset node id and name of cluster. 
+// raft identity is saved in WAL and reset when server is restarted +func (cl *Cluster) RecoverIdentity(id *consensus.RaftIdentity) error { + cl.Lock() + defer cl.Unlock() + + // check name + if cl.identity.Name != id.Name { + logger.Error().Str("walidentity", id.ToString()).Str("configname", cl.identity.Name) + return ErrNotMatchedRaftName + } + + cl.identity = *id + + logger.Info().Str("identity", id.ToString()).Msg("recover raft identity of this node") + + return nil +} + +func (cl *Cluster) Recover(snapshot *raftpb.Snapshot) error { + var snapdata = &consensus.SnapshotData{} + + if err := snapdata.Decode(snapshot.Data); err != nil { + return err + } + + logger.Info().Str("snap", snapdata.ToString()).Msg("cluster recover from snapshot") + cl.ResetMembers() + + // members restore + for _, mbr := range snapdata.Members { + cl.addMember(mbr, true) + } + + for _, mbr := range snapdata.RemovedMembers { + cl.RemovedMembers().add(mbr) + } + + logger.Info().Str("info", cl.toStringWithLock()).Msg("cluster recovered") + + return nil +} + +func (cl *Cluster) ResetMembers() { + cl.Lock() + defer cl.Unlock() + + cl.members = newMembers(MembersNameInit) + cl.appliedMembers = newMembers(MembersNameApplied) + cl.removedMembers = newMembers(MembersNameRemoved) + + cl.Size = 0 +} + +func (cl *Cluster) isMatch(confstate *raftpb.ConfState) bool { + var matched int + + if len(cl.members.MapByID) != len(confstate.Nodes) { + return false + } + + for _, confID := range confstate.Nodes { + if _, ok := cl.members.MapByID[confID]; !ok { + return false + } + + matched++ + } + + return true +} + +func (cl *Cluster) Members() *Members { + return cl.members +} + +func (cl *Cluster) AppliedMembers() *Members { + return cl.appliedMembers +} + +func (cl *Cluster) RemovedMembers() *Members { + return cl.removedMembers +} + +func (cl *Cluster) Quorum() uint32 { + return cl.Size/2 + 1 +} + +func (cl *Cluster) getStartPeers() ([]raftlib.Peer, error) { + cl.Lock() + defer cl.Unlock() + + if cl.Size == 0 { + return nil, ErrClusterHasNoMember + } + + rpeers := make([]raftlib.Peer, cl.Size) + + var i int + for _, member := range cl.members.MapByID { + data, err := json.Marshal(member) + if err != nil { + return nil, err + } + rpeers[i] = raftlib.Peer{ID: uint64(member.ID), Context: data} + i++ + } + + return rpeers, nil +} + +// getAnyPeerAddressToSync returns peer address that has block of no for sync +func (cl *Cluster) getAnyPeerAddressToSync() (peer.ID, error) { + cl.Lock() + defer cl.Unlock() + + for _, member := range cl.Members().MapByID { + if member.Name != cl.NodeName() { + return member.GetPeerID(), nil + } + } + + return "", ErrNoEnableSyncPeer +} + +func (cl *Cluster) isValidMember(member *consensus.Member) error { + cl.Lock() + defer cl.Unlock() + + mbrs := cl.members + + for _, prevMember := range mbrs.MapByID { + if prevMember.HasDuplicatedAttr(member) { + logger.Error().Str("prev", prevMember.ToString()).Str("cur", member.ToString()).Msg("duplicated configuration for raft BP member") + return ErrDupBP + } + } + + // check if peerID of this node is valid + // check if peerID of this node is valid + if cl.NodeName() == member.Name && member.GetPeerID() != p2pkey.NodeID() { + return ErrInvalidRaftPeerID + } + + return nil +} + +func (cl *Cluster) addMember(member *consensus.Member, applied bool) error { + logger.Info().Str("member", member.ToString()).Msg("member add") + + cl.Lock() + defer cl.Unlock() + + if applied { + if cl.AppliedMembers().isExist(member.ID) { + return ErrMemberAlreadyApplied + } + 
cl.AppliedMembers().add(member) + } + + if cl.members.isExist(member.ID) { + return nil + } + + cl.members.add(member) + cl.Size++ + + return nil +} + +func (cl *Cluster) removeMember(member *consensus.Member) error { + logger.Info().Str("member", member.ToString()).Msg("member remove") + + cl.Lock() + defer cl.Unlock() + + cl.AppliedMembers().remove(member) + cl.members.remove(member) + cl.removedMembers.add(member) + + cl.Size-- + + return nil +} + +// ValidateAndMergeExistingCluster tests if members of existing cluster are matched with this cluster +func (cl *Cluster) ValidateAndMergeExistingCluster(existingCl *Cluster) bool { + cl.Lock() + defer cl.Unlock() + + myMembers := cl.Members().ToArray() + exMembers := existingCl.Members().ToArray() + + if len(myMembers) != len(exMembers) { + return false + } + + // sort by name + sort.Sort(consensus.MembersByName(myMembers)) + sort.Sort(consensus.MembersByName(exMembers)) + + for i, myMember := range myMembers { + exMember := exMembers[i] + if !myMember.IsCompatible(exMember) { + logger.Error().Str("mymember", myMember.ToString()).Str("existmember", exMember.ToString()).Msg("not compatible with existing member configuration") + return false + } + + myMember.SetMemberID(exMember.GetID()) + } + + myNodeID := existingCl.getNodeID(cl.NodeName()) + + // reset self nodeID of cluster + cl.SetNodeID(myNodeID) + + logger.Debug().Str("my", cl.toStringWithLock()).Msg("cluster merged with existing cluster") + return true +} + +func (cl *Cluster) getMemberAttrs() []*types.MemberAttr { + cl.Lock() + defer cl.Unlock() + + attrs := make([]*types.MemberAttr, cl.members.len()) + + var i = 0 + for _, mbr := range cl.members.MapByID { + // copy attr since it can be modified + attr := mbr.MemberAttr + attrs[i] = &attr + i++ + } + + return attrs +} + +// IsIDRemoved return true if given raft id is not exist in cluster +func (cl *Cluster) IsIDRemoved(id uint64) bool { + return cl.RemovedMembers().isExist(id) +} + +func (mbrs *Members) add(member *consensus.Member) { + mbrs.MapByID[member.ID] = member + mbrs.MapByName[member.Name] = member + mbrs.Index[member.GetPeerID()] = member.ID + mbrs.BPUrls = append(mbrs.BPUrls, member.Url) +} + +func (mbrs *Members) remove(member *consensus.Member) { + delete(mbrs.MapByID, member.ID) + delete(mbrs.MapByName, member.Name) + delete(mbrs.Index, member.GetPeerID()) +} + +func (mbrs *Members) getMemberByName(name string) *consensus.Member { + member, ok := mbrs.MapByName[name] + if !ok { + return nil + } + + return member +} + +func (mbrs *Members) isExist(id uint64) bool { + return mbrs.getMember(id) != nil +} + +func (mbrs *Members) getMember(id uint64) *consensus.Member { + member, ok := mbrs.MapByID[id] + if !ok { + return nil + } + + return member +} + +func (mbrs *Members) getMemberPeerAddress(id uint64) (peer.ID, error) { + member := mbrs.getMember(id) + if member == nil { + return "", ErrNotExistRaftMember + } + + return member.GetPeerID(), nil +} + +// hasDuplicatedMember returns true if any attributes of the given member is equal to the attributes of cluster members +func (mbrs *Members) hasDuplicatedMember(m *consensus.Member) error { + for _, prevMember := range mbrs.MapByID { + if prevMember.HasDuplicatedAttr(m) { + logger.Error().Str("old", prevMember.ToString()).Str("new", m.ToString()).Msg("duplicated attribute for new member") + return ErrDupBP + } + } + return nil +} + +func MaxUint64(x, y uint64) uint64 { + if x < y { + return y + } + return x +} + +/* +// hasSynced get result of GetPeers request from P2P 
service and check if chain of this node is synchronized with majority of members +func (cc *Cluster) hasSynced() (bool, error) { + var peers map[peer.ID]*message.PeerInfo + var err error + var peerBestNo uint64 = 0 + + if cc.Size == 1 { + return true, nil + } + + // request GetPeers to p2p + getBPPeers := func() (map[peer.ID]*message.PeerInfo, error) { + peers := make(map[peer.ID]*message.PeerInfo) + + result, err := cc.RequestFuture(message.P2PSvc, &message.GetPeers{}, time.Second, "raft cluster sync test").Result() + if err != nil { + return nil, err + } + + msg := result.(*message.GetPeersRsp) + + for _, peerElem := range msg.Peers { + peerID := peer.ID(peerElem.Addr.PeerID) + state := peerElem.State + + if peerElem.Self { + continue + } + + if state.Get() != types.RUNNING { + logger.Debug().Str("peer", p2putil.ShortForm(peerID)).Msg("peer is not running") + continue + + } + + // check if peer is not bp + if _, ok := cc.Index[peerID]; !ok { + continue + } + + peers[peerID] = peerElem + + peerBestNo = MaxUint64(peerElem.LastBlockNumber, peerBestNo) + } + + return peers, nil + } + + if peers, err = getBPPeers(); err != nil { + return false, err + } + + if uint16(len(peers)) < (cc.Quorum() - 1) { + logger.Debug().Msg("a majority of peers are not connected") + return false, nil + } + + var best *types.Block + if best, err = cc.cdb.GetBestBlock(); err != nil { + return false, err + } + + if best.BlockNo()+DefaultMarginChainDiff < peerBestNo { + logger.Debug().Uint64("best", best.BlockNo()).Uint64("peerbest", peerBestNo).Msg("chain was not synced with majority of peers") + return false, nil + } + + logger.Debug().Uint64("best", best.BlockNo()).Uint64("peerbest", peerBestNo).Int("margin", DefaultMarginChainDiff).Msg("chain has been synced with majority of peers") + + return true, nil +} +*/ +func (cl *Cluster) toStringWithLock() string { + var buf string + + buf = fmt.Sprintf("total=%d, NodeName=%s, RaftID=%x, ", cl.Size, cl.NodeName(), cl.NodeID()) + buf += "members: " + cl.members.toString() + buf += ", appliedMembers: " + cl.members.toString() + + return buf +} + +func (cl *Cluster) toString() string { + cl.Lock() + defer cl.Unlock() + + return cl.toStringWithLock() +} + +func (cl *Cluster) getNodeID(name string) uint64 { + m, ok := cl.Members().MapByName[name] + if !ok { + return consensus.InvalidMemberID + } + + return m.ID +} + +func (cl *Cluster) getRaftInfo(withStatus bool) *RaftInfo { + cl.Lock() + defer cl.Unlock() + + var leader uint64 + if cl.rs != nil { + leader = cl.rs.GetLeader() + } + + var leaderName string + var m *consensus.Member + + if m = cl.Members().getMember(leader); m != nil { + leaderName = m.Name + } else { + leaderName = "id=" + MemberIDToString(leader) + } + + rinfo := &RaftInfo{Leader: leaderName, Total: strconv.FormatUint(uint64(cl.Size), 10), Name: cl.NodeName(), RaftId: MemberIDToString(cl.NodeID())} + + if withStatus && cl.rs != nil { + b, err := cl.rs.Status().MarshalJSON() + if err != nil { + logger.Error().Err(err).Msg("failed to marshalEntryData raft consensus") + } else { + m := json.RawMessage(b) + rinfo.Status = &m + } + } + return rinfo +} + +func (cl *Cluster) toConsensusInfo() *types.ConsensusInfo { + emptyCons := types.ConsensusInfo{ + Type: GetName(), + } + + type PeerInfo struct { + Name string + RaftID string + PeerID string + Addr string + } + + b, err := json.Marshal(cl.getRaftInfo(true)) + if err != nil { + logger.Error().Err(err).Msg("failed to marshalEntryData raft consensus") + return &emptyCons + } + + cl.Lock() + defer cl.Unlock() + + 
cons := emptyCons + cons.Info = string(b) + + var i int = 0 + if cl.Size != 0 { + bps := make([]string, cl.Size) + + for id, m := range cl.Members().MapByID { + bp := &PeerInfo{Name: m.Name, RaftID: MemberIDToString(m.ID), PeerID: m.GetPeerID().Pretty(), Addr: m.Url} + b, err = json.Marshal(bp) + if err != nil { + logger.Error().Err(err).Str("raftid", MemberIDToString(id)).Msg("failed to marshalEntryData raft consensus bp") + return &emptyCons + } + bps[i] = string(b) + + i++ + } + cons.Bps = bps + } + + return &cons +} + +func (cl *Cluster) NewMemberFromAddReq(req *types.MembershipChange) (*consensus.Member, error) { + peerID, err := peer.IDB58Decode(string(req.Attr.PeerID)) + if err != nil { + return nil, err + } + return consensus.NewMember(req.Attr.Name, req.Attr.Url, peerID, cl.chainID, time.Now().UnixNano()), nil +} + +func (cl *Cluster) NewMemberFromRemoveReq(req *types.MembershipChange) (*consensus.Member, error) { + if req.Attr.ID == consensus.InvalidMemberID { + return nil, consensus.ErrInvalidMemberID + } + + member := consensus.NewMember("", "", peer.ID(""), cl.chainID, 0) + member.SetMemberID(req.Attr.ID) + + return member, nil +} + +func (cl *Cluster) ChangeMembership(req *types.MembershipChange) (*consensus.Member, error) { + var ( + propose *consensus.ConfChangePropose + err error + ) + + if propose, err = cl.requestConfChange(req); err != nil { + return nil, err + } + + return cl.recvConfChangeReply(propose.ReplyC) +} + +func (cl *Cluster) requestConfChange(req *types.MembershipChange) (*consensus.ConfChangePropose, error) { + cl.Lock() + defer cl.Unlock() + + if cl.savedChange != nil { + return nil, ErrPendingConfChange + } + + logger.Info().Str("request", req.ToString()).Msg("start to change membership of cluster") + + var ( + member *consensus.Member + err error + ) + + switch req.Type { + case types.MembershipChangeType_ADD_MEMBER: + member, err = cl.NewMemberFromAddReq(req) + + case types.MembershipChangeType_REMOVE_MEMBER: + member, err = cl.NewMemberFromRemoveReq(req) + + default: + return nil, ErrInvalidMembershipReqType + } + + if err != nil { + logger.Error().Err(err).Msg("failed to make new member") + return nil, err + } + + // make raft confChange + cc, err := cl.makeConfChange(req.Type, member) + if err != nil { + logger.Error().Err(err).Msg("failed to make confChange of raft") + return nil, err + } + + // validate member change + if err = cl.validateChangeMembership(cc, member, false); err != nil { + logger.Error().Err(err).Msg("failed to validate request of membership change") + return nil, err + } + + replyC := make(chan *consensus.ConfChangeReply) + + // TODO check cancel + ctx, cancel := context.WithTimeout(context.Background(), MaxConfChangeTimeOut) + defer cancel() + + // send proposeC (confChange, replyC) + proposal := consensus.ConfChangePropose{Ctx: ctx, Cc: cc, ReplyC: replyC} + + cl.saveConfChangePropose(&proposal) + + select { + case cl.confChangeC <- &proposal: + logger.Info().Msg("proposal of confChange is sent to raft") + default: + logger.Error().Msg("proposal of confChange is dropped. 
confChange channel is busy") + + close(replyC) + cl.resetSavedConfChangePropose() + return nil, ErrConfChangeChannelBusy + } + + return cl.savedChange, nil +} + +func (cl *Cluster) recvConfChangeReply(replyC chan *consensus.ConfChangeReply) (*consensus.Member, error) { + select { + case reply, ok := <-replyC: + if !ok { + logger.Panic().Msg("reply channel of change request must not be closed") + } + + if reply.Err != nil { + logger.Error().Err(reply.Err).Msg("failed confChange") + return nil, reply.Err + } + + logger.Info().Str("cluster", cl.toString()).Str("target", reply.Member.ToString()).Msg("reply of conf change is succeed") + + return reply.Member, nil + case <-time.After(MaxConfChangeTimeOut): + // saved conf change must be reset in raft server after request completes + logger.Warn().Msg("proposal of confChange is time-out") + + return nil, ErrConChangeTimeOut + } +} + +func (cl *Cluster) sendConfChangeReply(cc *raftpb.ConfChange, member *consensus.Member, err error) { + cl.Lock() + defer cl.Unlock() + + if cl.savedChange == nil || cl.savedChange.Cc.ID != cc.ID { + return + } + + propose := cl.savedChange + cl.resetSavedConfChangePropose() + + logger.Debug().Str("req", util.JSON(propose.Cc)).Msg("send reply of conf change") + + propose.ReplyC <- &consensus.ConfChangeReply{Member: member, Err: err} + close(propose.ReplyC) +} + +func (cl *Cluster) saveConfChangePropose(ccPropose *consensus.ConfChangePropose) { + logger.Debug().Uint64("ccid", ccPropose.Cc.ID).Msg("this confChange propose is saved in cluster") + cl.savedChange = ccPropose +} + +func (cl *Cluster) resetSavedConfChangePropose() { + logger.Debug().Msg("reset saved confChange propose") + + cl.savedChange = nil +} + +func (cl *Cluster) validateChangeMembership(cc *raftpb.ConfChange, member *consensus.Member, needlock bool) error { + if member == nil { + return ErrCCMemberIsNil + } + + if needlock { + cl.Lock() + defer cl.Unlock() + } + + appliedMembers := cl.AppliedMembers() + + if member.ID == consensus.InvalidMemberID { + return consensus.ErrInvalidMemberID + } + if cl.RemovedMembers().isExist(member.ID) { + return ErrCCAlreadyRemoved + } + + switch cc.Type { + case raftpb.ConfChangeAddNode: + if !member.IsValid() { + logger.Error().Str("member", member.ToString()).Msg("member has invalid fields") + return ErrInvalidMember + } + + if m := appliedMembers.getMember(member.ID); m != nil { + return ErrCCAlreadyAdded + } + + if err := appliedMembers.hasDuplicatedMember(member); err != nil { + return err + } + + case raftpb.ConfChangeRemoveNode: + var m *consensus.Member + + if m = appliedMembers.getMember(member.ID); m == nil { + return ErrCCNoMemberToRemove + } + + *member = *m + default: + return ErrInvCCType + } + + // - TODO UPDATE + return nil +} + +func (cl *Cluster) makeConfChange(reqType types.MembershipChangeType, member *consensus.Member) (*raftpb.ConfChange, error) { + var changeType raftpb.ConfChangeType + switch reqType { + case types.MembershipChangeType_ADD_MEMBER: + changeType = raftpb.ConfChangeAddNode + case types.MembershipChangeType_REMOVE_MEMBER: + changeType = raftpb.ConfChangeRemoveNode + default: + return nil, ErrInvalidMembershipReqType + } + + logger.Debug().Str("member", member.ToString()).Msg("target member") + + cl.changeSeq++ + + data, err := json.Marshal(member) + if err != nil { + return nil, err + } + + cc := &raftpb.ConfChange{ID: cl.changeSeq, Type: changeType, NodeID: uint64(member.ID), Context: data} + + return cc, nil +} + +func MemberIDToString(id uint64) string { + return 
fmt.Sprintf("%x", id) +} diff --git a/consensus/impl/raftv2/cluster_test.go b/consensus/impl/raftv2/cluster_test.go new file mode 100644 index 000000000..2e46c3073 --- /dev/null +++ b/consensus/impl/raftv2/cluster_test.go @@ -0,0 +1,78 @@ +package raftv2 + +import ( + "encoding/json" + "github.com/aergoio/aergo/consensus" + "github.com/aergoio/aergo/types" + "github.com/libp2p/go-libp2p-peer" + "github.com/stretchr/testify/assert" + "testing" +) + +var ( + testMbrs []*consensus.Member + testPeerID peer.ID + testEncID string + + testSnapData *consensus.SnapshotData +) + +func init() { + testEncID = "16Uiu2HAkxVB65cmCWceTu4HsHnz8WkUKknZXwr7PYdg2vy1fjDcU" + testPeerID, _ = peer.IDB58Decode(testEncID) + + testMbrs = []*consensus.Member{ + {types.MemberAttr{ + ID: 1, + Name: "testm1", + Url: "http://127.0.0.1:13001", + PeerID: []byte(testPeerID), + }}, + {types.MemberAttr{ + ID: 2, + Name: "testm2", + Url: "http://127.0.0.1:13002", + PeerID: []byte(testPeerID), + }}, + {types.MemberAttr{ + ID: 3, + Name: "testm3", + Url: "http://127.0.0.1:13003", + PeerID: []byte(testPeerID), + }}, + } + + testBlock := types.NewBlock(nil, nil, nil, nil, nil, 0) + + testSnapData = consensus.NewSnapshotData(testMbrs, nil, testBlock) +} + +func TestMemberJson(t *testing.T) { + mbr := testMbrs[0] + + data, err := json.Marshal(mbr) + assert.NoError(t, err) + + var newMbr = consensus.Member{} + err = json.Unmarshal(data, &newMbr) + assert.NoError(t, err) + + assert.NoError(t, err) + //t.Logf("peer=%s", peer.IDB58Encode(newMbr.GetPeerID())) + + assert.True(t, mbr.Equal(&newMbr)) +} + +func TestSnapDataJson(t *testing.T) { + var snapdata = testSnapData + + data, err := snapdata.Encode() + assert.NoError(t, err) + + var newSnapdata = &consensus.SnapshotData{} + + err = newSnapdata.Decode(data) + assert.NoError(t, err) + + assert.True(t, snapdata.Equal(newSnapdata)) +} diff --git a/consensus/impl/raftv2/config.go b/consensus/impl/raftv2/config.go new file mode 100644 index 000000000..bcb0ff1d6 --- /dev/null +++ b/consensus/impl/raftv2/config.go @@ -0,0 +1,180 @@ +package raftv2 + +import ( + "errors" + "fmt" + "net/url" + "os" + "strings" + "time" + + "github.com/aergoio/aergo/chain" + "github.com/aergoio/aergo/config" + "github.com/aergoio/aergo/consensus" + "github.com/libp2p/go-libp2p-peer" +) + +var ( + ErrNotIncludedRaftMember = errors.New("this node isn't included in initial raft members") + ErrRaftEmptyTLSFile = errors.New("cert or key file name is empty") + ErrNotHttpsURL = errors.New("url scheme is not https") + ErrDupBP = errors.New("raft bp description is duplicated") + ErrInvalidRaftPeerID = errors.New("peerID of current raft bp is not equals to p2p configure") +) + +const ( + DefaultTickMS = time.Millisecond * 30 +) + +func (bf *BlockFactory) InitCluster(cfg *config.Config) error { + useTls := true + var err error + + raftConfig := cfg.Consensus.Raft + if raftConfig == nil { + panic("raftconfig is not set. please set raftName, raftBPs.") + } + + //set default + if raftConfig.Tick != 0 { + RaftTick = time.Duration(raftConfig.Tick * 1000000) + } + + if raftConfig.SnapFrequency != 0 { + ConfSnapFrequency = raftConfig.SnapFrequency + ConfSnapshotCatchUpEntriesN = raftConfig.SnapFrequency + } + + chainID, err := chain.Genesis.ID.Bytes() + if err != nil { + return err + } + + bf.bpc = NewCluster(chainID, bf, raftConfig.Name, chain.Genesis.Timestamp) + + if useTls, err = validateTLS(raftConfig); err != nil { + logger.Error().Err(err). + Str("key", raftConfig.KeyFile). + Str("cert", raftConfig.CertFile). 
+ Msg("failed to validate tls config for raft") + return err + } + + if raftConfig.ListenUrl != "" { + if err := isValidURL(raftConfig.ListenUrl, useTls); err != nil { + logger.Error().Err(err).Msg("failed to validate listen url for raft") + return err + } + } + + if err = bf.bpc.AddInitialMembers(raftConfig, useTls); err != nil { + logger.Error().Err(err).Msg("failed to validate bpurls, bpid config for raft") + return err + } + + if bf.bpc.Members().len() == 0 { + logger.Fatal().Str("cluster", bf.bpc.toString()).Msg("can't start raft server because there are no members in cluster") + } + + RaftSkipEmptyBlock = raftConfig.SkipEmpty + + logger.Info().Bool("skipempty", RaftSkipEmptyBlock).Int64("rafttick(nanosec)", RaftTick.Nanoseconds()).Float64("interval(sec)", consensus.BlockInterval.Seconds()).Msg(bf.bpc.toString()) + + return nil +} + +func validateTLS(raftCfg *config.RaftConfig) (bool, error) { + if len(raftCfg.CertFile) == 0 && len(raftCfg.KeyFile) == 0 { + return false, nil + } + + //ë‘ íŒŒì¼ì´ ëª¨ë‘ ì„¤ì •ë˜ì–´ 있는지 í™•ì¸ + //실제 fileì— ì¡´ìž¬í•˜ëŠ”ì§€ í™•ì¸ + if len(raftCfg.CertFile) == 0 || len(raftCfg.KeyFile) == 0 { + logger.Error().Str("raftcertfile", raftCfg.CertFile).Str("raftkeyfile", raftCfg.KeyFile). + Msg(ErrRaftEmptyTLSFile.Error()) + return false, ErrRaftEmptyTLSFile + } + + if len(raftCfg.CertFile) != 0 { + if _, err := os.Stat(raftCfg.CertFile); err != nil { + logger.Error().Err(err).Msg("not exist certificate file for raft") + return false, err + } + } + + if len(raftCfg.KeyFile) != 0 { + if _, err := os.Stat(raftCfg.KeyFile); err != nil { + logger.Error().Err(err).Msg("not exist Key file for raft") + return false, err + } + } + + return true, nil +} + +func isValidURL(urlstr string, useTls bool) error { + var urlobj *url.URL + var err error + + if urlobj, err = consensus.ParseToUrl(urlstr); err != nil { + logger.Error().Str("url", urlstr).Err(err).Msg("raft bp urlstr is not vaild form") + return err + } + + if useTls && urlobj.Scheme != "https" { + logger.Error().Str("urlstr", urlstr).Msg("raft bp urlstr shoud use https protocol") + return ErrNotHttpsURL + } + + return nil +} + +func (cl *Cluster) AddInitialMembers(raftCfg *config.RaftConfig, useTls bool) error { + logger.Debug().Msg("add cluster members from config file") + lenBPs := len(raftCfg.BPs) + if lenBPs == 0 { + return fmt.Errorf("config of raft bp is empty") + } + + // validate each bp + for _, raftBP := range raftCfg.BPs { + trimUrl := strings.TrimSpace(raftBP.Url) + + if err := isValidURL(trimUrl, useTls); err != nil { + return err + } + + peerID, err := peer.IDB58Decode(raftBP.P2pID) + if err != nil { + return fmt.Errorf("invalid raft peerID %s", raftBP.P2pID) + } + + m := consensus.NewMember(raftBP.Name, trimUrl, peerID, cl.chainID, cl.chainTimestamp) + + if err := cl.isValidMember(m); err != nil { + return err + } + if err := cl.addMember(m, false); err != nil { + return err + } + } + + return nil +} + +func (cl *Cluster) SetThisNodeID() error { + cl.Lock() + defer cl.Unlock() + + var member *consensus.Member + + if member = cl.Members().getMemberByName(cl.NodeName()); member == nil { + return ErrNotIncludedRaftMember + } + + // it can be reset when this node is added to cluster + cl.SetNodeID(member.ID) + + return nil +} diff --git a/consensus/impl/raft/listener.go b/consensus/impl/raftv2/listener.go similarity index 99% rename from consensus/impl/raft/listener.go rename to consensus/impl/raftv2/listener.go index 4836b04d2..6d2f8e069 100644 --- a/consensus/impl/raft/listener.go +++ 
b/consensus/impl/raftv2/listener.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package raft +package raftv2 import ( "errors" diff --git a/consensus/impl/raftv2/p2p.go b/consensus/impl/raftv2/p2p.go new file mode 100644 index 000000000..b67846fa5 --- /dev/null +++ b/consensus/impl/raftv2/p2p.go @@ -0,0 +1,59 @@ +package raftv2 + +import ( + "errors" + "fmt" + "github.com/aergoio/aergo/message" + "github.com/aergoio/aergo/pkg/component" + "time" +) + +var ( + MaxTimeOutCluter = time.Second * 10 + MaxTryGetCluster = 3 + + ErrGetClusterReplyC = errors.New("reply channel of getcluster request is closed") + ErrGetClusterTimeout = errors.New("timeout for getcluster") + ErrGetClusterEmpty = errors.New("getcluster reply is empty") + ErrGetClusterFail = errors.New("failed to get cluster info") +) + +// GetBestBlock returns the current best block from chainservice +func GetClusterInfo(hs *component.ComponentHub) (*Cluster, error) { + logger.Info().Msg("try getclusterinfo to p2p") + + replyC := make(chan *message.GetClusterRsp) + hs.Tell(message.P2PSvc, &message.GetCluster{ReplyC: replyC}) + + var ( + rsp *message.GetClusterRsp + ok bool + err error + newCl *Cluster + ) + + select { + case rsp, ok = <-replyC: + if !ok { + return nil, ErrGetClusterReplyC + } + + if rsp.Err != nil { + return nil, fmt.Errorf("get cluster failed: %s", rsp.Err) + } + + if len(rsp.Members) == 0 { + return nil, ErrGetClusterEmpty + } + + case <-time.After(MaxTimeOutCluter): + return nil, ErrGetClusterTimeout + } + + if newCl, err = NewClusterFromMemberAttrs(rsp.ChainID, rsp.Members); err != nil { + return nil, err + } + + //logger.Debug().Str("info", newCl.toString()).Msg("get remote cluster info") + return newCl, nil +} diff --git a/consensus/impl/raft/raftlogger.go b/consensus/impl/raftv2/raftlogger.go similarity index 99% rename from consensus/impl/raft/raftlogger.go rename to consensus/impl/raftv2/raftlogger.go index f22e84bc5..ee886e9d1 100644 --- a/consensus/impl/raft/raftlogger.go +++ b/consensus/impl/raftv2/raftlogger.go @@ -1,4 +1,4 @@ -package raft +package raftv2 import ( "fmt" diff --git a/consensus/impl/raftv2/raftserver.go b/consensus/impl/raftv2/raftserver.go new file mode 100644 index 000000000..8da26c3d4 --- /dev/null +++ b/consensus/impl/raftv2/raftserver.go @@ -0,0 +1,1223 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
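// Editor's note (illustrative sketch, not part of this patch): GetClusterInfo in
// the p2p.go added above sends a GetCluster request to the P2P service and waits
// up to MaxTimeOutCluter for the reply. A joining node would typically retry a few
// times, bounded by MaxTryGetCluster. The wrapper name below is hypothetical and
// assumes it lives in package raftv2 next to the code added above.
func fetchClusterWithRetry(hs *component.ComponentHub) (*Cluster, error) {
	var lastErr error
	// retry, since remote peers may not be ready to answer GetCluster yet
	for i := 0; i < MaxTryGetCluster; i++ {
		cl, err := GetClusterInfo(hs)
		if err == nil {
			return cl, nil
		}
		lastErr = err
	}
	return nil, lastErr
}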
+ +package raftv2 + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "errors" + "github.com/aergoio/aergo/chain" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/pkg/component" + "github.com/gogo/protobuf/proto" + "io" + "net/http" + "net/url" + "os" + "runtime/debug" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/aergoio/aergo/consensus" + "github.com/aergoio/aergo/types" + + "github.com/aergoio/etcd/etcdserver/stats" + etcdtypes "github.com/aergoio/etcd/pkg/types" + raftlib "github.com/aergoio/etcd/raft" + "github.com/aergoio/etcd/raft/raftpb" + "github.com/aergoio/etcd/rafthttp" + "github.com/aergoio/etcd/snap" +) + +//noinspection ALL +var ( + raftLogger raftlib.Logger + ConfSnapFrequency uint64 = 10 + ConfSnapshotCatchUpEntriesN uint64 = ConfSnapFrequency +) + +var ( + ErrNoSnapshot = errors.New("no snapshot") + ErrCCAlreadyApplied = errors.New("conf change entry is already applied") + ErrInvalidMember = errors.New("member of conf change is invalid") + ErrCCAlreadyAdded = errors.New("member has already added") + ErrCCAlreadyRemoved = errors.New("member has already removed") + ErrCCNoMemberToRemove = errors.New("there is no member to remove") + ErrEmptySnapshot = errors.New("received empty snapshot") + ErrInvalidRaftIdentity = errors.New("raft identity is not set") +) + +const ( + HasNoLeader uint64 = 0 +) + +func init() { + raftLogger = NewRaftLogger(logger) +} + +// A key-value stream backed by raft +// A key-value stream backed by raft +type raftServer struct { + *component.ComponentHub + sync.RWMutex + + pa p2pcommon.PeerAccessor + + cluster *Cluster + + confChangeC <-chan *consensus.ConfChangePropose // proposed cluster config changes + commitC chan *commitEntry // entries committed to log (k,v) + errorC chan error // errors from raft session + + id uint64 // client ID for raft session + listenUrl string + join bool // node is joining an existing cluster + getSnapshot func() ([]byte, error) + lastIndex uint64 // index of log at start + + snapshotIndex uint64 + appliedIndex uint64 + + // raft backing for the commit/error channel + node raftlib.Node + raftStorage *raftlib.MemoryStorage + //wal *wal.WAL + walDB *WalDB + + snapshotter *ChainSnapshotter + snapshotterReady chan *snap.Snapshotter // signals when snapshotter is ready + + snapFrequency uint64 + transport *rafthttp.Transport + stopc chan struct{} // signals proposal channel closed + httpstopc chan struct{} // signals http server to shutdown + httpdonec chan struct{} // signals http server shutdown complete + + leaderStatus LeaderStatus + + certFile string + keyFile string + + promotable bool + + tickMS time.Duration + + confState *raftpb.ConfState + + commitProgress CommitProgress +} + +type LeaderStatus struct { + leader uint64 + leaderChanged uint64 +} + +type commitEntry struct { + block *types.Block + index uint64 + term uint64 +} + +type CommitProgress struct { + sync.Mutex + + connect commitEntry // last connected entry to chain + request commitEntry // last requested entry to commitC +} + +func (cp *CommitProgress) UpdateConnect(ce *commitEntry) { + logger.Debug().Uint64("term", ce.term).Uint64("index", ce.index).Uint64("no", ce.block.BlockNo()).Str("hash", ce.block.ID()).Msg("set progress of last connected block") + + cp.Lock() + defer cp.Unlock() + + cp.connect = *ce +} + +func (cp *CommitProgress) UpdateRequest(ce *commitEntry) { + logger.Debug().Uint64("term", ce.term).Uint64("index", ce.index).Uint64("no", ce.block.BlockNo()).Str("hash", 
ce.block.ID()).Msg("set progress of last request block") + + cp.Lock() + defer cp.Unlock() + + cp.request = *ce +} + +func (cp *CommitProgress) GetConnect() *commitEntry { + cp.Lock() + defer cp.Unlock() + + return &cp.connect +} + +func (cp *CommitProgress) GetRequest() *commitEntry { + cp.Lock() + defer cp.Unlock() + + return &cp.request +} + +func (cp *CommitProgress) IsReadyToPropose() bool { + cp.Lock() + defer cp.Unlock() + + if cp.request.block == nil { + return true + } + + var connNo, reqNo uint64 + reqNo = cp.request.block.BlockNo() + if cp.connect.block != nil { + connNo = cp.connect.block.BlockNo() + } + + if reqNo <= connNo { + return true + } + + logger.Debug().Uint64("requested", reqNo).Uint64("connected", connNo).Msg("remain pending request to conenct") + + return false +} + +func RecoverExit() { + if r := recover(); r != nil { + logger.Error().Str("callstack", string(debug.Stack())).Msg("panic occurred in raft server") + os.Exit(10) + } +} + +func makeConfig(nodeID uint64, storage *raftlib.MemoryStorage) *raftlib.Config { + c := &raftlib.Config{ + ID: nodeID, + ElectionTick: 10, + HeartbeatTick: 1, + Storage: storage, + MaxSizePerMsg: 1024 * 1024, + MaxInflightMsgs: 256, + Logger: raftLogger, + CheckQuorum: true, + DisableProposalForwarding: true, + } + + return c +} + +// newRaftServer initiates a raft instance and returns a committed log entry +// channel and error channel. Proposals for log updates are sent over the +// provided the proposal channel. All log entries are replayed over the +// commit channel, followed by a nil message (to indicate the channel is +// current), then new log entries. To shutdown, close proposeC and read errorC. +func newRaftServer(hub *component.ComponentHub, + cluster *Cluster, + listenUrl string, join bool, + certFile string, keyFile string, + getSnapshot func() ([]byte, error), + tickMS time.Duration, + confChangeC chan *consensus.ConfChangePropose, + commitC chan *commitEntry, + delayPromote bool, + chainWal consensus.ChainWAL) *raftServer { + + errorC := make(chan error, 1) + + rs := &raftServer{ + ComponentHub: hub, + RWMutex: sync.RWMutex{}, + cluster: cluster, + walDB: NewWalDB(chainWal), + confChangeC: confChangeC, + commitC: commitC, + errorC: errorC, + listenUrl: listenUrl, + join: join, + getSnapshot: getSnapshot, + snapFrequency: ConfSnapFrequency, + stopc: make(chan struct{}), + httpstopc: make(chan struct{}), + httpdonec: make(chan struct{}), + + snapshotterReady: make(chan *snap.Snapshotter, 1), + // rest of structure populated after WAL replay + + certFile: certFile, + keyFile: keyFile, + + promotable: true, + tickMS: tickMS, + commitProgress: CommitProgress{}, + } + + if delayPromote { + rs.SetPromotable(false) + } + + rs.snapshotter = newChainSnapshotter(nil, rs.ComponentHub, rs.cluster, rs.walDB, func() uint64 { return rs.GetLeader() }) + + return rs +} + +func (rs *raftServer) SetID(id uint64) { + logger.Info().Str("id", MemberIDToString(id)).Msg("set my raft id") + rs.id = id +} + +func (rs *raftServer) SetPeerAccessor(pa p2pcommon.PeerAccessor) { + rs.pa = pa + rs.snapshotter.setPeerAccessor(pa) +} + +func (rs *raftServer) SetPromotable(val bool) { + rs.Lock() + defer rs.Unlock() + + rs.promotable = val +} + +func (rs *raftServer) GetPromotable() bool { + rs.RLock() + defer rs.RUnlock() + + val := rs.promotable + + return val +} + +func (rs *raftServer) Start() { + go rs.startRaft() +} + +func (rs *raftServer) makeStartPeers() ([]raftlib.Peer, error) { + return rs.cluster.getStartPeers() +} + +type RaftServerState int 
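makeConfig above fixes HeartbeatTick at 1 and ElectionTick at 10, so the wall-clock heartbeat interval and election timeout are multiples of the tick duration passed to newRaftServer. A small standalone sketch of that arithmetic; the 100ms tick is only an assumed example value, not a value taken from this change.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Tick multiples taken from makeConfig above.
	const heartbeatTick = 1
	const electionTick = 10

	// The tick duration comes from the tickMS argument of newRaftServer;
	// 100ms here is only an assumed example value.
	tick := 100 * time.Millisecond

	heartbeat := time.Duration(heartbeatTick) * tick
	electionTimeout := time.Duration(electionTick) * tick

	fmt.Printf("heartbeat every %v, election timeout at least %v\n", heartbeat, electionTimeout)
}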
+ +const ( + RaftServerStateRestart = iota + RaftServerStateNewCluster + RaftServerStateJoinCluster +) + +func (rs *raftServer) startRaft() { + var node raftlib.Node + + getState := func() RaftServerState { + hasWal, err := rs.walDB.HasWal() + if err != nil { + logger.Fatal().Msg("failed to check if wal has initializeded") + } + + switch { + case hasWal: + return RaftServerStateRestart + case rs.join: + return RaftServerStateJoinCluster + default: + return RaftServerStateNewCluster + } + + } + + switch getState() { + case RaftServerStateRestart: + logger.Info().Msg("raft restart from wal") + + rs.cluster.ResetMembers() + + snapshot, err := rs.loadSnapshot() + if err != nil { + logger.Fatal().Err(err).Msg("failed to read snapshot") + } + + if err := rs.replayWAL(snapshot); err != nil { + logger.Fatal().Err(err).Msg("replay wal failed for raft") + } + + // cluster identity is recoverd from wal + rs.SetID(rs.cluster.NodeID()) + + // members of cluster will be loaded from snapshot or wal + if snapshot != nil { + if err := rs.cluster.Recover(snapshot); err != nil { + logger.Fatal().Err(err).Msg("failt to recover cluster from snapshot") + } + } + + c := makeConfig(rs.id, rs.raftStorage) + + logger.Info().Msg("raft node restart") + node = raftlib.RestartNode(c) + case RaftServerStateJoinCluster: + logger.Info().Msg("raft start at first time and join existing cluster") + + // get cluster info from existing cluster member + existCluster, err := rs.GetExistingCluster() + if err != nil { + logger.Fatal().Err(err).Str("mine", rs.cluster.toString()).Msg("failed to get existing cluster info") + } + + // config validate + if !rs.cluster.ValidateAndMergeExistingCluster(existCluster) { + logger.Fatal().Str("existcluster", existCluster.toString()).Str("mycluster", rs.cluster.toString()).Msg("this cluster configuration is not compatible with existing cluster") + } + + rs.SetID(rs.cluster.NodeID()) + if err := rs.SaveIdentity(); err != nil { + logger.Fatal().Err(err).Msg("fafiled to save identity") + } + + rs.raftStorage = raftlib.NewMemoryStorage() + + // reset my raft nodeID from existing cluster + c := makeConfig(rs.id, rs.raftStorage) + + logger.Info().Msg("raft node start") + node = raftlib.StartNode(c, nil) + case RaftServerStateNewCluster: + logger.Info().Msg("raft start at first time and makes new cluster") + + var startPeers []raftlib.Peer + + if err := rs.cluster.SetThisNodeID(); err != nil { + logger.Fatal().Err(err).Msg("failed to set id of this node") + } + rs.SetID(rs.cluster.NodeID()) + if err := rs.SaveIdentity(); err != nil { + logger.Fatal().Err(err).Msg("fafiled to save identity") + } + + startPeers, err := rs.makeStartPeers() + if err != nil { + logger.Fatal().Err(err).Msg("failed to make raft peer list") + } + + rs.raftStorage = raftlib.NewMemoryStorage() + + c := makeConfig(rs.id, rs.raftStorage) + + logger.Info().Msg("raft node start") + node = raftlib.StartNode(c, startPeers) + } + + // need locking for sync with consensusAccessor + rs.setNodeSync(node) + + rs.startTransport() + + go rs.serveRaft() + go rs.serveChannels() +} + +func (rs *raftServer) startTransport() { + rs.transport = &rafthttp.Transport{ + ID: etcdtypes.ID(rs.id), + ClusterID: 0x1000, + Raft: rs, + ServerStats: stats.NewServerStats("", ""), + LeaderStats: stats.NewLeaderStats(strconv.FormatUint(uint64(rs.id), 10)), + Snapshotter: rs.snapshotter, + ErrorC: rs.errorC, + } + + rs.transport.SetLogger(httpLogger) + + if err := rs.transport.Start(); err != nil { + logger.Fatal().Err(err).Msg("failed to start raft http") + } 
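// Note: illustrative sketch, not part of this diff. The Raft field wired into the
// rafthttp.Transport above expects the small handler interface below; raftServer
// satisfies it with the Process, IsIDRemoved, ReportUnreachable and ReportSnapshot
// methods defined later in this file, which is how messages arriving over rafthttp
// are stepped into the local raft node.
//
//	type Raft interface {
//		Process(ctx context.Context, m raftpb.Message) error
//		IsIDRemoved(id uint64) bool
//		ReportUnreachable(id uint64)
//		ReportSnapshot(id uint64, status raftlib.SnapshotStatus)
//	}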
+ + for _, member := range rs.cluster.Members().MapByID { + if rs.cluster.NodeID() != member.ID { + rs.transport.AddPeer(etcdtypes.ID(member.ID), []string{member.Url}) + } + } +} + +func (rs *raftServer) SaveIdentity() error { + if rs.cluster.NodeID() == consensus.InvalidMemberID || len(rs.cluster.NodeName()) == 0 { + return ErrInvalidRaftIdentity + } + + if err := rs.walDB.WriteIdentity(&rs.cluster.identity); err != nil { + logger.Fatal().Err(err).Msg("failed to write raft identity to wal") + return err + } + + return nil +} + +func (rs *raftServer) setNodeSync(node raftlib.Node) { + rs.Lock() + defer rs.Unlock() + + rs.node = node +} + +func (rs *raftServer) getNodeSync() raftlib.Node { + var node raftlib.Node + + rs.RLock() + defer rs.RUnlock() + + node = rs.node + + return node +} + +// stop closes http, closes all channels, and stops raft. +func (rs *raftServer) stop() { + rs.stopHTTP() + close(rs.commitC) + close(rs.errorC) + rs.node.Stop() +} + +func (rs *raftServer) stopHTTP() { + rs.transport.Stop() + close(rs.httpstopc) + <-rs.httpdonec +} + +func (rs *raftServer) writeError(err error) { + logger.Error().Err(err).Msg("write err has occurend raft server. ") +} + +// TODO timeout handling with context +func (rs *raftServer) Propose(block *types.Block) error { + if data, err := marshalEntryData(block); err == nil { + // blocks until accepted by raft state machine + if err := rs.node.Propose(context.TODO(), data); err != nil { + return err + } + + logger.Debug().Int("len", len(data)).Msg("proposed data to raft node") + } else { + logger.Fatal().Err(err).Msg("poposed data is invalid") + } + + return nil +} + +func (rs *raftServer) serveConfChange() { + handleConfChange := func(propose *consensus.ConfChangePropose) { + if err := rs.node.ProposeConfChange(context.TODO(), *propose.Cc); err != nil { + logger.Error().Err(err).Msg("failed to propose configure change") + rs.cluster.sendConfChangeReply(propose.Cc, nil, err) + return + } + } + + // send proposals over raft + for rs.confChangeC != nil { + select { + case confChangePropose, ok := <-rs.confChangeC: + if !ok { + rs.confChangeC = nil + } else { + handleConfChange(confChangePropose) + } + } + } + // client closed channel; shutdown raft if not already + close(rs.stopc) +} + +func (rs *raftServer) serveChannels() { + defer RecoverExit() + + snapshot, err := rs.raftStorage.Snapshot() + if err != nil { + panic(err) + } + rs.setConfState(&snapshot.Metadata.ConfState) + rs.setSnapshotIndex(snapshot.Metadata.Index) + rs.setAppliedIndex(snapshot.Metadata.Index) + + ticker := time.NewTicker(rs.tickMS) + defer ticker.Stop() + + go rs.serveConfChange() + + // event loop on raft state machine updates + for { + select { + case <-ticker.C: + if rs.GetPromotable() { + rs.node.Tick() + } + + // store raft entries to walDB, then publish over commit channel + case rd := <-rs.node.Ready(): + if len(rd.Entries) > 0 || len(rd.CommittedEntries) > 0 || !raftlib.IsEmptyHardState(rd.HardState) || rd.SoftState != nil { + logger.Debug().Int("entries", len(rd.Entries)).Int("commitentries", len(rd.CommittedEntries)).Str("hardstate", rd.HardState.String()).Msg("ready to process") + } + + if rs.IsLeader() { + if err := rs.processMessages(rd.Messages); err != nil { + logger.Fatal().Err(err).Msg("leader process message error") + } + } + + if err := rs.walDB.SaveEntry(rd.HardState, rd.Entries); err != nil { + logger.Fatal().Err(err).Msg("failed to save entry to wal") + } + + if !raftlib.IsEmptySnap(rd.Snapshot) { + if err := rs.walDB.WriteSnapshot(&rd.Snapshot); err 
!= nil { + logger.Fatal().Err(err).Msg("failed to save snapshot to wal") + } + + if err := rs.raftStorage.ApplySnapshot(rd.Snapshot); err != nil { + logger.Fatal().Err(err).Msg("failed to apply snapshot") + } + + if err := rs.publishSnapshot(rd.Snapshot); err != nil { + logger.Fatal().Err(err).Msg("failed to publish snapshot") + } + } + if err := rs.raftStorage.Append(rd.Entries); err != nil { + logger.Fatal().Err(err).Msg("failed to append new entries to raft log") + } + + if !rs.IsLeader() { + if err := rs.processMessages(rd.Messages); err != nil { + logger.Fatal().Err(err).Msg("process message error") + } + } + if ok := rs.publishEntries(rs.entriesToApply(rd.CommittedEntries)); !ok { + rs.stop() + return + } + rs.triggerSnapshot() + + // New block must be created after connecting all commited block + if rd.SoftState != nil { + rs.updateLeader(rd.SoftState) + } + + rs.node.Advance() + case err := <-rs.errorC: + rs.writeError(err) + return + + case <-rs.stopc: + rs.stop() + return + } + } +} + +func (rs *raftServer) processMessages(msgs []raftpb.Message) error { + var err error + var tmpSnapMsg *snap.Message + + snapMsgs := make([]*snap.Message, 0) + + // reset MsgSnap to send snap.Message + for i, msg := range msgs { + if msg.Type == raftpb.MsgSnap { + tmpSnapMsg, err = rs.makeSnapMessage(&msg) + if err != nil { + return err + } + snapMsgs = append(snapMsgs, tmpSnapMsg) + + msgs[i].To = 0 + } + } + + rs.transport.Send(msgs) + + for _, tmpSnapMsg := range snapMsgs { + rs.transport.SendSnapshot(*tmpSnapMsg) + } + + return nil +} + +func (rs *raftServer) makeSnapMessage(msg *raftpb.Message) (*snap.Message, error) { + if msg.Type != raftpb.MsgSnap { + return nil, ErrNotMsgSnap + } + + /* + // make snapshot with last progress of raftserver + snapshot, err := rs.snapshotter.createSnapshot(rs.prevProgress, rs.confState) + if err != nil { + return nil, err + } + + msg.Snapshot = *snapshot + */ + // TODO add cluster info to snapshot.data + + logger.Debug().Uint64("term", msg.Term).Uint64("index", msg.Index).Msg("send merged snapshot message") + + // not using pipe to send snapshot + pr, pw := io.Pipe() + + go func() { + buf := new(bytes.Buffer) + err := binary.Write(buf, binary.LittleEndian, int32(1)) + if err != nil { + logger.Fatal().Err(err).Msg("raft pipe binary write err") + } + + n, err := pw.Write(buf.Bytes()) + if err == nil { + logger.Debug().Msgf("wrote database snapshot out [total bytes: %d]", n) + } else { + logger.Error().Msgf("failed to write database snapshot out [written bytes: %d]: %v", n, err) + } + if err := pw.CloseWithError(err); err != nil { + logger.Fatal().Err(err).Msg("raft pipe close error") + } + }() + + return snap.NewMessage(*msg, pr, 4), nil +} + +func (rs *raftServer) serveRaft() { + defer RecoverExit() + + urlstr := rs.listenUrl + urlData, err := url.Parse(urlstr) + if err != nil { + logger.Fatal().Err(err).Str("url", urlstr).Msg("Failed parsing URL") + } + + ln, err := newStoppableListener(urlData.Host, rs.httpstopc) + if err != nil { + logger.Fatal().Err(err).Str("url", urlstr).Msg("Failed to listen rafthttp") + } + + if len(rs.certFile) != 0 && len(rs.keyFile) != 0 { + logger.Info().Str("url", urlstr).Str("certfile", rs.certFile).Str("keyfile", rs.keyFile). 
+ Msg("raft http server(tls) started") + + err = (&http.Server{Handler: rs.transport.Handler()}).ServeTLS(ln, rs.certFile, rs.keyFile) + } else { + logger.Info().Str("url", urlstr).Msg("raft http server started") + + err = (&http.Server{Handler: rs.transport.Handler()}).Serve(ln) + } + + select { + case <-rs.httpstopc: + default: + logger.Fatal().Err(err).Msg("Failed to serve rafthttp") + } + close(rs.httpdonec) +} + +func (rs *raftServer) loadSnapshot() (*raftpb.Snapshot, error) { + snapshot, err := rs.walDB.GetSnapshot() + if err != nil { + logger.Fatal().Err(err).Msg("error loading snapshot") + return nil, err + } + + if snapshot == nil { + logger.Info().Msg("snapshot does not exist") + return nil, nil + } + + snapdata := &consensus.SnapshotData{} + + err = snapdata.Decode(snapshot.Data) + if err != nil { + logger.Fatal().Err(err).Msg("error decoding snapshot") + return nil, err + } + + logger.Info().Str("meta", consensus.SnapToString(snapshot, snapdata)).Msg("loaded snapshot meta") + + return snapshot, nil +} + +// replayWAL replays WAL entries into the raft instance. +func (rs *raftServer) replayWAL(snapshot *raftpb.Snapshot) error { + logger.Info().Msg("replaying WAL") + + identity, st, ents, err := rs.walDB.ReadAll(snapshot) + if err != nil { + logger.Fatal().Err(err).Msg("failed to read WAL") + return err + } + + if err := rs.cluster.RecoverIdentity(identity); err != nil { + logger.Fatal().Err(err).Msg("failed to recover raft identity from wal") + } + + rs.raftStorage = raftlib.NewMemoryStorage() + if snapshot != nil { + if err := rs.raftStorage.ApplySnapshot(*snapshot); err != nil { + logger.Fatal().Err(err).Msg("failed to apply snapshot to reaply wal") + } + } + if err := rs.raftStorage.SetHardState(*st); err != nil { + logger.Fatal().Err(err).Msg("failed to set hard state to reaply wal") + } + + // append to storage so raft starts at the right place in log + if err := rs.raftStorage.Append(ents); err != nil { + logger.Fatal().Err(err).Msg("failed to append entries to reaply wal") + } + // send nil once lastIndex is published so client knows commit channel is current + if len(ents) > 0 { + rs.lastIndex = ents[len(ents)-1].Index + } + + logger.Info().Uint64("lastindex", rs.lastIndex).Msg("replaying WAL done") + + return nil +} + +/* +// createSnapshot make marshalled data of chain & cluster info +func (rs *raftServer) createSnapshot() ([]byte, error) { + // this snapshot is used when reboot and initialize raft log + if rs.prevProgress.isEmpty() { + logger.Fatal().Msg("last applied block is nil") + } + + snapBlock := rs.prevProgress.block + + logger.Info().Str("hash", snapBlock.ID()).Uint64("no", snapBlock.BlockNo()).Msg("create new snapshot of chain") + + snap := consensus.NewChainSnapshot(snapBlock) + if snap == nil { + panic("new snap failed") + } + + return snap.Encode() +}*/ + +// triggerSnapshot create snapshot and make compaction for raft log storage +// raft can not wait until last applied entry commits. so snapshot must create from current best block. +// +// @ MatchBlockAndCluster +// snapshot use current state of cluster and confstate. but last applied block may not be commited yet. +// so raft use last commited block. 
because of this, some conf change log can cause error on node that received snapshot +func (rs *raftServer) triggerSnapshot() { + ce := rs.commitProgress.GetConnect() + newSnapshotIndex, snapBlock := ce.index, ce.block + + if newSnapshotIndex == 0 || rs.confState == nil { + return + } + + if len(rs.confState.Nodes) == 0 { + // TODO Fatal -> Error after test + logger.Fatal().Msg("confstate node is empty for snapshot") + return + } + + if newSnapshotIndex-rs.snapshotIndex <= rs.snapFrequency { + return + } + + logger.Info().Uint64("applied", rs.appliedIndex).Uint64("new snap index", newSnapshotIndex).Uint64("last snapshot index", rs.snapshotIndex).Msg("start snapshot") + + // make snapshot data of best block + snapdata, err := rs.snapshotter.createSnapshotData(rs.cluster, snapBlock, rs.confState) + if err != nil { + logger.Fatal().Err(err).Msg("failed to create snapshot data from prev block") + } + + data, err := snapdata.Encode() + if err != nil { + logger.Fatal().Err(err).Msg("failed to marshal snapshot data") + } + + // snapshot.data is not used for snapshot transfer. At the time of transmission, a message is generated again with information at that time and sent. + snapshot, err := rs.raftStorage.CreateSnapshot(newSnapshotIndex, rs.confState, data) + if err != nil { + logger.Fatal().Err(err).Msg("failed to create snapshot") + } + + // save snapshot to wal + if err := rs.walDB.WriteSnapshot(&snapshot); err != nil { + logger.Fatal().Err(err).Msg("failed to write snapshot") + } + + compactIndex := uint64(1) + if newSnapshotIndex > ConfSnapshotCatchUpEntriesN { + compactIndex = newSnapshotIndex - ConfSnapshotCatchUpEntriesN + } + if err := rs.raftStorage.Compact(compactIndex); err != nil { + if err == raftlib.ErrCompacted { + return + } + panic(err) + } + + logger.Info().Uint64("index", compactIndex).Msg("compacted raftLog.at index") + rs.setSnapshotIndex(newSnapshotIndex) + + chain.TestDebugger.Check(chain.DEBUG_RAFT_SNAP_FREQ, 0, + func(freq int) error { + rs.snapFrequency = uint64(freq) + return nil + }) +} + +func (rs *raftServer) publishSnapshot(snapshotToSave raftpb.Snapshot) error { + updateProgress := func() error { + var snapdata = &consensus.SnapshotData{} + + err := snapdata.Decode(snapshotToSave.Data) + if err != nil { + logger.Error().Msg("failed to unmarshal snapshot data to progress") + return err + } + + block, err := rs.walDB.GetBlockByNo(snapdata.Chain.No) + if err != nil { + logger.Fatal().Msg("failed to get synchronized block") + return err + } + + rs.commitProgress.UpdateConnect(&commitEntry{block: block, index: snapshotToSave.Metadata.Index, term: snapshotToSave.Metadata.Term}) + + return nil + } + + if raftlib.IsEmptySnap(snapshotToSave) { + return ErrEmptySnapshot + } + + logger.Info().Uint64("index", rs.snapshotIndex).Str("snap", consensus.SnapToString(&snapshotToSave, nil)).Msg("publishing snapshot at index") + defer logger.Info().Uint64("index", rs.snapshotIndex).Msg("finished publishing snapshot at index") + + if snapshotToSave.Metadata.Index <= rs.appliedIndex { + logger.Fatal().Msgf("snapshot index [%d] should > progress.appliedIndex [%d] + 1", snapshotToSave.Metadata.Index, rs.appliedIndex) + } + //rs.commitC <- nil // trigger kvstore to load snapshot + + rs.setConfState(&snapshotToSave.Metadata.ConfState) + rs.setSnapshotIndex(snapshotToSave.Metadata.Index) + rs.setAppliedIndex(snapshotToSave.Metadata.Index) + + if err := rs.cluster.Recover(&snapshotToSave); err != nil { + return err + } + + rs.recoverTransport() + + return updateProgress() +} + +func (rs 
*raftServer) recoverTransport() { + logger.Info().Msg("remove all peers") + rs.transport.RemoveAllPeers() + + for _, m := range rs.cluster.AppliedMembers().MapByID { + if m.ID == rs.cluster.NodeID() { + continue + } + + logger.Info().Str("member", m.ToString()).Msg("add raft peer") + rs.transport.AddPeer(etcdtypes.ID(uint64(m.ID)), []string{m.Url}) + } +} + +func (rs *raftServer) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) { + if len(ents) == 0 { + return + } + firstIdx := ents[0].Index + if firstIdx > rs.appliedIndex+1 { + logger.Fatal().Msgf("first index of committed entry[%d] should <= progress.appliedIndex[%d] 1", firstIdx, rs.appliedIndex) + } + if rs.appliedIndex-firstIdx+1 < uint64(len(ents)) { + nents = ents[rs.appliedIndex-firstIdx+1:] + } + return nents +} + +var ( + ErrInvCCType = errors.New("change type of ") +) + +func (rs *raftServer) ValidateConfChangeEntry(entry *raftpb.Entry) (*raftpb.ConfChange, *consensus.Member, error) { + // TODO XXX validate from current cluster configure + var cc *raftpb.ConfChange + var member *consensus.Member + var err error + + alreadyApplied := func(entry *raftpb.Entry) bool { + return rs.cluster.appliedTerm >= entry.Term || rs.cluster.appliedIndex >= entry.Index + } + + if alreadyApplied(entry) { + return nil, nil, ErrCCAlreadyApplied + } + + unmarshalConfChangeEntry := func() (*raftpb.ConfChange, *consensus.Member, error) { + var cc raftpb.ConfChange + + if err := cc.Unmarshal(entry.Data); err != nil { + logger.Fatal().Err(err).Uint64("idx", entry.Index).Uint64("term", entry.Term).Msg("failed to unmarshal of conf change entry") + return nil, nil, err + } + + // skip confChange of empty context + if len(cc.Context) == 0 { + return nil, nil, nil + } + + var member = consensus.Member{} + if err := json.Unmarshal(cc.Context, &member); err != nil { + logger.Fatal().Err(err).Uint64("idx", entry.Index).Uint64("term", entry.Term).Msg("failed to unmarshal of context of cc entry") + return nil, nil, err + } + + return &cc, &member, nil + } + + cc, member, err = unmarshalConfChangeEntry() + + if err = rs.cluster.validateChangeMembership(cc, member, true); err != nil { + return cc, member, err + } + + return cc, member, nil +} + +// TODO refactoring by cc.Type +// separate unmarshal & apply[type] +// applyConfChange returns false if this node is removed from cluster +func (rs *raftServer) applyConfChange(ent *raftpb.Entry) bool { + var cc *raftpb.ConfChange + var member *consensus.Member + var err error + + if cc, member, err = rs.ValidateConfChangeEntry(ent); err != nil { + logger.Warn().Err(err).Str("cluster", rs.cluster.toString()).Msg("failed to validate conf change") + // reset pending conf change + cc.NodeID = raftlib.None + return true + } + + rs.confState = rs.node.ApplyConfChange(*cc) + + logger.Info().Str("type", cc.Type.String()).Str("member", member.ToString()).Msg("publish confChange entry") + + switch cc.Type { + case raftpb.ConfChangeAddNode: + if err := rs.cluster.addMember(member, true); err != nil { + logger.Fatal().Str("member", member.ToString()).Msg("failed to add member to cluster") + } + + if len(cc.Context) > 0 && rs.id != cc.NodeID { + rs.transport.AddPeer(etcdtypes.ID(cc.NodeID), []string{member.Url}) + } else { + logger.Debug().Msg("skip add peer myself for addnode ") + } + case raftpb.ConfChangeRemoveNode: + if err := rs.cluster.removeMember(member); err != nil { + logger.Fatal().Str("member", member.ToString()).Msg("failed to add member to cluster") + } + + if cc.NodeID == uint64(rs.id) { + 
logger.Info().Msg("I've been removed from the cluster! Shutting down.") + return false + } + rs.transport.RemovePeer(etcdtypes.ID(cc.NodeID)) + } + + logger.Debug().Str("cluster", rs.cluster.toString()).Msg("after conf changed") + + rs.cluster.sendConfChangeReply(cc, member, nil) + + return true +} + +// publishEntries writes committed log entries to commit channel and returns +// whether all entries could be published. +func (rs *raftServer) publishEntries(ents []raftpb.Entry) bool { + var lastBlockEnt *raftpb.Entry + + for i := range ents { + logger.Info().Uint64("idx", ents[i].Index).Uint64("term", ents[i].Term).Str("type", ents[i].Type.String()).Int("datalen", len(ents[i].Data)).Msg("publish entry") + + switch ents[i].Type { + case raftpb.EntryNormal: + var block *types.Block + var err error + if len(ents[i].Data) != 0 { + if block, err = unmarshalEntryData(ents[i].Data); err != nil { + logger.Fatal().Err(err).Uint64("idx", ents[i].Index).Uint64("term", ents[i].Term).Msg("commit entry is corrupted") + continue + } + + if block != nil { + logger.Info().Str("hash", block.ID()).Uint64("no", block.BlockNo()).Msg("commit normal block entry") + rs.commitProgress.UpdateRequest(&commitEntry{block: block, index: ents[i].Index, term: ents[i].Term}) + } + } + + select { + case rs.commitC <- &commitEntry{block: block, index: ents[i].Index, term: ents[i].Term}: + case <-rs.stopc: + return false + } + + case raftpb.EntryConfChange: + if !rs.applyConfChange(&ents[i]) { + return false + } + } + + // after commit, update appliedIndex + rs.setAppliedIndex(ents[i].Index) + } + + if lastBlockEnt != nil { + } + return true +} + +func (rs *raftServer) setSnapshotIndex(idx uint64) { + logger.Debug().Uint64("index", idx).Msg("raft server set snapshotIndex") + + rs.snapshotIndex = idx +} + +func (rs *raftServer) setAppliedIndex(idx uint64) { + logger.Debug().Uint64("index", idx).Msg("raft server set appliedIndex") + + rs.appliedIndex = idx +} + +func (rs *raftServer) setConfState(state *raftpb.ConfState) { + logger.Debug().Str("state", consensus.ConfStateToString(state)).Msg("raft server set confstate") + + rs.confState = state +} + +func (rs *raftServer) Process(ctx context.Context, m raftpb.Message) error { + return rs.node.Step(ctx, m) +} + +func (rs *raftServer) IsIDRemoved(id uint64) bool { + return rs.cluster.IsIDRemoved(id) +} + +func (rs *raftServer) ReportUnreachable(id uint64) { + logger.Debug().Str("toID", MemberIDToString(id)).Msg("report unreachable") + + rs.node.ReportUnreachable(id) +} + +func (rs *raftServer) ReportSnapshot(id uint64, status raftlib.SnapshotStatus) { + if status == raftlib.SnapshotFinish { + logger.Debug().Str("toID", MemberIDToString(id)).Bool("isSucceed", status == raftlib.SnapshotFinish).Msg("report snapshot result") + } + + rs.node.ReportSnapshot(id, status) +} + +func (rs *raftServer) WaitStartup() { + logger.Debug().Msg("raft start wait") + for s := range rs.commitC { + if s == nil { + break + } + } + logger.Debug().Msg("raft start succeed") +} + +func (rs *raftServer) updateLeader(softState *raftlib.SoftState) { + if softState.Lead != rs.GetLeader() { + atomic.StoreUint64(&rs.leaderStatus.leader, softState.Lead) + + rs.leaderStatus.leaderChanged++ + + logger.Info().Str("ID", MemberIDToString(rs.id)).Str("leader", MemberIDToString(softState.Lead)).Msg("leader changed") + } else { + logger.Info().Str("ID", MemberIDToString(rs.id)).Str("leader", MemberIDToString(softState.Lead)).Msg("soft state leader unchanged") + } +} + +func (rs *raftServer) GetLeader() uint64 { + return 
atomic.LoadUint64(&rs.leaderStatus.leader) +} + +func (rs *raftServer) IsLeader() bool { + return rs.id != consensus.InvalidMemberID && rs.id == rs.GetLeader() +} + +func (rs *raftServer) Status() raftlib.Status { + node := rs.getNodeSync() + if node == nil { + return raftlib.Status{} + } + + return node.Status() +} + +// GetExistingCluster returns information of existing cluster. +// It request member info to all of peers. +func (rs *raftServer) GetExistingCluster() (*Cluster, error) { + var ( + cl *Cluster + err error + ) + for i := 1; i <= MaxTryGetCluster; i++ { + cl, err = GetClusterInfo(rs.ComponentHub) + if err != nil { + if err != ErrGetClusterTimeout && i != MaxTryGetCluster { + logger.Debug().Err(err).Int("try", i).Msg("failed try to get cluster. and sleep") + time.Sleep(time.Second * 10) + } else { + logger.Warn().Err(err).Int("try", i).Msg("failed try to get cluster") + } + continue + } + + return cl, nil + } + + return nil, ErrGetClusterFail +} + +func marshalEntryData(block *types.Block) ([]byte, error) { + var data []byte + var err error + if data, err = proto.Marshal(block); err != nil { + logger.Fatal().Err(err).Msg("poposed data is invalid") + } + + return data, nil +} + +var ( + ErrUnmarshal = errors.New("failed to unmarshalEntryData log entry") +) + +func unmarshalEntryData(data []byte) (*types.Block, error) { + block := &types.Block{} + if err := proto.Unmarshal(data, block); err != nil { + return block, ErrUnmarshal + } + + return block, nil +} diff --git a/consensus/impl/raftv2/snapshot.go b/consensus/impl/raftv2/snapshot.go new file mode 100644 index 000000000..e68d13840 --- /dev/null +++ b/consensus/impl/raftv2/snapshot.go @@ -0,0 +1,207 @@ +package raftv2 + +import ( + "errors" + chainsvc "github.com/aergoio/aergo/chain" + "github.com/aergoio/aergo/consensus" + "github.com/aergoio/aergo/consensus/chain" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" + "github.com/aergoio/aergo/pkg/component" + "github.com/aergoio/aergo/types" + "github.com/aergoio/etcd/raft/raftpb" + "github.com/libp2p/go-libp2p-peer" + "io" + "sync" + "time" +) + +var ( + DfltTimeWaitPeerLive = time.Second * 5 + ErrNotMsgSnap = errors.New("not pb.MsgSnap") + ErrClusterMismatchConfState = errors.New("members of cluster doesn't match with raft confstate") +) + +type getLeaderFuncType func() uint64 + +type ChainSnapshotter struct { + sync.Mutex + + pa p2pcommon.PeerAccessor + + *component.ComponentHub + cluster *Cluster + + walDB *WalDB + + getLeaderFunc getLeaderFuncType +} + +func newChainSnapshotter(pa p2pcommon.PeerAccessor, hub *component.ComponentHub, cluster *Cluster, walDB *WalDB, getLeader getLeaderFuncType) *ChainSnapshotter { + return &ChainSnapshotter{pa: pa, ComponentHub: hub, cluster: cluster, walDB: walDB, getLeaderFunc: getLeader} +} + +func (chainsnap *ChainSnapshotter) setPeerAccessor(pa p2pcommon.PeerAccessor) { + chainsnap.Lock() + defer chainsnap.Unlock() + + chainsnap.pa = pa +} + +/* createSnapshot isn't used this api since new MsgSnap isn't made +// createSnapshot make marshalled data of chain & cluster info +func (chainsnap *ChainSnapshotter) createSnapshot(prevProgress BlockProgress, confState raftpb.ConfState) (*raftpb.Snapshot, error) { + if prevProgress.isEmpty() { + return nil, ErrEmptyProgress + } + + snapdata, err := chainsnap.createSnapshotData(chainsnap.cluster, prevProgress.block) + if err != nil { + logger.Fatal().Err(err).Msg("make snapshot of chain") + return nil, err + } + + + data, err := snapdata.Encode() + if err != nil { 
+ logger.Fatal().Err(err).Msg("failed to marshale snapshot of chain") + return nil, err + } + + snapshot := &raftpb.Snapshot{ + Metadata: raftpb.SnapshotMetadata{ + Index: prevProgress.index, + Term: prevProgress.term, + ConfState: confState, + }, + Data: data, + } + + logger.Info().Str("snapshot", consensus.SnapToString(snapshot, snapdata)).Msg("raft snapshot for remote") + + return snapshot, nil +} +*/ + +// createSnapshotData generate serialized data of chain and cluster info +func (chainsnap *ChainSnapshotter) createSnapshotData(cluster *Cluster, snapBlock *types.Block, confstate *raftpb.ConfState) (*consensus.SnapshotData, error) { + logger.Info().Str("hash", snapBlock.ID()).Uint64("no", snapBlock.BlockNo()).Msg("create new snapshot data of block") + + cluster.Lock() + defer cluster.Unlock() + + if !cluster.isMatch(confstate) { + logger.Fatal().Str("confstate", consensus.ConfStateToString(confstate)).Str("cluster", cluster.toString()).Msg("cluster doesn't match with confstate") + return nil, ErrClusterMismatchConfState + } + + members := cluster.AppliedMembers().ToArray() + removedMembers := cluster.RemovedMembers().ToArray() + + snap := consensus.NewSnapshotData(members, removedMembers, snapBlock) + if snap == nil { + panic("new snap failed") + } + + return snap, nil +} + +// chainSnapshotter rece ives snapshot from http request +// TODO replace rafthttp with p2p +func (chainsnap *ChainSnapshotter) SaveFromRemote(r io.Reader, id uint64, msg raftpb.Message) (int64, error) { + defer RecoverExit() + + if msg.Type != raftpb.MsgSnap { + logger.Error().Int32("type", int32(msg.Type)).Msg("received msg snap is invalid type") + return 0, ErrNotMsgSnap + } + + // not return until block sync is complete + // receive chain & request sync & wait + return 0, chainsnap.syncSnap(&msg.Snapshot) +} + +func (chainsnap *ChainSnapshotter) syncSnap(snap *raftpb.Snapshot) error { + var snapdata = &consensus.SnapshotData{} + + err := snapdata.Decode(snap.Data) + if err != nil { + logger.Error().Msg("failed to unmarshal snapshot data to write") + return err + } + + // write snapshot log in WAL for crash recovery + logger.Info().Str("snap", consensus.SnapToString(snap, snapdata)).Msg("start to sync snapshot") + // TODO request sync for chain with snapshot.data + // wait to finish sync of chain + if err := chainsnap.requestSync(&snapdata.Chain); err != nil { + logger.Error().Err(err).Msg("failed to sync snapshot") + return err + } + + logger.Info().Str("snap", consensus.SnapToString(snap, snapdata)).Msg("finished to sync snapshot") + + return nil +} + +func (chainsnap *ChainSnapshotter) checkPeerLive(peerID peer.ID) bool { + if chainsnap.pa == nil { + logger.Fatal().Msg("peer accessor of chain snapshotter is not set") + } + + _, ok := chainsnap.pa.GetPeer(peerID) + return ok +} + +// TODO handle error case that leader stops while synchronizing +func (chainsnap *ChainSnapshotter) requestSync(snap *consensus.ChainSnapshot) error { + + var leader uint64 + getSyncLeader := func() (peer.ID, error) { + var peerID peer.ID + var err error + + for { + leader = chainsnap.getLeaderFunc() + + if leader == HasNoLeader { + peerID, err = chainsnap.cluster.getAnyPeerAddressToSync() + if err != nil { + logger.Error().Err(err).Str("leader", MemberIDToString(leader)).Msg("can't get peeraddress of leader") + return "", err + } + } else { + peerID, err = chainsnap.cluster.Members().getMemberPeerAddress(leader) + if err != nil { + logger.Error().Err(err).Str("leader", MemberIDToString(leader)).Msg("can't get peeraddress of leader") 
+ return "", err + } + } + + if chainsnap.checkPeerLive(peerID) { + break + } + + logger.Debug().Str("peer", p2putil.ShortForm(peerID)).Str("leader", MemberIDToString(leader)).Msg("peer is not alive") + + time.Sleep(DfltTimeWaitPeerLive) + } + + logger.Debug().Str("peer", p2putil.ShortForm(peerID)).Str("leader", MemberIDToString(leader)).Msg("target peer to sync") + + return peerID, err + } + + chainsvc.TestDebugger.Check(chainsvc.DEBUG_SYNCER_CRASH, 1, nil) + + peerID, err := getSyncLeader() + if err != nil { + return err + } + + if err := chain.SyncChain(chainsnap.ComponentHub, snap.Hash, snap.No, peerID); err != nil { + return err + } + + return nil +} diff --git a/consensus/impl/raftv2/waldb.go b/consensus/impl/raftv2/waldb.go new file mode 100644 index 000000000..c72f94c47 --- /dev/null +++ b/consensus/impl/raftv2/waldb.go @@ -0,0 +1,202 @@ +package raftv2 + +import ( + "errors" + "github.com/aergoio/aergo/consensus" + "github.com/aergoio/aergo/types" + "github.com/aergoio/etcd/raft" + "github.com/aergoio/etcd/raft/raftpb" +) + +var ( + ErrInvalidEntry = errors.New("Invalid raftpb.entry") + ErrWalEntryTooLowTerm = errors.New("term of wal entry is too low") +) + +type WalDB struct { + consensus.ChainWAL +} + +func NewWalDB(chainWal consensus.ChainWAL) *WalDB { + return &WalDB{chainWal} +} + +func (wal *WalDB) SaveEntry(state raftpb.HardState, entries []raftpb.Entry) error { + if len(entries) != 0 { + walEnts, blocks := wal.convertFromRaft(entries) + + if err := wal.WriteRaftEntry(walEnts, blocks); err != nil { + return err + } + } + + // hardstate must save after entries since entries may include commited one + if !raft.IsEmptyHardState(state) { + // save hardstate + if err := wal.WriteHardState(&state); err != nil { + return err + } + } + + return nil +} + +func (wal *WalDB) convertFromRaft(entries []raftpb.Entry) ([]*consensus.WalEntry, []*types.Block) { + lenEnts := len(entries) + if lenEnts == 0 { + return nil, nil + } + + getWalEntryType := func(entry *raftpb.Entry) consensus.EntryType { + switch entry.Type { + case raftpb.EntryNormal: + if entry.Data != nil { + return consensus.EntryBlock + } else { + return consensus.EntryEmpty + } + case raftpb.EntryConfChange: + return consensus.EntryConfChange + default: + panic("not support raftpb entrytype") + } + } + + getWalData := func(entry *raftpb.Entry) (*types.Block, []byte, error) { + if entry.Type == raftpb.EntryNormal && entry.Data != nil { + block, err := unmarshalEntryData(entry.Data) + if err != nil { + return nil, nil, ErrInvalidEntry + } + + return block, block.BlockHash(), nil + } else { + return nil, entry.Data, nil + } + } + + blocks := make([]*types.Block, lenEnts) + walents := make([]*consensus.WalEntry, lenEnts) + + var ( + data []byte + block *types.Block + err error + ) + for i, entry := range entries { + if block, data, err = getWalData(&entry); err != nil { + panic("entry unmarshalEntryData error") + } + + blocks[i] = block + + walents[i] = &consensus.WalEntry{ + Type: getWalEntryType(&entry), + Term: entry.Term, + Index: entry.Index, + Data: data, + } + } + + return walents, blocks +} + +var ErrInvalidWalEntry = errors.New("invalid wal entry") +var ErrWalConvBlock = errors.New("failed to convert bytes of block from wal entry") + +func (wal *WalDB) convertWalToRaft(walEntry *consensus.WalEntry) (*raftpb.Entry, error) { + var raftEntry = &raftpb.Entry{Term: walEntry.Term, Index: walEntry.Index} + + getDataFromWalEntry := func(walEntry *consensus.WalEntry) ([]byte, error) { + if walEntry.Type != consensus.EntryBlock { 
+ return nil, ErrWalConvBlock + } + block, err := wal.GetBlock(walEntry.Data) + if err != nil { + return nil, err + } + data, err := marshalEntryData(block) + if err != nil { + return nil, err + } + + return data, nil + } + + switch walEntry.Type { + case consensus.EntryConfChange: + raftEntry.Type = raftpb.EntryConfChange + raftEntry.Data = walEntry.Data + + case consensus.EntryEmpty: + raftEntry.Type = raftpb.EntryNormal + raftEntry.Data = nil + + case consensus.EntryBlock: + data, err := getDataFromWalEntry(walEntry) + if err != nil { + return nil, err + } + raftEntry.Data = data + default: + return nil, ErrInvalidWalEntry + } + + return raftEntry, nil +} + +var ( + ErrWalGetHardState = errors.New("failed to read hard state") + ErrWalGetLastIdx = errors.New("failed to read last Idx") +) + +// ReadAll returns hard state, all uncommitted entries +// - read last hard state +// - read all uncommited entries after snapshot index +func (wal *WalDB) ReadAll(snapshot *raftpb.Snapshot) (id *consensus.RaftIdentity, state *raftpb.HardState, ents []raftpb.Entry, err error) { + if id, err = wal.GetIdentity(); err != nil { + return nil, state, ents, err + } + + state, err = wal.GetHardState() + if err != nil { + return id, state, ents, ErrWalGetHardState + } + + commitIdx := state.Commit + lastIdx, err := wal.GetRaftEntryLastIdx() + if err != nil { + return id, state, ents, ErrWalGetLastIdx + } + + var snapIdx, snapTerm uint64 + if snapshot != nil { + snapIdx = snapshot.Metadata.Index + snapTerm = snapshot.Metadata.Term + } + + logger.Info().Uint64("snapidx", snapIdx).Uint64("snapterm", snapTerm).Uint64("commit", commitIdx).Uint64("last", lastIdx).Msg("read all entries of wal") + + for i := snapIdx + 1; i <= lastIdx; i++ { + walEntry, err := wal.GetRaftEntry(i) + if err != nil { + logger.Error().Err(err).Uint64("idx", i).Msg("failed to get raft entry") + return id, state, nil, err + } + + if walEntry.Term < snapTerm { + logger.Error().Str("wal", walEntry.ToString()).Err(ErrWalEntryTooLowTerm).Msg("invalid wal entry") + return id, state, nil, ErrWalEntryTooLowTerm + } + + raftEntry, err := wal.convertWalToRaft(walEntry) + if err != nil { + return id, state, nil, err + } + + logger.Debug().Str("walentry", walEntry.ToString()).Msg("read wal entry") + ents = append(ents, *raftEntry) + } + + return id, state, ents, nil +} diff --git a/consensus/impl/sbp/sbp.go b/consensus/impl/sbp/sbp.go index 29d5c1f14..b8abff42c 100644 --- a/consensus/impl/sbp/sbp.go +++ b/consensus/impl/sbp/sbp.go @@ -4,7 +4,6 @@ import ( "runtime" "time" - "github.com/aergoio/aergo-lib/db" "github.com/aergoio/aergo-lib/log" bc "github.com/aergoio/aergo/chain" "github.com/aergoio/aergo/config" @@ -31,10 +30,10 @@ type txExec struct { execTx bc.TxExecFn } -func newTxExec(blockNo types.BlockNo, ts int64, prevHash []byte, chainID []byte) chain.TxOp { +func newTxExec(cdb consensus.ChainDB, blockNo types.BlockNo, ts int64, prevHash []byte, chainID []byte) chain.TxOp { // Block hash not determined yet return &txExec{ - execTx: bc.NewTxExecutor(blockNo, ts, prevHash, contract.BlockFactory, chainID), + execTx: bc.NewTxExecutor(contract.ChainAccessor(cdb), blockNo, ts, prevHash, contract.BlockFactory, chainID), } } @@ -154,7 +153,7 @@ func (s *SimpleBlockFactory) Update(block *types.Block) { } // Save has nothging to do. 
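An aside on the WalDB conversion helpers above (illustrative, not part of this diff): convertFromRaft keeps raft WAL entries small by storing only the block hash in an EntryBlock entry and handing the block body to WriteRaftEntry separately, while convertWalToRaft re-fetches the body by hash and re-marshals it during replay. A toy, self-contained sketch of that store-hash/refetch round trip, with a map and sha256 standing in for the real block store and block hash:

package main

import (
	"crypto/sha256"
	"fmt"
)

// walEntry mimics the idea of consensus.WalEntry for block entries: it carries a hash, not the body.
type walEntry struct {
	index uint64
	hash  [32]byte
}

func main() {
	blockStore := map[[32]byte][]byte{} // stands in for the block column written by WriteRaftEntry

	body := []byte("serialized block body")
	h := sha256.Sum256(body)

	// "convertFromRaft": persist the body once, keep only its hash in the WAL entry.
	blockStore[h] = body
	ent := walEntry{index: 1, hash: h}

	// "convertWalToRaft": on replay, resolve the hash back to the full body.
	replayed, ok := blockStore[ent.hash]
	fmt.Println(ok, string(replayed))
}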
-func (s *SimpleBlockFactory) Save(tx db.Transaction) error { +func (s *SimpleBlockFactory) Save(tx consensus.TxWriter) error { return nil } @@ -184,7 +183,7 @@ func (s *SimpleBlockFactory) Start() { txOp := chain.NewCompTxOp( s.txOp, - newTxExec(prevBlock.GetHeader().GetBlockNo()+1, ts, prevBlock.GetHash(), prevBlock.GetHeader().GetChainID()), + newTxExec(s.ChainDB, prevBlock.GetHeader().GetBlockNo()+1, ts, prevBlock.GetHash(), prevBlock.GetHeader().GetChainID()), ) block, err := chain.GenerateBlock(s, prevBlock, blockState, txOp, ts, false) @@ -198,7 +197,7 @@ func (s *SimpleBlockFactory) Start() { Str("TrieRoot", enc.ToString(block.GetHeader().GetBlocksRootHash())). Err(err).Msg("block produced") - chain.ConnectBlock(s, block, blockState) + chain.ConnectBlock(s, block, blockState, time.Second) } case <-s.quit: return @@ -220,3 +219,19 @@ func (s *SimpleBlockFactory) Info() string { func (s *SimpleBlockFactory) ConsensusInfo() *types.ConsensusInfo { return &types.ConsensusInfo{Type: GetName()} } + +func (s *SimpleBlockFactory) NeedNotify() bool { + return true +} + +func (s *SimpleBlockFactory) HasWAL() bool { + return false +} + +func (s *SimpleBlockFactory) ConfChange(req *types.MembershipChange) (*consensus.Member, error) { + return nil, consensus.ErrNotSupportedMethod +} + +func (s *SimpleBlockFactory) ClusterInfo() ([]*types.MemberAttr, []byte, error) { + return nil, nil, consensus.ErrNotSupportedMethod +} diff --git a/consensus/raftCommon.go b/consensus/raftCommon.go new file mode 100644 index 000000000..52000eb8f --- /dev/null +++ b/consensus/raftCommon.go @@ -0,0 +1,413 @@ +package consensus + +import ( + "bytes" + "crypto/sha1" + "encoding/binary" + "encoding/gob" + "encoding/json" + "errors" + "fmt" + "github.com/aergoio/aergo/internal/enc" + "github.com/aergoio/aergo/p2p/p2putil" + "github.com/aergoio/aergo/types" + "github.com/aergoio/etcd/raft/raftpb" + "github.com/libp2p/go-libp2p-peer" + "net" + "net/url" +) + +type EntryType int8 + +const ( + EntryBlock EntryType = iota + EntryEmpty // it is generated when node becomes leader + EntryConfChange + InvalidMemberID = 0 +) + +var ( + WalEntryType_name = map[EntryType]string{ + 0: "EntryBlock", + 1: "EntryEmpty", + 2: "EntryConfChange", + } + + ErrURLInvalidScheme = errors.New("url has invalid scheme") + ErrURLInvalidPort = errors.New("url must have host:port style") + ErrInvalidMemberID = errors.New("member id of conf change doesn't match") + ErrEmptySnapData = errors.New("failed to decode snapshot data. 
encoded data is empty") +) + +type WalEntry struct { + Type EntryType + Term uint64 + Index uint64 + Data []byte // hash is set if Type is EntryBlock +} + +func (we *WalEntry) ToBytes() ([]byte, error) { + var val bytes.Buffer + encoder := gob.NewEncoder(&val) + if err := encoder.Encode(we); err != nil { + panic("raft entry to bytes error") + return nil, err + } + + return val.Bytes(), nil +} + +func (we *WalEntry) ToString() string { + if we == nil { + return "wal entry is nil" + } + return fmt.Sprintf("wal entry[type:%s, index:%d, term:%d", WalEntryType_name[we.Type], we.Index, we.Term) +} + +type RaftIdentity struct { + ID uint64 + Name string +} + +func (rid *RaftIdentity) ToString() string { + if rid == nil { + return "raft identity is nil" + } + return fmt.Sprintf("raft identity[name:%s, nodeid:%x]", rid.Name, rid.ID) +} + +type ChainWAL interface { + ChainDB + + IsWALInited() bool + GetBlock(blockHash []byte) (*types.Block, error) + ReadAll() (state raftpb.HardState, ents []raftpb.Entry, err error) + WriteRaftEntry([]*WalEntry, []*types.Block) error + GetRaftEntry(idx uint64) (*WalEntry, error) + HasWal() (bool, error) + GetRaftEntryLastIdx() (uint64, error) + GetHardState() (*raftpb.HardState, error) + WriteHardState(hardstate *raftpb.HardState) error + WriteSnapshot(snap *raftpb.Snapshot) error + GetSnapshot() (*raftpb.Snapshot, error) + WriteIdentity(id *RaftIdentity) error + GetIdentity() (*RaftIdentity, error) +} + +type SnapshotData struct { + Chain ChainSnapshot `json:"chain"` + Members []*Member `json:"members"` + RemovedMembers []*Member +} + +func NewSnapshotData(members []*Member, rmMembers []*Member, block *types.Block) *SnapshotData { + if block == nil { + return nil + } + + return &SnapshotData{ + Chain: *NewChainSnapshot(block), + Members: members, + RemovedMembers: rmMembers, + } +} + +func (snapd *SnapshotData) Encode() ([]byte, error) { + return json.Marshal(snapd) +} + +func (snapd *SnapshotData) Decode(data []byte) error { + if len(data) == 0 { + return ErrEmptySnapData + } + return json.Unmarshal(data, snapd) +} + +func (snapd *SnapshotData) Equal(t *SnapshotData) bool { + if !snapd.Chain.Equal(&t.Chain) { + return false + } + + if len(t.Members) != len(snapd.Members) { + return false + } + + for i, m := range snapd.Members { + tMbr := t.Members[i] + + if !m.Equal(tMbr) { + return false + } + } + + return true +} + +func (snapd *SnapshotData) ToString() string { + var buf string + + buf += fmt.Sprintf("chain:%s, ", snapd.Chain.ToString()) + + printMembers := func(mbrs []*Member, name string) { + if len(mbrs) > 0 { + buf += fmt.Sprintf("%s[", name) + + for i, m := range mbrs { + buf += fmt.Sprintf("#%d{%s}", i, m.ToString()) + } + + buf += fmt.Sprintf("]") + } + } + + printMembers(snapd.Members, "members") + printMembers(snapd.RemovedMembers, "removed members") + + return buf +} + +type ChainSnapshot struct { + No types.BlockNo `json:"no"` + Hash []byte `json:"hash"` +} + +func NewChainSnapshot(block *types.Block) *ChainSnapshot { + if block == nil { + return nil + } + + return &ChainSnapshot{No: block.BlockNo(), Hash: block.BlockHash()} +} + +func (csnap *ChainSnapshot) Equal(other *ChainSnapshot) bool { + return csnap.No == other.No && bytes.Equal(csnap.Hash, other.Hash) +} + +func (csnap *ChainSnapshot) ToString() string { + if csnap == nil || csnap.Hash == nil { + return "csnap: empty" + } + return fmt.Sprintf("chainsnap:(no=%d, hash=%s)", csnap.No, enc.ToString(csnap.Hash)) +} + +/* +func (csnap *ChainSnapshot) Encode() ([]byte, error) { + var val 
bytes.Buffer + + encoder := gob.NewEncoder(&val) + if err := encoder.Encode(csnap); err != nil { + logger.Fatal().Err(err).Msg("failed to encode chainsnap") + return nil, err + } + + return val.Bytes(), nil +} + +func DecodeChainSnapshot(data []byte) (*ChainSnapshot, error) { + var snap ChainSnapshot + var b bytes.Buffer + b.Write(data) + + if data == nil { + return nil, ErrEmptySnapData + } + + decoder := gob.NewDecoder(&b) + if err := decoder.Decode(&snap); err != nil { + logger.Fatal().Err(err).Msg("failed to decode chainsnap") + return nil, err + } + + return &snap, nil +}*/ + +func ConfStateToString(conf *raftpb.ConfState) string { + var buf string + + if len(conf.Nodes) > 0 { + buf = fmt.Sprintf("node") + for _, node := range conf.Nodes { + buf = buf + fmt.Sprintf("[%x]", node) + } + } + + if len(conf.Learners) > 0 { + buf = buf + fmt.Sprintf(".learner") + for _, learner := range conf.Learners { + buf = buf + fmt.Sprintf("[%x]", learner) + } + } + return buf +} + +func SnapToString(snap *raftpb.Snapshot, snapd *SnapshotData) string { + var buf string + buf = buf + fmt.Sprintf("snap=[index:%d term:%d conf:%s]", snap.Metadata.Index, snap.Metadata.Term, ConfStateToString(&snap.Metadata.ConfState)) + + if snapd != nil { + buf = buf + fmt.Sprintf(", %s", snapd.ToString()) + } + + return buf +} + +type Member struct { + types.MemberAttr +} + +func NewMember(name string, url string, peerID peer.ID, chainID []byte, when int64) *Member { + //check unique + m := &Member{MemberAttr: types.MemberAttr{Name: name, Url: url, PeerID: []byte(peerID)}} + + //make ID + m.CalculateMemberID(chainID, when) + + return m +} + +func (m *Member) Clone() *Member { + newM := Member{MemberAttr: types.MemberAttr{ID: m.ID, Name: m.Name, Url: m.Url}} + + copy(newM.PeerID, m.PeerID) + + return &newM +} + +func (m *Member) SetAttr(attr *types.MemberAttr) { + m.MemberAttr = *attr +} + +func (m *Member) SetMemberID(id uint64) { + m.ID = id +} + +func (m *Member) CalculateMemberID(chainID []byte, curTimestamp int64) { + var buf []byte + + buf = append(buf, []byte(m.Name)...) + buf = append(buf, []byte(chainID)...) + buf = append(buf, []byte(fmt.Sprintf("%d", curTimestamp))...) 
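// Note: illustrative aside, not part of this diff. The ID assigned just below is simply
// the first 8 bytes, little-endian, of SHA-1 over name || chainID || timestamp, so it is
// deterministic for a given triple. Standalone equivalent with made-up inputs:
//
//	buf := []byte("bp1")
//	buf = append(buf, []byte("example-chain-id")...)
//	buf = append(buf, []byte(fmt.Sprintf("%d", int64(1557900000)))...)
//	sum := sha1.Sum(buf)
//	id := binary.LittleEndian.Uint64(sum[:8]) // identical on every node given identical inputs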
+ + hash := sha1.Sum(buf) + m.ID = binary.LittleEndian.Uint64(hash[:8]) +} + +func (m *Member) IsValid() bool { + if m.ID == InvalidMemberID || len(m.PeerID) == 0 || len(m.Name) == 0 || len(m.Url) == 0 { + return false + } + + if _, err := ParseToUrl(m.Url); err != nil { + logger.Error().Err(err).Msg("parse url of member") + return false + } + + return true +} + +func (m *Member) GetPeerID() peer.ID { + return peer.ID(m.PeerID) +} + +func (m *Member) Equal(other *Member) bool { + return m.ID == other.ID && + bytes.Equal(m.PeerID, other.PeerID) && + m.Name == other.Name && + m.Url == other.Url && + bytes.Equal([]byte(m.PeerID), []byte(other.PeerID)) +} + +func (m *Member) ToString() string { + return fmt.Sprintf("{Name:%s, ID:%x, Url:%s, PeerID:%s}", m.Name, m.ID, m.Url, p2putil.ShortForm(peer.ID(m.PeerID))) +} + +func (m *Member) HasDuplicatedAttr(x *Member) bool { + if m.Name == x.Name || m.ID == x.ID || m.Url == x.Url || bytes.Equal(m.PeerID, x.PeerID) { + return true + } + + return false +} + +/* +func (m *Member) MarshalJSON() ([]byte, error) { + nj := NewJsonMember(m) + return json.Marshal(nj) +} + +func (m *Member) UnmarshalJSON(data []byte) error { + var err error + jm := JsonMember{} + + if err := json.Unmarshal(data, &jm); err != nil { + return err + } + + *m, err = jm.Member() + if err != nil { + return err + } + + return nil +} +type JsonMember struct { + ID MemberID `json:"id"` + Name string `json:"name"` + Url string `json:"url"` + PeerID string `json:"peerid"` +} + +func NewJsonMember(m *Member) JsonMember { + return JsonMember{ID: m.ID, Name: m.Name, Url: m.Url, PeerID: peer.IDB58Encode(m.PeerID)} +} + +func (jm *JsonMember) Member() (Member, error) { + peerID, err := peer.IDB58Decode(jm.PeerID) + if err != nil { + return Member{}, err + } + + return Member{ + ID: jm.ID, + Name: jm.Name, + Url: jm.Url, + PeerID: peerID, + }, nil +} +*/ + +// IsCompatible checks if name, url and peerid of this member are the same with other member +func (m *Member) IsCompatible(other *Member) bool { + return m.Name == other.Name && m.Url == other.Url && bytes.Equal(m.PeerID, other.PeerID) +} + +type MembersByName []*Member + +func (mbrs MembersByName) Len() int { + return len(mbrs) +} +func (mbrs MembersByName) Less(i, j int) bool { + return mbrs[i].Name < mbrs[j].Name +} +func (mbrs MembersByName) Swap(i, j int) { + mbrs[i], mbrs[j] = mbrs[j], mbrs[i] +} + +func ParseToUrl(urlstr string) (*url.URL, error) { + var urlObj *url.URL + var err error + + if urlObj, err = url.Parse(urlstr); err != nil { + return nil, err + } + + if urlObj.Scheme != "http" && urlObj.Scheme != "https" { + return nil, ErrURLInvalidScheme + } + + if _, _, err := net.SplitHostPort(urlObj.Host); err != nil { + return nil, ErrURLInvalidPort + } + + return urlObj, nil +} diff --git a/contract/contract.go b/contract/contract.go index f5cbe4b6c..674d557d5 100644 --- a/contract/contract.go +++ b/contract/contract.go @@ -49,7 +49,7 @@ func SetPreloadTx(tx *types.Tx, service int) { preLoadInfos[service].requestedTx = tx } -func Execute(bs *state.BlockState, tx *types.Tx, blockNo uint64, ts int64, prevBlockHash []byte, +func Execute(bs *state.BlockState, cdb ChainAccessor, tx *types.Tx, blockNo uint64, ts int64, prevBlockHash []byte, sender, receiver *state.V, preLoadService int) (rv string, events []*types.Event, usedFee *big.Int, err error) { txBody := tx.GetBody() @@ -97,7 +97,7 @@ func Execute(bs *state.BlockState, tx *types.Tx, blockNo uint64, ts int64, prevB if ex != nil { rv, events, cFee, err = PreCall(ex, bs, sender, 
contractState, blockNo, ts, receiver.RP(), prevBlockHash) } else { - stateSet := NewContext(bs, sender, receiver, contractState, sender.ID(), + stateSet := NewContext(bs, cdb, sender, receiver, contractState, sender.ID(), tx.GetHash(), blockNo, ts, prevBlockHash, "", true, false, receiver.RP(), preLoadService, txBody.GetAmountBigInt()) @@ -167,7 +167,7 @@ func preLoadWorker() { replyCh <- &loadedReply{tx, nil, err} continue } - stateSet := NewContext(bs, nil, receiver, contractState, txBody.GetAccount(), + stateSet := NewContext(bs, nil, nil, receiver, contractState, txBody.GetAccount(), tx.GetHash(), 0, 0, nil, "", false, false, receiver.RP(), reqInfo.preLoadService, txBody.GetAmountBigInt()) diff --git a/contract/contract_module.c b/contract/contract_module.c index b82915a3b..a5b510223 100644 --- a/contract/contract_module.c +++ b/contract/contract_module.c @@ -11,7 +11,7 @@ static const char *contract_str = "contract"; static const char *call_str = "call"; static const char *delegatecall_str = "delegatecall"; static const char *deploy_str = "deploy"; -static const char *amount_str = "amount"; +static const char *amount_str = "amount_value"; static const char *fee_str = "fee"; static void set_call_obj(lua_State *L, const char* obj_name) @@ -434,6 +434,7 @@ static int moduleVote(lua_State *L) { static const luaL_Reg call_methods[] = { {"value", call_value}, + {"amount", call_value}, {"gas", call_gas}, {NULL, NULL} }; @@ -455,6 +456,7 @@ static const luaL_Reg delegate_call_meta[] = { static const luaL_Reg deploy_call_methods[] = { {"value", deploy_value}, + {"amount", deploy_value}, {NULL, NULL} }; @@ -477,14 +479,14 @@ static const luaL_Reg contract_lib[] = { int luaopen_contract(lua_State *L) { luaL_register(L, contract_str, contract_lib); - lua_createtable(L, 0, 2); + lua_createtable(L, 0, 3); luaL_register(L, NULL, call_methods); lua_createtable(L, 0, 1); luaL_register(L, NULL, call_meta); lua_setmetatable(L, -2); lua_setfield(L, -2, call_str); - lua_createtable(L, 0, 2); + lua_createtable(L, 0, 1); luaL_register(L, NULL, delegate_call_methods); lua_createtable(L, 0, 1); luaL_register(L, NULL, delegate_call_meta); diff --git a/contract/db_module.c b/contract/db_module.c index 445f731e4..5892baa05 100644 --- a/contract/db_module.c +++ b/contract/db_module.c @@ -474,29 +474,28 @@ static int db_prepare(lua_State *L) int lua_db_release_resource(lua_State *L) { - if (luaL_findtable(L, LUA_REGISTRYINDEX, RESOURCE_RS_KEY, 0) != NULL) { - luaL_error(L, "cannot find the environment of the db module"); - } - /* T */ - lua_pushnil(L); /* T nil(key) */ - while (lua_next(L, -2)) { - if (lua_islightuserdata(L, -1)) - db_rs_close(L, (db_rs_t *)lua_topointer(L, -1), 0); + lua_getfield(L, LUA_REGISTRYINDEX, RESOURCE_RS_KEY); + if (lua_istable(L, -1)) { + /* T */ + lua_pushnil(L); /* T nil(key) */ + while (lua_next(L, -2)) { + if (lua_islightuserdata(L, -1)) + db_rs_close(L, (db_rs_t *)lua_topointer(L, -1), 0); + lua_pop(L, 1); + } lua_pop(L, 1); } - lua_pop(L, 1); - - if (luaL_findtable(L, LUA_REGISTRYINDEX, RESOURCE_PSTMT_KEY, 0) != NULL) { - luaL_error(L, "cannot find the environment of the db module"); - } - /* T */ - lua_pushnil(L); /* T nil(key) */ - while (lua_next(L, -2)) { - if (lua_islightuserdata(L, -1)) - db_pstmt_close(L, (db_pstmt_t *)lua_topointer(L, -1), 0); + lua_getfield(L, LUA_REGISTRYINDEX, RESOURCE_PSTMT_KEY); + if (lua_istable(L, -1)) { + /* T */ + lua_pushnil(L); /* T nil(key) */ + while (lua_next(L, -2)) { + if (lua_islightuserdata(L, -1)) + db_pstmt_close(L, (db_pstmt_t 
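The two {"amount", ...} entries registered above simply alias the existing value methods, so contract.call.amount(...) and contract.deploy.amount(...) behave exactly like contract.call.value(...) and contract.deploy.value(...). A minimal Lua sketch of how that reads from contract code; the callee address, the "deposit" function and the numeric amount (assumed to be in aer) are placeholders:

    function forward(callee)
        -- both lines attach the same amount to the cross-contract call;
        -- amount() is the newly registered alias of value()
        contract.call.value(10000)(callee, "deposit")
        contract.call.amount(10000)(callee, "deposit")
        -- contract.deploy.amount(...) aliases contract.deploy.value(...) the same way
    end
    abi.register(forward)
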
*)lua_topointer(L, -1), 0); + lua_pop(L, 1); + } lua_pop(L, 1); } - lua_pop(L, 1); return 0; } diff --git a/contract/debug.c b/contract/debug.c index 51ed6f233..fc552f43b 100644 --- a/contract/debug.c +++ b/contract/debug.c @@ -65,6 +65,60 @@ static int reset_breakpoints_lua(lua_State *L) { return 0; } +static int set_watchpoint_lua(lua_State *L) { + const char* code = luaL_checkstring (L, 1); + CSetWatchPoint(code); + + return 0; +} + +static int delete_watchpoint_lua(lua_State *L) { + double idx = luaL_checknumber (L, 1); + CDelWatchPoint(idx); + + return 0; +} + +static int reset_watchpoints_lua(lua_State *L) { + ResetWatchPoints(); + + return 0; +} + +static int len_watchpoints_lua(lua_State *L) { + int len = CLenWatchPoints(); + + lua_pushnumber(L, len); + + return 1; +} + +static int list_watchpoints_lua(lua_State *L) { + int len = CLenWatchPoints(); + int i = 1; + + lua_newtable(L); + + for(i = 1; i <= len; i++) { + char* watch_exp = CGetWatchPoint(i); + lua_pushstring(L, watch_exp); + lua_rawseti(L, -2, i); + free(watch_exp); + } + + return 1; +} + +static int get_watchpoint_lua(lua_State *L) { + double idx = luaL_checknumber (L, 1); + char* watch_exp = CGetWatchPoint(idx); + + lua_pushstring(L, watch_exp); + free(watch_exp); + + return 1; +} + const char* vm_set_debug_hook(lua_State *L) { lua_pushcfunction(L, get_contract_info_lua); @@ -80,6 +134,19 @@ const char* vm_set_debug_hook(lua_State *L) lua_pushcfunction(L, reset_breakpoints_lua); lua_setglobal(L, "__reset_breakpoints"); + lua_pushcfunction(L, set_watchpoint_lua); + lua_setglobal(L, "__set_watchpoint"); + lua_pushcfunction(L, get_watchpoint_lua); + lua_setglobal(L, "__get_watchpoint"); + lua_pushcfunction(L, delete_watchpoint_lua); + lua_setglobal(L, "__delete_watchpoint"); + lua_pushcfunction(L, list_watchpoints_lua); + lua_setglobal(L, "__list_watchpoints"); + lua_pushcfunction(L, reset_watchpoints_lua); + lua_setglobal(L, "__reset_watchpoints"); + lua_pushcfunction(L, len_watchpoints_lua); + lua_setglobal(L, "__len_watchpoints"); + char* code = (char *)GetDebuggerCode(); luaL_loadstring(L, code); int err = lua_pcall(L, 0, LUA_MULTRET, 0); diff --git a/contract/errors.go b/contract/errors.go index df2312b0e..9e867a229 100644 --- a/contract/errors.go +++ b/contract/errors.go @@ -5,12 +5,6 @@ package contract -import "errors" - -var ( - errVmConstructorIsNotPayable = errors.New("constructor is not payable") -) - type ErrSystem interface { System() bool } @@ -20,7 +14,7 @@ func isSystemError(err error) bool { return ok && sErr.System() } -type vmStartError struct {} +type vmStartError struct{} func newVmStartError() error { return &vmStartError{} diff --git a/contract/hook_dbg.go b/contract/hook_dbg.go index 7ae3f82bf..f3f459eec 100644 --- a/contract/hook_dbg.go +++ b/contract/hook_dbg.go @@ -24,6 +24,7 @@ type contract_info struct { } var contract_info_map = make(map[string]*contract_info) +var watchpoints = list.New() func (ce *Executor) setCountHook(limit C.int) { if ce == nil || ce.L == nil { @@ -143,7 +144,6 @@ func HasBreakPoint(contract_id_hex string, line uint64) bool { //export PrintBreakPoints func PrintBreakPoints() { if len(contract_info_map) == 0 { - fmt.Printf("(empty)\n") return } for _, info := range contract_info_map { @@ -162,6 +162,42 @@ func ResetBreakPoints() { } } +func SetWatchPoint(code string) error { + if code == "" { + return errors.New("Empty string cannot be set") + } + + watchpoints.PushBack(code) + + return nil +} + +func DelWatchPoint(idx uint64) error { + if uint64(watchpoints.Len()) < idx 
{ + return errors.New("invalid index") + } + + var i uint64 = 0 + for e := watchpoints.Front(); e != nil; e = e.Next() { + i++ + if i >= idx { + watchpoints.Remove(e) + return nil + } + } + + return nil +} + +func ListWatchPoints() *list.List { + return watchpoints +} + +//export ResetWatchPoints +func ResetWatchPoints() { + watchpoints = list.New() +} + func UpdateContractInfo(contract_id_hex string, path string) { if path != "" { @@ -251,11 +287,50 @@ func CHasBreakPoint(contract_id_hex_c *C.char, line_c C.double) C.int { return C.int(0) } +//export CSetWatchPoint +func CSetWatchPoint(code_c *C.char) { + code := C.GoString(code_c) + + err := SetWatchPoint(code) + if err != nil { + ctrLog.Error().Err(err).Msg("Fail to set watchpoint") + } +} + +//export CDelWatchPoint +func CDelWatchPoint(idx_c C.double) { + idx := uint64(idx_c) + + err := DelWatchPoint(idx) + if err != nil { + ctrLog.Error().Err(err).Msg("Fail to del watchpoint") + } +} + +//export CGetWatchPoint +func CGetWatchPoint(idx_c C.int) *C.char { + idx := int(idx_c) + var i int = 0 + for e := watchpoints.Front(); e != nil; e = e.Next() { + i++ + if i == idx { + return C.CString(e.Value.(string)) + } + } + + return C.CString("") +} + +//export CLenWatchPoints +func CLenWatchPoints() C.int { + return C.int(watchpoints.Len()) +} + //export GetDebuggerCode func GetDebuggerCode() *C.char { return C.CString(` -package.preload['__debugger'] = function() + package.preload['__debugger'] = function() --{{{ history @@ -320,7 +395,6 @@ package.preload['__debugger'] = function() local step_level = {main=0} local stack_level = {main=0} local trace_level = {main=0} - local trace_lines = false local ret_file, ret_line, ret_name local current_thread = 'main' local started = false @@ -360,8 +434,8 @@ package.preload['__debugger'] = function() currently set level (see 'set'). ]], - delallb = [[ - delallb -- removes all breakpoints| + resetb = [[ + resetb -- removes all breakpoints| ]], setw = [[ @@ -378,8 +452,8 @@ package.preload['__debugger'] = function() The index is that returned when the watch expression was set by setw. ]], - delallw = [[ - delallw -- removes all watch expressions| + resetw = [[ + resetw -- removes all watch expressions| ]], run = [[ @@ -482,14 +556,6 @@ package.preload['__debugger'] = function() Can also be called from a script as dump(var,depth). ]], - tron = [[ - tron [crl] -- turn trace on for (c)alls, (r)etuns, (l)lines| - - If no parameter is given then tracing is turned off. - When tracing is turned on a line is printed to the console for each - debug 'event' selected. c=function calls, r=function returns, l=lines. - ]], - trace = [[ trace -- dumps a stack trace| @@ -533,10 +599,6 @@ package.preload['__debugger'] = function() the results, and '=var' will just print the value of 'var'. 
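With watch expressions now kept in the Go-side list behind __set_watchpoint, __list_watchpoints, __delete_watchpoint and __reset_watchpoints, a session over the renamed commands could look like the transcript below. The contract address, line number and the balance variable are made up; the prompts and messages follow the strings emitted by this debugger code:

    [DEBUG]> setw balance > 100
    Set watch exp no. 1
    [DEBUG]> listw
    1: balance > 100
    [DEBUG]> run
    Paused at contract Amg... line 12 (1) (watch expression 1: [balance > 100])
    [DEBUG]> delw 1
    Watch expression deleted
    [DEBUG]> resetw
    All watch expressions deleted
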
]], - what = [[ - what -- show where is defined (if known)| - ]], - } --}}} @@ -552,17 +614,17 @@ package.preload['__debugger'] = function() if not field then return debug.getinfo(level) end local what if field == 'name' or field == 'namewhat' then - what = 'n' + what = 'n' elseif field == 'what' or field == 'source' or field == 'linedefined' or field == 'lastlinedefined' or field == 'short_src' then - what = 'S' + what = 'S' elseif field == 'currentline' then - what = 'l' + what = 'l' elseif field == 'nups' then - what = 'u' + what = 'u' elseif field == 'func' then - what = 'f' + what = 'f' else - return debug.getinfo(level,field) + return debug.getinfo(level,field) end local ar = debug.getinfo(level,what) if ar then return ar[field] else return nil end @@ -578,46 +640,39 @@ package.preload['__debugger'] = function() --}}} --{{{ local function dumpval( level, name, value, limit ) - local dumpvisited - local function dumpval( level, name, value, limit ) local index if type(name) == 'number' then - index = string.format('[%d] = ',name) + index = string.format('[%d] = ',name) elseif type(name) == 'string' and (name == '__VARSLEVEL__' or name == '__ENVIRONMENT__' or name == '__GLOBALS__' or name == '__UPVALUES__' or name == '__LOCALS__') then --ignore these, they are debugger generated - return + return elseif type(name) == 'string' and string.find(name,'^[_%a][_.%w]*$') then - index = name ..' = ' + index = name ..' = ' else - index = string.format('[%q] = ',tostring(name)) + index = string.format('[%q] = ',tostring(name)) end if type(value) == 'table' then - if dumpvisited[value] then - indented( level, index, string.format('ref%q;',dumpvisited[value]) ) - else - dumpvisited[value] = tostring(value) if (limit or 0) > 0 and level+1 >= limit then - indented( level, index, dumpvisited[value] ) + indented( level, index, tostring(value), ';' ) else - indented( level, index, '{ -- ', dumpvisited[value] ) - for n,v in pairs(value) do - dumpval( level+1, n, v, limit ) - end - indented( level, '};' ) + indented( level, index, '{' ) + for n,v in pairs(value) do + dumpval( level+1, n, v, limit ) + end + indented( level, '};' ) end - end else - if type(value) == 'string' then - if string.len(value) > 40 then - indented( level, index, '[[', value, ']];' ) + if type(value) == 'string' then + if string.len(value) > 40 then + indented( level, index, '[[', value, ']];' ) + else + indented( level, index, string.format('%q',value), ';' ) + end else - indented( level, index, string.format('%q',value), ';' ) + indented( level, index, tostring(value), ';' ) end - else - indented( level, index, tostring(value), ';' ) - end end end @@ -625,7 +680,6 @@ package.preload['__debugger'] = function() --{{{ local function dumpvar( value, limit, name ) local function dumpvar( value, limit, name ) - dumpvisited = {} dumpval( 0, name or tostring(value), value, limit ) end @@ -650,22 +704,22 @@ package.preload['__debugger'] = function() local f = io.open(file,'r') if not f then - io.write('Cannot find '..file..' for contract '..base58_addr..'\n') + io.write('Cannot find '..file..' 
for contract '..base58_addr..'\n') return end local i = 0 for l in f:lines() do - i = i + 1 - if i >= (line-before) then - if i > (line+after) then break end - if i == line then - io.write(i..'***\t'..l..'\n') - else - io.write(i..'\t'..l..'\n') + i = i + 1 + if i >= (line-before) then + if i > (line+after) then break end + if i == line then + io.write(i..'***\t'..l..'\n') + else + io.write(i..'\t'..l..'\n') + end end end - end f:close() @@ -693,38 +747,38 @@ package.preload['__debugger'] = function() traceinfo = {} --traceinfo.pausemsg = pausemsg for ar,i in gi(l) do - table.insert( traceinfo, ar ) - if ar.what ~= 'C' then - local names = {} - local values = {} - - for n,v in gl(i-1,0) do - --for n,v in gl(i,0) do - if string.sub(n,1,1) ~= '(' then --ignore internal control variables - table.insert( names, n ) - table.insert( values, v ) - end - end - if #names > 0 then - ar.lnames = names - ar.lvalues = values - end - end - if ar.func then - local names = {} - local values = {} - for n,v in gu(ar.func,0) do - if string.sub(n,1,1) ~= '(' then --ignore internal control variables - table.insert( names, n ) - table.insert( values, v ) - end + table.insert( traceinfo, ar ) + if ar.what ~= 'C' then + local names = {} + local values = {} + + for n,v in gl(i-1,0) do + --for n,v in gl(i,0) do + if string.sub(n,1,1) ~= '(' then --ignore internal control variables + table.insert( names, n ) + table.insert( values, v ) + end + end + if #names > 0 then + ar.lnames = names + ar.lvalues = values + end end - if #names > 0 then - ar.unames = names - ar.uvalues = values + if ar.func then + local names = {} + local values = {} + for n,v in gu(ar.func,0) do + if string.sub(n,1,1) ~= '(' then --ignore internal control variables + table.insert( names, n ) + table.insert( values, v ) + end + end + if #names > 0 then + ar.unames = names + ar.uvalues = values + end end end - end end --}}} @@ -733,13 +787,13 @@ package.preload['__debugger'] = function() local function trace(set) local mark for level,ar in ipairs(traceinfo) do - if level == set then - mark = '***' - else - mark = '' - end - local contract_id_base58, _ = __get_contract_info(ar.source) - io.write('['..level..']'..mark..'\t'..(ar.name or ar.what)..' in '..(contract_id_base58 or ar.short_src)..':'..ar.currentline..'\n') + if level == set then + mark = '***' + else + mark = '' + end + local contract_id_base58, _ = __get_contract_info(ar.source) + io.write('['..level..']'..mark..'\t'..(ar.name or ar.what)..' 
in '..(contract_id_base58 or ar.short_src)..':'..ar.currentline..'\n') end end @@ -783,37 +837,37 @@ package.preload['__debugger'] = function() local func = ar.func if func then - i = 1 - while true do - local name, value = debug.getupvalue(func, i) - if not name then break end - if string.sub(name,1,1) ~= '(' then --NB: ignoring internal control variables - vars[name] = value - vars.__UPVALUES__[i] = name + i = 1 + while true do + local name, value = debug.getupvalue(func, i) + if not name then break end + if string.sub(name,1,1) ~= '(' then --NB: ignoring internal control variables + vars[name] = value + vars.__UPVALUES__[i] = name + end + i = i + 1 end - i = i + 1 - end - vars.__ENVIRONMENT__ = getfenv(func) + vars.__ENVIRONMENT__ = getfenv(func) end vars.__GLOBALS__ = getfenv(0) i = 1 while true do - local name, value = debug.getlocal(lvl, i) - if not name then break end - if string.sub(name,1,1) ~= '(' then --NB: ignoring internal control variables - vars[name] = value - vars.__LOCALS__[i] = name - end - i = i + 1 + local name, value = debug.getlocal(lvl, i) + if not name then break end + if string.sub(name,1,1) ~= '(' then --NB: ignoring internal control variables + vars[name] = value + vars.__LOCALS__[i] = name + end + i = i + 1 end vars.__VARSLEVEL__ = level if func then - --NB: Do not do this until finished filling the vars table - setmetatable(vars, { __index = getfenv(func), __newindex = getfenv(func) }) + --NB: Do not do this until finished filling the vars table + setmetatable(vars, { __index = getfenv(func), __newindex = getfenv(func) }) end --NB: Do not read or write the vars table anymore else the metatable functions will get invoked! @@ -822,13 +876,13 @@ package.preload['__debugger'] = function() local contract_id_hex = getinfo(lvl, 'source') if string.find(contract_id_hex, '@') == 1 then - contract_id_hex = string.sub(contract_id_hex, 2) + contract_id_hex = string.sub(contract_id_hex, 2) end local contract_id_base58, _ = __get_contract_info(contract_id_hex) if not line then - line = getinfo(lvl, 'currentline') + line = getinfo(lvl, 'currentline') end return vars,contract_id_hex,contract_id_base58,line @@ -852,13 +906,13 @@ package.preload['__debugger'] = function() i = 1 while true do - local name, value = debug.getlocal(level, i) - if not name then break end - if vars[name] and string.sub(name,1,1) ~= '(' then --NB: ignoring internal control variables - debug.setlocal(level, i, vars[name]) - written_vars[name] = true - end - i = i + 1 + local name, value = debug.getlocal(level, i) + if not name then break end + if vars[name] and string.sub(name,1,1) ~= '(' then --NB: ignoring internal control variables + debug.setlocal(level, i, vars[name]) + written_vars[name] = true + end + i = i + 1 end local ar = debug.getinfo(level, 'f') @@ -872,10 +926,10 @@ package.preload['__debugger'] = function() local name, value = debug.getupvalue(func, i) if not name then break end if vars[name] and string.sub(name,1,1) ~= '(' then --NB: ignoring internal control variables - if not written_vars[name] then - debug.setupvalue(func, i, vars[name]) - end - written_vars[name] = true + if not written_vars[name] then + debug.setupvalue(func, i, vars[name]) + end + written_vars[name] = true end i = i + 1 end @@ -887,38 +941,13 @@ package.preload['__debugger'] = function() --}}} --{{{ local function trace_event(event, line, level) - local function print_trace(level,depth,event,file,line,name) - - --NB: level here is relative to the caller of trace_event, so offset by 2 to get to there - level = 
level + 2 - - local contract_id_hex = contract_id_hex or getinfo(level,'short_src') - local line = line or getinfo(level,'currentline') - local name = name or getinfo(level,'name') - - local prefix = '' - if current_thread ~= 'main' then prefix = '['..tostring(current_thread)..'] ' end - - io.write(prefix.. - string.format('%02i.', depth).. --os clock removed, but how about blockHeight? TODO - string.rep('.',depth%32).. - (contract_id_hex or '')..' ('..(line or '')..') '.. - (name or '').. - ' ('..event..')\n') - - end - - local function trace_event(event, line, level) + local function trace_event(event, line, level) if event ~= 'line' then return end local slevel = stack_level[current_thread] local tlevel = trace_level[current_thread] - if trace_lines then - print_trace(level,slevel,'l') - end - trace_level[current_thread] = stack_level[current_thread] end @@ -933,15 +962,15 @@ package.preload['__debugger'] = function() local prefix = '' if current_thread ~= 'main' then prefix = '['..tostring(current_thread)..'] ' end if ev == events.STEP then - io.write(prefix..'Paused at contract '..contract_id_base58..' line '..line..' ('..stack_level[current_thread]..')\n') + io.write(prefix..'Paused at contract '..contract_id_base58..' line '..line..' ('..stack_level[current_thread]..')\n') elseif ev == events.BREAK then - io.write(prefix..'Paused at contract '..contract_id_base58..' line '..line..' ('..stack_level[current_thread]..') (breakpoint)\n') + io.write(prefix..'Paused at contract '..contract_id_base58..' line '..line..' ('..stack_level[current_thread]..') (breakpoint)\n') elseif ev == events.WATCH then - io.write(prefix..'Paused at contract '..contract_id_base58..' line '..line..' ('..stack_level[current_thread]..')'..' (watch expression '..idx_watch.. ': ['..watches[idx_watch].exp..'])\n') + io.write(prefix..'Paused at contract '..contract_id_base58..' line '..line..' ('..stack_level[current_thread]..')'..' (watch expression '..idx_watch.. ': ['..__get_watchpoint(idx_watch)..'])\n') elseif ev == events.SET then - --do nothing + --do nothing else - io.write(prefix..'Error in application: '..contract_id_base58..' line '..line..'\n') + io.write(prefix..'Error in application: '..contract_id_base58..' line '..line..'\n') end return vars, contract_id_base58, line end @@ -970,349 +999,309 @@ package.preload['__debugger'] = function() -- S for a string local function getargs(spec) - local res={} - local char,arg - local ptr=1 - for i=1,string.len(spec) do - char = string.sub(spec,i,i) - if char == 'F' then - _,ptr,arg = string.find(args..' ','%s*([%w%p]*)%s*',ptr) - if not arg or arg == '' then arg = '-' end - if arg == '-' then arg = breakfile end - elseif char == 'L' then - _,ptr,arg = string.find(args..' ','%s*([%w%p]*)%s*',ptr) - if not arg or arg == '' then arg = '-' end - if arg == '-' then arg = breakline end - arg = tonumber(arg) or 0 - elseif char == 'N' then - _,ptr,arg = string.find(args..' ','%s*([%w%p]*)%s*',ptr) - if not arg or arg == '' then arg = '0' end - arg = tonumber(arg) or 0 - elseif char == 'V' then - _,ptr,arg = string.find(args..' ','%s*([%w%p]*)%s*',ptr) - if not arg or arg == '' then arg = '' end - elseif char == 'S' then - _,ptr,arg = string.find(args..' ','%s*([%w%p]*)%s*',ptr) - if not arg or arg == '' then arg = '' end - else - arg = '' + local res={} + local char,arg + local ptr=1 + for i=1,string.len(spec) do + char = string.sub(spec,i,i) + if char == 'F' then + _,ptr,arg = string.find(args..' 
','%s*([%w%p]*)%s*',ptr) + if not arg or arg == '' then arg = '-' end + if arg == '-' then arg = breakfile end + elseif char == 'L' then + _,ptr,arg = string.find(args..' ','%s*([%w%p]*)%s*',ptr) + if not arg or arg == '' then arg = '-' end + if arg == '-' then arg = breakline end + arg = tonumber(arg) or 0 + elseif char == 'N' then + _,ptr,arg = string.find(args..' ','%s*([%w%p]*)%s*',ptr) + if not arg or arg == '' then arg = '0' end + arg = tonumber(arg) or 0 + elseif char == 'V' then + _,ptr,arg = string.find(args..' ','%s*([%w%p]*)%s*',ptr) + if not arg or arg == '' then arg = '' end + elseif char == 'S' then + _,ptr,arg = string.find(args..' ','%s*([%w%p]*)%s*',ptr) + if not arg or arg == '' then arg = '' end + else + arg = '' + end + table.insert(res,arg or '') end - table.insert(res,arg or '') - end - return unpack(res) + return unpack(res) end --}}} while true do - io.write('[DEBUG]> ') - local line = io.read('*line') - if line == nil then io.write('\n'); line = 'exit' end - - if string.find(line, '^[a-z]+') then - command = string.sub(line, string.find(line, '^[a-z]+')) - args = string.gsub(line,'^[a-z]+%s*','',1) --strip command off line - else - command = '' - end + io.write('[DEBUG]> ') + local line = io.read('*line') + if line == nil then io.write('\n'); line = 'exit' end - if command == 'setb' then - --{{{ set breakpoint - - local line, contract_id_hex = getargs('LF') - if contract_id_hex ~= '' and line ~= '' then - __set_breakpoint(contract_id_hex,line) + if string.find(line, '^[a-z]+') then + command = string.sub(line, string.find(line, '^[a-z]+')) + args = string.gsub(line,'^[a-z]+%s*','',1) --strip command off line else - io.write('Bad request\n') + command = '' end - --}}} - - elseif command == 'delb' then - --{{{ delete breakpoint + if command == 'setb' then + --{{{ set breakpoint - local line, contract_id_hex = getargs('LF') - if contract_id_hex ~= '' and line ~= '' then - __delete_breakpoint(contract_id_hex, line) - else - io.write('Bad request\n') - end + local line, contract_id_hex = getargs('LF') + if contract_id_hex ~= '' and line ~= '' then + __set_breakpoint(contract_id_hex,line) + else + io.write('Bad request\n') + end - --}}} + --}}} - elseif command == 'delallb' then - --{{{ delete all breakpoints - --TODO - io.write('All breakpoints deleted\n') - --}}} + elseif command == 'delb' then + --{{{ delete breakpoint - elseif command == 'listb' then - --{{{ list breakpoints - __print_breakpoints() - --}}} + local line, contract_id_hex = getargs('LF') + if contract_id_hex ~= '' and line ~= '' then + __delete_breakpoint(contract_id_hex, line) + else + io.write('Bad request\n') + end - elseif command == 'setw' then - --{{{ set watch expression + --}}} - if args and args ~= '' then - local func = loadstring('return(' .. args .. ')') - local newidx = #watches + 1 - watches[newidx] = {func = func, exp = args} - io.write('Set watch exp no. ' .. newidx..'\n') - else - io.write('Bad request\n') - end + elseif command == 'resetb' then + --{{{ delete all breakpoints + --TODO + io.write('All breakpoints deleted\n') + --}}} - --}}} + elseif command == 'listb' then + --{{{ list breakpoints + __print_breakpoints() + --}}} - elseif command == 'delw' then - --{{{ delete watch expression + elseif command == 'setw' then + --{{{ set watch expression - local index = tonumber(args) - if index then - watches[index] = nil - io.write('Watch expression deleted\n') - else - io.write('Bad request\n') - end + if args and args ~= '' then + __set_watchpoint(args) + io.write('Set watch exp no. 
' .. __len_watchpoints() ..'\n') + else + io.write('Bad request\n') + end - --}}} + --}}} - elseif command == 'delallw' then - --{{{ delete all watch expressions - watches = {} - io.write('All watch expressions deleted\n') - --}}} + elseif command == 'delw' then + --{{{ delete watch expression - elseif command == 'listw' then - --{{{ list watch expressions - for i, v in pairs(watches) do - io.write('Watch exp. ' .. i .. ': ' .. v.exp..'\n') - end - --}}} - - elseif command == 'run' then - --{{{ run until breakpoint - step_into = false - step_over = false - return 'cont' - --}}} - - elseif command == 'step' then - --{{{ step N lines (into functions) - local N = tonumber(args) or 1 - step_over = false - step_into = true - step_lines = tonumber(N or 1) - return 'cont' - --}}} - - elseif command == 'over' then - --{{{ step N lines (over functions) - local N = tonumber(args) or 1 - step_into = false - step_over = true - step_lines = tonumber(N or 1) - step_level[current_thread] = stack_level[current_thread] - return 'cont' - --}}} - - elseif command == 'out' then - --{{{ step N lines (out of functions) - local N = tonumber(args) or 1 - step_into = false - step_over = true - step_lines = 1 - step_level[current_thread] = stack_level[current_thread] - tonumber(N or 1) - return 'cont' - --}}} - - elseif command == 'set' then - --{{{ set/show context level - local level = args - if level and level == '' then level = nil end - if level then return level end - --}}} - - elseif command == 'vars' then - --{{{ list context variables - local depth = args - if depth and depth == '' then depth = nil end - depth = tonumber(depth) or 1 - dumpvar(eval_env, depth+1, 'variables') - --}}} - - elseif command == 'glob' then - --{{{ list global variables - local depth = args - if depth and depth == '' then depth = nil end - depth = tonumber(depth) or 1 - dumpvar(eval_env.__GLOBALS__,depth+1,'globals') - --}}} - - elseif command == 'fenv' then - --{{{ list function environment variables - local depth = args - if depth and depth == '' then depth = nil end - depth = tonumber(depth) or 1 - dumpvar(eval_env.__ENVIRONMENT__,depth+1,'environment') - --}}} - - elseif command == 'ups' then - --{{{ list upvalue names - dumpvar(eval_env.__UPVALUES__,2,'upvalues') - --}}} - - elseif command == 'locs' then - --{{{ list locals names - dumpvar(eval_env.__LOCALS__,2,'upvalues') - --}}} - - elseif command == 'what' then - --{{{ show where a function is defined - if args and args ~= '' then - local v = eval_env - local n = nil - for w in string.gmatch(args,'[%w_]+') do - v = v[w] - if n then n = n..'.'..w else n = w end - if not v then break end - end - if type(n) ~= 'string' then - io.write('Invalid function name given\n') - elseif type(v) == 'function' then - local def = debug.getinfo(v,'S') - if def then - io.write(def.what..' in '..def.short_src..' '..def.linedefined..'..'..def.lastlinedefined..'\n') + local index = tonumber(args) + if index then + __delete_watchpoint(index) + io.write('Watch expression deleted\n') else - io.write('Cannot get info for '..n..'\n') + io.write('Bad request\n') end - else - io.write(n..' 
is not a function\n') - end - else - io.write('Bad request\n') - end - --}}} - - elseif command == 'dump' then - --{{{ dump a variable - local name, depth = getargs('VN') - if name ~= '' then - if depth == '' or depth == 0 then depth = nil end - depth = tonumber(depth or 1) - local v = eval_env - local n = nil - for w in string.gmatch(name,'[^%.]+') do --get everything between dots - if tonumber(w) then - v = v[tonumber(w)] + + --}}} + + elseif command == 'resetw' then + --{{{ delete all watch expressions + __reset_watchpoints() + io.write('All watch expressions deleted\n') + --}}} + + elseif command == 'listw' then + --{{{ list watch expressions + for i, v in pairs(__list_watchpoints()) do + io.write(i .. ': ' .. v..'\n') + end + --}}} + + elseif command == 'run' then + --{{{ run until breakpoint + step_into = false + step_over = false + return 'cont' + --}}} + + elseif command == 'step' then + --{{{ step N lines (into functions) + local N = tonumber(args) or 1 + step_over = false + step_into = true + step_lines = tonumber(N or 1) + return 'cont' + --}}} + + elseif command == 'over' then + --{{{ step N lines (over functions) + local N = tonumber(args) or 1 + step_into = false + step_over = true + step_lines = tonumber(N or 1) + step_level[current_thread] = stack_level[current_thread] + return 'cont' + --}}} + + elseif command == 'out' then + --{{{ step N lines (out of functions) + local N = tonumber(args) or 1 + step_into = false + step_over = true + step_lines = 1 + step_level[current_thread] = stack_level[current_thread] - tonumber(N or 1) + return 'cont' + --}}} + + elseif command == 'set' then + --{{{ set/show context level + local level = args + if level and level == '' then level = nil end + if level then return level end + --}}} + + elseif command == 'vars' then + --{{{ list context variables + local depth = args + if depth and depth == '' then depth = nil end + depth = tonumber(depth) or 1 + dumpvar(eval_env, depth+1, 'variables') + --}}} + + elseif command == 'glob' then + --{{{ list global variables + local depth = args + if depth and depth == '' then depth = nil end + depth = tonumber(depth) or 1 + dumpvar(eval_env.__GLOBALS__,depth+1,'globals') + --}}} + + elseif command == 'fenv' then + --{{{ list function environment variables + local depth = args + if depth and depth == '' then depth = nil end + depth = tonumber(depth) or 1 + dumpvar(eval_env.__ENVIRONMENT__,depth+1,'environment') + --}}} + + elseif command == 'ups' then + --{{{ list upvalue names + dumpvar(eval_env.__UPVALUES__,2,'upvalues') + --}}} + + elseif command == 'locs' then + --{{{ list locals names + dumpvar(eval_env.__LOCALS__,2,'upvalues') + --}}} + + elseif command == 'dump' then + --{{{ dump a variable + local name, depth = getargs('VN') + if name ~= '' then + if depth == '' or depth == 0 then depth = nil end + depth = tonumber(depth or 1) + local v = eval_env + local n = nil + for w in string.gmatch(name,'[^%.]+') do --get everything between dots + if tonumber(w) then + v = v[tonumber(w)] + else + v = v[w] + end + if n then n = n..'.'..w else n = w end + if not v then break end + end + dumpvar(v,depth+1,n) else - v = v[w] + io.write('Bad request\n') end - if n then n = n..'.'..w else n = w end - if not v then break end - end - dumpvar(v,depth+1,n) - else - io.write('Bad request\n') - end - --}}} + --}}} - elseif command == 'show' then - --{{{ show contract around a line or the current breakpoint - local line, contract_id_hex, before, after = getargs('LFNN') - if before == 0 then before = 10 end - if after == 
0 then after = before end + elseif command == 'show' then + --{{{ show contract around a line or the current breakpoint + local line, contract_id_hex, before, after = getargs('LFNN') + if before == 0 then before = 10 end + if after == 0 then after = before end - if contract_id_hex ~= '' and contract_id_hex ~= '=stdin' then - show(contract_id_hex,line,before,after) - else - io.write('Nothing to show\n') - end - --}}} - - elseif command == 'tron' then - --{{{ turn tracing on/off - local option = getargs('S') - trace_calls = false - trace_returns = false - trace_lines = false - if string.find(option,'c') then trace_calls = true end - if string.find(option,'r') then trace_returns = true end - if string.find(option,'l') then trace_lines = true end - --}}} - - elseif command == 'trace' then - --{{{ dump a stack trace - trace(eval_env.__VARSLEVEL__) - --}}} - - elseif command == 'info' then - --{{{ dump all debug info captured - info() - --}}} - - elseif command == 'pause' then - --{{{ not allowed in here - io.write('pause() should only be used in the script you are debugging\n') - --}}} - - elseif command == 'help' then - --{{{ help - local command = getargs('S') - if command ~= '' and hints[command] then - io.write(hints[command]..'\n') - else - local l = {} - for k,v in pairs(hints) do - local _,_,h = string.find(v,'(.+)|') - l[#l+1] = h..'\n' - end - table.sort(l) - io.write(table.concat(l)) - end - --}}} + if contract_id_hex ~= '' and contract_id_hex ~= '=stdin' then + show(contract_id_hex,line,before,after) + else + io.write('Nothing to show\n') + end + --}}} + + elseif command == 'trace' then + --{{{ dump a stack trace + trace(eval_env.__VARSLEVEL__) + --}}} + + elseif command == 'info' then + --{{{ dump all debug info captured + info() + --}}} + + elseif command == 'pause' then + --{{{ not allowed in here + io.write('pause() should only be used in the script you are debugging\n') + --}}} + + elseif command == 'help' then + --{{{ help + local command = getargs('S') + if command ~= '' and hints[command] then + io.write(hints[command]..'\n') + else + local l = {} + for k,v in pairs(hints) do + local _,_,h = string.find(v,'(.+)|') + l[#l+1] = h..'\n' + end + table.sort(l) + io.write(table.concat(l)) + end + --}}} - elseif command == 'exit' then - --{{{ exit debugger - return 'stop' - --}}} + elseif command == 'exit' then + --{{{ exit debugger + return 'stop' + --}}} - elseif line ~= '' then - --{{{ just execute whatever it is in the current context + elseif line ~= '' then + --{{{ just execute whatever it is in the current context - --map line starting with '=...' to 'return ...' - if string.sub(line,1,1) == '=' then line = string.gsub(line,'=','return ',1) end + --map line starting with '=...' to 'return ...' + if string.sub(line,1,1) == '=' then line = string.gsub(line,'=','return ',1) end - local ok, func = pcall(loadstring,line) - if ok and func==nil then -- auto-print variables - ok, func = pcall(loadstring,'print(' .. line .. ')') - end - if func == nil then --Michael.Bringmann@lsi.com - io.write('Compile error: '..line..'\n') - elseif not ok then - io.write('Compile error: '..func..'\n') - else - setfenv(func, eval_env) - local res = {pcall(func)} - if res[1] then - if res[2] then - table.remove(res,1) - for _,v in ipairs(res) do - io.write(tostring(v)) - io.write('\t') + local ok, func = pcall(loadstring,line) + if ok and func==nil then -- auto-print variables + ok, func = pcall(loadstring,'io.write(tostring(' .. line .. 
'))') end - io.write('\n') + if func == nil then --Michael.Bringmann@lsi.com + io.write('Compile error: '..line..'\n') + elseif not ok then + io.write('Compile error: '..func..'\n') + else + setfenv(func, eval_env) + local res = {pcall(func)} + if res[1] then + if res[2] then + table.remove(res,1) + for _,v in ipairs(res) do + io.write(tostring(v)) + io.write('\t') + end + end + --update in the context + io.write('\n') + return 0 + else + io.write('Run error: '..res[2]..'\n') + end end - --update in the context - return 0 - else - io.write('Run error: '..res[2]..'\n') - end - end - --}}} - end + --}}} + end end end @@ -1325,90 +1314,93 @@ package.preload['__debugger'] = function() local level = level or 2 trace_event(event,line,level) if event == 'line' then - -- calculate current stack - for i=1,99999,1 do - if not debug.getinfo(i) then break end - stack_level[current_thread] = i - 1 -- minus one to remove this debug_hook stack - end - - local vars,contract_id_hex,contract_id_base58,line = capture_vars(level,1,line) - local stop, ev, idx = false, events.STEP, 0 - while true do - for index, value in pairs(watches) do - setfenv(value.func, vars) - local status, res = pcall(value.func) - if status and res then - ev, idx = events.WATCH, index - stop = true - break - end - end - if stop then break end - if (step_into) - or (step_over and (stack_level[current_thread] <= step_level[current_thread] or stack_level[current_thread] == 0)) then - step_lines = step_lines - 1 - if step_lines < 1 then - ev, idx = events.STEP, 0 - break - end + -- calculate current stack + for i=1,99999,1 do + if not debug.getinfo(i) then break end + stack_level[current_thread] = i - 1 -- minus one to remove this debug_hook stack end - if has_breakpoint(contract_id_hex, line) then - ev, idx = events.BREAK, 0 - break + + local vars,contract_id_hex,contract_id_base58,line = capture_vars(level,1,line) + local stop, ev, idx = false, events.STEP, 0 + while true do + for index, value in pairs(__list_watchpoints()) do + local func = loadstring('return(' .. value .. 
')') + if func ~= nil then + setfenv(func, vars) + local status, res = pcall(func) + if status and res then + ev, idx = events.WATCH, index + stop = true + break + end + end + end + if stop then break end + if (step_into) + or (step_over and (stack_level[current_thread] <= step_level[current_thread] or stack_level[current_thread] == 0)) then + step_lines = step_lines - 1 + if step_lines < 1 then + ev, idx = events.STEP, 0 + break + end + end + if has_breakpoint(contract_id_hex, line) then + ev, idx = events.BREAK, 0 + break + end + return end - return - end - if skip_pause_for_init then - --DO notthing - elseif not coro_debugger then - io.write('Lua Debugger\n') - vars, contract_id_base58, line = report(ev, vars, contract_id_base58, line, idx) - io.write('Type \'help\' for commands\n') - coro_debugger = true - else - vars, contract_id_base58, line = report(ev, vars, contract_id_base58, line, idx) - end - tracestack(level) - local last_next = 1 - local next = 'ask' - local silent = false - while true do - if next == 'ask' then - if skip_pause_for_init then - step_into = false - --step_over = false - skip_pause_for_init = false -- reset flag - return -- for the first time + if skip_pause_for_init then + --DO notthing + elseif not coro_debugger then + io.write('Lua Debugger\n') + vars, contract_id_base58, line = report(ev, vars, contract_id_base58, line, idx) + io.write('Type \'help\' for commands\n') + coro_debugger = true + else + vars, contract_id_base58, line = report(ev, vars, contract_id_base58, line, idx) end - next = debugger_loop(ev, vars, contract_id_hex, line, idx) - elseif next == 'cont' then - return - elseif next == 'stop' then - started = false - debug.sethook() - coro_debugger = nil - return - elseif tonumber(next) then --get vars for given level or last level - next = tonumber(next) - if next == 0 then silent = true; next = last_next else silent = false end - last_next = next - restore_vars(level,vars) - vars, contract_id_hex, contract_id_base58, line = capture_vars(level,next) - if not silent then - if vars and vars.__VARSLEVEL__ then - io.write('Level: '..vars.__VARSLEVEL__..'\n') + tracestack(level) + local last_next = 1 + local next = 'ask' + local silent = false + while true do + if next == 'ask' then + if skip_pause_for_init then + step_into = false + --step_over = false + skip_pause_for_init = false -- reset flag + return -- for the first time + end + next = debugger_loop(ev, vars, contract_id_hex, line, idx) + elseif next == 'cont' then + return + elseif next == 'stop' then + started = false + debug.sethook() + coro_debugger = nil + return + elseif tonumber(next) then --get vars for given level or last level + next = tonumber(next) + if next == 0 then silent = true; next = last_next else silent = false end + last_next = next + restore_vars(level,vars) + vars, contract_id_hex, contract_id_base58, line = capture_vars(level,next) + if not silent then + if vars and vars.__VARSLEVEL__ then + io.write('Level: '..vars.__VARSLEVEL__..'\n') + else + io.write('No level set\n') + end + end + ev = events.SET + next = 'ask' else - io.write('No level set\n') + io.write('Unknown command from debugger_loop: '..tostring(next)..'\n') + io.write('Stopping debugger\n') + next = 'stop' end end - ev = events.SET - next = 'ask' - else - io.write('Unknown command from debugger_loop: '..tostring(next)..'\n') - io.write('Stopping debugger\n') - next = 'stop' - end - end end end diff --git a/contract/name/name.go b/contract/name/name.go index 734256272..277ebe061 100644 --- 
a/contract/name/name.go +++ b/contract/name/name.go @@ -18,6 +18,11 @@ type NameMap struct { Destination []byte } +// AccountStateReader is an interface for getting a name account state. +type AccountStateReader interface { + GetNameAccountState() (*state.ContractState, error) +} + func CreateName(scs *state.ContractState, tx *types.TxBody, sender, receiver *state.V, name string) error { amount := tx.GetAmountBigInt() sender.SubBalance(amount) @@ -136,6 +141,15 @@ func getNameMap(scs *state.ContractState, name []byte, useInitial bool) *NameMap return deserializeNameMap(ownerdata) } +func GetNameInfo(r AccountStateReader, name string) (*types.NameInfo, error) { + scs, err := r.GetNameAccountState() + if err != nil { + return nil, err + } + owner := getOwner(scs, []byte(name), true) + return &types.NameInfo{Name: &types.Name{Name: string(name)}, Owner: owner, Destination: GetAddress(scs, []byte(name))}, err +} + func registerOwner(scs *state.ContractState, name, owner, destination []byte) error { nameMap := &NameMap{Version: 1, Owner: owner, Destination: destination} return setNameMap(scs, name, nameMap) @@ -191,25 +205,3 @@ func deserializeNameMap(data []byte) *NameMap { } return nil } - -/* -version 0 - -func setAddress(scs *state.ContractState, name, address []byte) error { - owner := &Owner{Address: address} - return setOwner(scs, name, owner) -} - -func serializeOwner(owner *Owner) []byte { - if owner != nil { - return owner.Address - } - return nil -} -func deserializeOwner(data []byte) *Owner { - if data != nil { - return &Owner{Address: data} - } - return nil -} -*/ diff --git a/contract/state_module.c b/contract/state_module.c index dfa05116b..a324964cb 100644 --- a/contract/state_module.c +++ b/contract/state_module.c @@ -5,6 +5,7 @@ #include #include +#include #include "vm.h" #include "system_module.h" @@ -26,52 +27,53 @@ static int state_array_pairs(lua_State *L); /* map */ +typedef struct { + char *id; + int key_type; +} state_map_t; + static int state_map(lua_State *L) { - lua_newtable(L); /* m */ - lua_pushstring(L, TYPE_NAME); /* m _type_ */ - lua_pushstring(L, "map"); /* m _type_ map */ - lua_rawset(L, -3); /* m */ - lua_pushcfunction(L, state_map_delete); /* m delete f */ - lua_setfield(L, -2, "delete"); /* m */ - luaL_getmetatable(L, STATE_MAP_ID); /* m mt */ - lua_setmetatable(L, -2); /* m */ + state_map_t *m = lua_newuserdata(L, sizeof(state_map_t)); /* m */ + m->id = NULL; + m->key_type = LUA_TNONE; + luaL_getmetatable(L, STATE_MAP_ID); /* m mt */ + lua_setmetatable(L, -2); /* m */ return 1; } -static void state_map_check_index(lua_State *L, int index) +static void state_map_check_index(lua_State *L, state_map_t *m) { - int expected; - int type = lua_type(L, index); - lua_pushstring(L, KEY_TYPE_NAME); - lua_rawget(L, 1); - if (lua_isnil(L, -1)) { - lua_pushcfunction(L, getItemWithPrefix); /* f */ - lua_getfield(L, 1, "id"); /* f id */ - if (!lua_isstring(L, -1)) { - luaL_error(L, "the value is not a state.map type"); - } - lua_pushstring(L, STATE_VAR_META_TYPE); /* f id prefix */ - lua_call(L, 2, 1); /* t */ + /* m key */ + int key_type = lua_type(L, 2); + int stored_type = m->key_type; + + if (key_type != LUA_TNUMBER && key_type != LUA_TSTRING) { + luaL_error(L, "invalid key type: " LUA_QS ", state.map: " LUA_QS, + lua_typename(L, key_type), m->id); } - if (lua_isnil(L, -1)) { - luaL_argcheck(L, (type == LUA_TNUMBER || type == LUA_TSTRING), - index, "number or string expected"); - } else { - expected = (int)lua_tointeger(L, -1); - if (type != expected) { - 
luaL_typerror(L, 2, lua_typename(L, expected)); + if (stored_type == LUA_TNONE) { + lua_pushcfunction(L, getItemWithPrefix); /* m key f */ + lua_pushstring(L, m->id); /* m key f id */ + lua_pushstring(L, STATE_VAR_META_TYPE); /* m key f id prefix */ + lua_call(L, 2, 1); /* m key t */ + if (!lua_isnil(L, -1)) { + stored_type = luaL_checkint(L, -1); + if (stored_type != LUA_TNUMBER && stored_type != LUA_TSTRING) { + luaL_error(L, "invalid stored key type: " LUA_QS ", state.map: " LUA_QS, + lua_typename(L, stored_type), m->id); + } } + lua_pop(L, 1); + } + if (stored_type != LUA_TNONE && key_type != stored_type) { + luaL_typerror(L, 2, lua_typename(L, stored_type)); } - lua_pop(L, 1); } -static void state_map_push_key(lua_State *L) +static void state_map_push_key(lua_State *L, state_map_t *m) { - lua_getfield(L, 1, "id"); /* m key value f id */ - if (!lua_isstring(L, -1)) { - luaL_error(L, "the value is not a state.map type"); - } + lua_pushstring(L, m->id); /* m key value f id */ lua_pushstring(L, "-"); lua_pushvalue(L, 2); /* m key value f id '-' key */ lua_concat(L, 3); /* m key value f id-key */ @@ -79,39 +81,55 @@ static void state_map_push_key(lua_State *L) static int state_map_get(lua_State *L) { - luaL_checktype(L, 1, LUA_TTABLE); /* m key */ - state_map_check_index(L, 2); + int key_type = LUA_TNONE; + int arg = lua_gettop(L); + state_map_t *m = luaL_checkudata(L, 1, STATE_MAP_ID); /* m key */ + + key_type = lua_type(L, 2); + if (key_type == LUA_TSTRING) { + const char *method = lua_tostring(L, 2); + if (method != NULL && strcmp(method, "delete") == 0) { + lua_pushcfunction(L, state_map_delete); + return 1; + } + } + + state_map_check_index(L, m); lua_pushcfunction(L, getItemWithPrefix); /* m key f */ - state_map_push_key(L); /* m key f id-key */ + state_map_push_key(L, m); /* m key f id-key */ + if (arg == 3) { + lua_pushvalue(L, 3); + } lua_pushstring(L, STATE_VAR_KEY_PREFIX); /* m key f id-key prefix */ - lua_call(L, 2, 1); /* m key rv */ + lua_call(L, arg, 1); /* m key rv */ return 1; } static int state_map_set(lua_State *L) { - luaL_checktype(L, 1, LUA_TTABLE); /* m key value */ - state_map_check_index(L, 2); - lua_pushstring(L, KEY_TYPE_NAME); /* m key value _key_type_ */ - lua_rawget(L, 1); /* m key value n */ - if (lua_isnil(L, -1)) { - int type = lua_type(L, 2); - lua_pushcfunction(L, setItemWithPrefix); /* m key value n f */ - lua_getfield(L, 1, "id"); /* m key value n f id */ - if (!lua_isstring(L, -1)) { - luaL_error(L, "the value is not a state.map type"); + /* m key value */ + int key_type = LUA_TNONE; + state_map_t *m = luaL_checkudata(L, 1, STATE_MAP_ID); + + key_type = lua_type(L, 2); + if (key_type == LUA_TSTRING) { + const char *method = lua_tostring(L, 2); + if (method != NULL && strcmp(method, "delete") == 0) { + luaL_error(L, "can't use " LUA_QL("delete") " as a key"); } - lua_pushinteger(L, type); /* m key value n f id type */ - lua_pushstring(L, STATE_VAR_META_TYPE); /* m key value n f id type prefix */ - lua_call(L, 3, 0); /* m key value n */ - lua_pushstring(L, KEY_TYPE_NAME); /* m key value n _key_name_ */ - lua_pushinteger(L, type); /* m key value n _key_name_ type */ - lua_rawset(L, 1); } - lua_pop(L, 1); /* T key value "type_name" */ + state_map_check_index(L, m); + if (m->key_type == LUA_TNONE) { + lua_pushcfunction(L, setItemWithPrefix); /* m key f */ + lua_pushstring(L, m->id); /* m key f id */ + lua_pushinteger(L, key_type); /* m key f id type */ + lua_pushstring(L, STATE_VAR_META_TYPE); /* m key f id type prefix */ + lua_call(L, 3, 0); /* m key */ + 
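Contract-side usage of the reworked state variables is unchanged; what state_map_check_index adds is that map keys must be numbers or strings and that, once the first key has been written, every later key must keep that type. A short Lua sketch with placeholder variable names and keys:

    state.var {
        owner    = state.value(),   -- single slot
        balances = state.map(),     -- key type is pinned by the first key written
        history  = state.array(3),  -- fixed length: history:append(...) raises an error
    }

    function constructor()
        owner:set(system.getSender())
        balances["alice"] = 100     -- string keys from now on; balances[1] = ... would be a type error
    end

    function forget(addr)
        balances:delete(addr)       -- delete() is still reachable through the __index metamethod
    end
    abi.register(forget)
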
m->key_type = key_type; + } luaL_checkany(L, 3); lua_pushcfunction(L, setItemWithPrefix); /* m key value f */ - state_map_push_key(L); /* m key value f id-key */ + state_map_push_key(L, m); /* m key value f id-key */ lua_pushvalue(L, 3); /* m key value f id-key value */ lua_pushstring(L, STATE_VAR_KEY_PREFIX); /* m key value f id-key value prefix */ lua_call(L, 3, 0); /* t key value */ @@ -120,31 +138,45 @@ static int state_map_set(lua_State *L) static int state_map_delete(lua_State *L) { - luaL_checktype(L, 1, LUA_TTABLE); /* m key */ - state_map_check_index(L, 2); + /* m key */ + state_map_t *m = luaL_checkudata(L, 1, STATE_MAP_ID); + state_map_check_index(L, m); lua_pushcfunction(L, delItemWithPrefix); /* m key f */ - state_map_push_key(L); /* m key f id-key */ + state_map_push_key(L, m); /* m key f id-key */ lua_pushstring(L, STATE_VAR_KEY_PREFIX); /* m key f id-key prefix */ lua_call(L, 2, 1); /* m key rv */ return 0; } +static int state_map_gc(lua_State *L) +{ + state_map_t *m = luaL_checkudata(L, 1, STATE_MAP_ID); + if (m->id) { + free(m->id); + m->id = NULL; + } + return 0; +} + /* array */ typedef struct { char *id; - int len; + int32_t len; int is_fixed; } state_array_t; static int state_array(lua_State *L) { int is_fixed; - int len = 0; + int32_t len = 0; state_array_t *arr; is_fixed = lua_gettop(L) != 0; if (is_fixed) { + if (!luaL_isinteger(L, 1)) { + luaL_typerror(L, 1, "integer"); + } len = luaL_checkint(L, 1); /* size */ luaL_argcheck(L, (len > 0), 1, "the array length must be greater than zero"); } @@ -197,6 +229,7 @@ static void state_array_push_key(lua_State *L, const char *id) static int state_array_get(lua_State *L) { state_array_t *arr; + int arg = lua_gettop(L); int key_type = LUA_TNONE; arr = luaL_checkudata(L, 1, STATE_ARRAY_ID); @@ -216,11 +249,17 @@ static int state_array_get(lua_State *L) } luaL_typerror(L, 2, "integer"); } + if (arg == 3) { + lua_pushvalue(L, 2); + } state_array_checkarg(L, arr); /* a i */ lua_pushcfunction(L, getItemWithPrefix); /* a i f */ state_array_push_key(L, arr->id); /* a i f id-i */ + if (arg == 3) { + lua_pushvalue(L, 3); /* a i s i f id-i s */ + } lua_pushstring(L, STATE_VAR_KEY_PREFIX); /* a i f id-i prefix */ - lua_call(L, 2, 1); /* a i rv */ + lua_call(L, arg, 1); /* a i rv */ return 1; } @@ -245,7 +284,10 @@ static int state_array_append(lua_State *L) state_array_t *arr = luaL_checkudata(L, 1, STATE_ARRAY_ID); luaL_checkany(L, 2); if (arr->is_fixed) { - return luaL_error(L, "the fixed array cannot use " LUA_QL("append") " method"); + luaL_error(L, "the fixed array cannot use " LUA_QL("append") " method"); + } + if (arr->len + 1 <= 0) { + luaL_error(L, "state.array " LUA_QS " overflow", arr->id); } arr->len++; lua_pushcfunction(L, state_array_set); /* a v f */ @@ -298,42 +340,65 @@ static int state_array_pairs(lua_State *L) /* scalar value */ +typedef struct { + char *id; +} state_value_t; + static int state_value(lua_State *L) { - lua_newtable(L); /* T */ - lua_pushstring(L, TYPE_NAME); /* T _type_ */ - lua_pushstring(L, "value"); /* T _type_ map */ - lua_rawset(L, -3); /* T */ - luaL_getmetatable(L, STATE_VALUE_ID); /* T mt */ - lua_setmetatable(L, -2); /* T */ + state_value_t *v = lua_newuserdata(L, sizeof(state_value_t)); /* v */ + v->id = NULL; + luaL_getmetatable(L, STATE_VALUE_ID); /* v mt */ + lua_setmetatable(L, -2); /* v */ return 1; } static int state_value_get(lua_State *L) { - luaL_checktype(L, 1, LUA_TTABLE); /* t */ - lua_pushcfunction(L, getItemWithPrefix); /* t f */ - lua_getfield(L, 1, "id"); /* t f id */ - if 
(!lua_isstring(L, -1)) { - luaL_error(L, "the value is not a state.value type"); + state_value_t *v = luaL_checkudata(L, 1, STATE_VALUE_ID); + lua_pushcfunction(L, getItemWithPrefix); /* v f */ + lua_pushstring(L, v->id); /* v f id */ + lua_pushstring(L, STATE_VAR_KEY_PREFIX); /* v f id prefix */ + lua_call(L, 2, 1); /* v rv */ + return 1; +} + +static int state_value_snapget(lua_State *L) +{ + int arg = lua_gettop(L); + state_value_t *v = luaL_checkudata(L, 1, STATE_VALUE_ID); /* v */ + lua_pushcfunction(L, getItemWithPrefix); /* v f */ + lua_pushstring(L, v->id); /* v f id */ + if (arg == 2) { + lua_pushvalue(L, 2); } - lua_pushstring(L, STATE_VAR_KEY_PREFIX); /* t f id prefix */ - lua_call(L, 2, 1); /* t rv */ + lua_pushstring(L, STATE_VAR_KEY_PREFIX); /* v f id prefix */ + lua_call(L, arg + 1, 1); /* v rv */ return 1; } static int state_value_set(lua_State *L) { - luaL_checktype(L, 1, LUA_TTABLE); /* t */ + state_value_t *v = luaL_checkudata(L, 1, STATE_VALUE_ID); /* v */ luaL_checkany(L, 2); lua_pushcfunction(L, setItemWithPrefix); /* t f */ - lua_getfield(L, 1, "id"); /* t f id */ - if (!lua_isstring(L, -1)) { - luaL_error(L, "the value is not a state.value type"); + if (v->id == NULL) { + luaL_error(L, "invalid state.value: (nil)"); } + lua_pushstring(L, v->id); /* v f id */ lua_pushvalue(L, 2); /* t f id value */ - lua_pushstring(L, STATE_VAR_KEY_PREFIX); /* t f id value prefix */ - lua_call(L, 3, 0); /* t */ + lua_pushstring(L, STATE_VAR_KEY_PREFIX); /* v f id value prefix */ + lua_call(L, 3, 0); /* v */ + return 0; +} + +static int state_value_gc(lua_State *L) +{ + state_value_t *v = luaL_checkudata(L, 1, STATE_VALUE_ID); + if (v->id) { + free(v->id); + v->id = NULL; + } return 0; } @@ -351,56 +416,106 @@ static void insert_var(lua_State *L, const char *var_name) static int state_var(lua_State *L) { - int t, i = 1; const char *var_name; + state_map_t *m = NULL; + state_array_t *arr = NULL; + state_value_t *v = NULL; - luaL_checktype(L, 1, LUA_TTABLE); /* T */ - lua_pushnil(L); /* T nil ; push the first key */ - while (lua_next(L, -2) != 0) { /* T key value */ + luaL_checktype(L, 1, LUA_TTABLE); /* T */ + lua_pushnil(L); /* T nil ; push the first key */ + while (lua_next(L, -2) != 0) { /* T key value */ var_name = luaL_checkstring(L, -2); - t = lua_type(L, -1); - if (LUA_TTABLE == t) { - lua_pushstring(L, "id"); /* T key value id */ - lua_pushvalue(L, -3); /* T key value id key */ - lua_rawset(L, -3); /* T key value{id=key} */ - lua_pushstring(L, TYPE_NAME); /* T key value _type_ */ - lua_rawget(L, -2); /* T key value "type_name" */ - if (lua_isnil(L, -1)) { - lua_pushfstring(L, "bad argument " LUA_QL("%s") ": state.value, state.map or state.array expected, got %s", - var_name, lua_typename(L, t)); - lua_error(L); - } - lua_pop(L, 1); /* T key value */ - lua_pushvalue(L, -1); /* T key value value*/ - insert_var(L, var_name); - lua_setglobal(L, var_name); /* T key */ - } else if (LUA_TUSERDATA == t) { - state_array_t *arr = luaL_checkudata(L, -1, STATE_ARRAY_ID); - arr->id = strdup((const char *)lua_tostring(L, -2)); /* T key value */ - lua_newtable(L); /* T key value VT*/ - lua_pushstring(L, TYPE_NAME); /* T key value VT _type_ */ - lua_pushstring(L, "array"); /* T key value VT _type_ "type_name" */ - lua_rawset(L, -3); /* T key value VT{_type_="type_name"} */ - lua_pushstring(L, TYPE_LEN); /* T key value VT _len_ */ - lua_pushinteger(L, arr->len); /* T key value VT _len_ len */ - lua_rawset(L, -3); /* T key value VT{_len_=len} */ + if (!lua_isuserdata(L, -1)) { + 
lua_pushfstring(L, "bad argument " LUA_QS ": state.value, state.map or state.array expected, got %s", + var_name, lua_typename(L, lua_type(L, -1))); + lua_error(L); + } + + m = luaL_testudata(L, -1, STATE_MAP_ID); + if (m != NULL) { + m->id = strdup(var_name); + lua_newtable(L); /* T key value VT */ + lua_pushstring(L, TYPE_NAME); /* T key value VT _type_ */ + lua_pushstring(L, "map"); /* T key value VT _type_ "map" */ + lua_rawset(L, -3); /* T key value VT{_type="map"} */ + goto found; + } + + arr = luaL_testudata(L, -1, STATE_ARRAY_ID); + if (arr != NULL) { + arr->id = strdup(var_name); /* T key value */ + lua_newtable(L); /* T key value VT*/ + lua_pushstring(L, TYPE_NAME); /* T key value VT _type_ */ + lua_pushstring(L, "array"); /* T key value VT _type_ "array" */ + lua_rawset(L, -3); /* T key value VT{_type_="array"} */ + lua_pushstring(L, TYPE_LEN); /* T key value VT _len_ */ + lua_pushinteger(L, arr->len); /* T key value VT _len_ len */ + lua_rawset(L, -3); /* T key value VT{_type_="array", _len_=len} */ + goto found; + } + + v = luaL_testudata(L, -1, STATE_VALUE_ID); + if (v != NULL) { + v->id = strdup(var_name); + lua_newtable(L); /* T key value VT */ + lua_pushstring(L, TYPE_NAME); /* T key value VT _type_ */ + lua_pushstring(L, "value"); /* T key value VT _type_ "value" */ + lua_rawset(L, -3); /* T key value VT{_type="value"} */ + } + +found: + if (lua_istable(L, -1)) { insert_var(L, var_name); - lua_setglobal(L, var_name); /* T key */ + lua_setglobal(L, var_name); /* T key */ } else { - lua_pushfstring(L, "bad argument " LUA_QL("%s") ": state.value, state.map or state.array expected, got %s", - var_name, lua_typename(L, t)); + lua_pushfstring(L, "bad argument " LUA_QS ": state.value, state.map or state.array expected", var_name); lua_error(L); } - i++; } return 0; } +static int state_get_snap(lua_State *L) +{ + state_map_t *m = NULL; + state_array_t *arr = NULL; + state_value_t *v = NULL; + + if (!lua_isuserdata(L, 1)) { + luaL_typerror(L, 1, "state.value, state.map or state.array"); + } + + m = luaL_testudata(L, 1, STATE_MAP_ID); + if (m != NULL) { + if (lua_gettop(L) != 3) + luaL_error(L, "invalid argument at getsnap, need (state.map, key, blockheight)"); + return state_map_get(L); + } + + arr = luaL_testudata(L, 1, STATE_ARRAY_ID); + if (arr != NULL) { + if (lua_gettop(L) != 3) + luaL_error(L, "invalid argument at getsnap, need (state.array, index, blockheight)"); + return state_array_get(L); + } + + v = luaL_testudata(L, 1, STATE_VALUE_ID); + if (v != NULL) { + if (lua_gettop(L) != 2) + luaL_error(L, "invalid argument at getsnap, need (state.value, blockheight)"); + return state_value_snapget(L); + } + + luaL_typerror(L, 1, "state.value, state.map or state.array"); + return 0; +} + int luaopen_state(lua_State *L) { static const luaL_Reg state_map_metas[] = { {"__index", state_map_get}, {"__newindex", state_map_set}, + {"__gc", state_map_gc}, {NULL, NULL} }; @@ -423,6 +538,7 @@ int luaopen_state(lua_State *L) {"array", state_array}, {"value", state_value}, {"var", state_var}, + {"getsnap", state_get_snap}, {NULL, NULL} }; @@ -436,6 +552,8 @@ int luaopen_state(lua_State *L) lua_pushvalue(L, -1); lua_setfield(L, -2, "__index"); luaL_register(L, NULL, state_value_methods); + lua_pushcfunction(L, state_value_gc); + lua_setfield(L, -2, "__gc"); luaL_register(L, "state", state_lib); diff --git a/contract/system_module.c b/contract/system_module.c index 62ad6f07b..5d6d566fb 100644 --- a/contract/system_module.c +++ b/contract/system_module.c @@ -1,6 +1,7 @@ #include #include 
#include +#include #include "vm.h" #include "util.h" #include "_cgo_export.h" @@ -73,15 +74,28 @@ int getItemWithPrefix(lua_State *L) char *dbKey; int *service = (int *)getLuaExecContext(L); char *jsonValue; + char *blkno = NULL; struct LuaGetDB_return ret; if (service == NULL) { luaL_error(L, "cannot find execution context"); } luaL_checkstring(L, 1); - luaL_checkstring(L, 2); + if(lua_gettop(L) == 2) { + luaL_checkstring(L, 2); + } + else if (lua_gettop(L) == 3) { + if (!lua_isnil(L, 2)) { + int type = lua_type(L,2); + if (type != LUA_TNUMBER && type != LUA_TSTRING) + luaL_error(L, "snap height permitted number or string type"); + blkno = (char *)lua_tostring(L, 2); + } + luaL_checkstring(L, 3); + } dbKey = getDbKey(L); - ret = LuaGetDB(L, service, dbKey); + + ret = LuaGetDB(L, service, dbKey, blkno); if (ret.r1 != NULL) { strPushAndRelease(L, ret.r1); luaL_throwerror(L); @@ -101,6 +115,10 @@ int getItem(lua_State *L) { luaL_checkstring(L, 1); lua_pushstring(L, STATE_DB_KEY_PREFIX); + if (lua_gettop(L) == 3) { + if (!lua_isnil(L, 2)) + luaL_checknumber(L, 2); + } return getItemWithPrefix(L); } @@ -189,7 +207,7 @@ static int getCreator(lua_State *L) if (service == NULL) { luaL_error(L, "cannot find execution context"); } - ret = LuaGetDB(L, service, "Creator"); + ret = LuaGetDB(L, service, "Creator", 0); if (ret.r1 != NULL) { strPushAndRelease(L, ret.r1); luaL_throwerror(L); diff --git a/contract/vm.c b/contract/vm.c index 6887ab86a..52610ba72 100644 --- a/contract/vm.c +++ b/contract/vm.c @@ -218,36 +218,3 @@ void vm_get_abi_function(lua_State *L, char *fname) lua_pushstring(L, fname); } -int vm_is_payable_function(lua_State *L, char *fname) -{ - int err; - lua_getfield(L, LUA_GLOBALSINDEX, "abi"); - lua_getfield(L, -1, "is_payable"); - lua_pushstring(L, fname); - err = pcall(L, 1, 1, 1); - if (err != 0) { - return 0; - } - return lua_tointeger(L, -1); -} - -char *vm_resolve_function(lua_State *L, char *fname, int *viewflag, int *payflag) -{ - int err; - - lua_getfield(L, LUA_GLOBALSINDEX, "abi"); - lua_getfield(L, -1, "resolve"); - lua_pushstring(L, fname); - err = pcall(L, 1, 3, 1); - if (err != 0) { - return NULL; - } - fname = (char *)lua_tostring(L, -3); - if (fname == NULL) - return fname; - *payflag = lua_tointeger(L, -2); - *viewflag = lua_tointeger(L, -1); - - return fname; -} - diff --git a/contract/vm.go b/contract/vm.go index c5b5d6e9b..5470e95f0 100644 --- a/contract/vm.go +++ b/contract/vm.go @@ -56,6 +56,11 @@ var ( zeroFee *big.Int ) +type ChainAccessor interface { + GetBlockByNo(blockNo types.BlockNo) (*types.Block, error) + GetBestBlock() (*types.Block, error) +} + type CallState struct { ctrState *state.ContractState prevState *types.State @@ -74,6 +79,7 @@ type ContractInfo struct { type StateSet struct { curContract *ContractInfo bs *state.BlockState + cdb ChainAccessor origin []byte txHash []byte blockHeight uint64 @@ -132,7 +138,7 @@ func newContractInfo(callState *CallState, sender, contractId []byte, rp uint64, } } -func NewContext(blockState *state.BlockState, sender, reciever *state.V, +func NewContext(blockState *state.BlockState, cdb ChainAccessor, sender, reciever *state.V, contractState *state.ContractState, senderID []byte, txHash []byte, blockHeight uint64, timestamp int64, prevBlockHash []byte, node string, confirmed bool, query bool, rp uint64, service int, amount *big.Int) *StateSet { @@ -142,6 +148,7 @@ func NewContext(blockState *state.BlockState, sender, reciever *state.V, stateSet := &StateSet{ curContract: newContractInfo(callState, senderID, 
reciever.ID(), rp, amount), bs: blockState, + cdb: cdb, origin: senderID, txHash: txHash, node: node, @@ -161,7 +168,7 @@ func NewContext(blockState *state.BlockState, sender, reciever *state.V, return stateSet } -func NewContextQuery(blockState *state.BlockState, receiverId []byte, +func NewContextQuery(blockState *state.BlockState, cdb ChainAccessor, receiverId []byte, contractState *state.ContractState, node string, confirmed bool, rp uint64) *StateSet { @@ -170,6 +177,7 @@ func NewContextQuery(blockState *state.BlockState, receiverId []byte, stateSet := &StateSet{ curContract: newContractInfo(callState, nil, receiverId, rp, big.NewInt(0)), bs: blockState, + cdb: cdb, node: node, confirmed: confirmed, timestamp: time.Now().UnixNano(), @@ -199,7 +207,39 @@ func (L *LState) Close() { } } -func newExecutor(contract []byte, contractId []byte, stateSet *StateSet, ci *types.CallInfo, amount *big.Int, isCreate bool) *Executor { +func resolveFunction(contractState *state.ContractState, name string, constructor bool) (*types.Function, error) { + abi, err := GetABI(contractState) + if err != nil { + return nil, err + } + var defaultFunc *types.Function + for _, f := range abi.Functions { + if f.Name == name { + return f, nil + } + if f.Name == "default" { + defaultFunc = f + } + } + if constructor { + return nil, nil + } + if defaultFunc != nil { + return defaultFunc, nil + } + return nil, errors.New("not found function: " + name) +} + +func newExecutor( + contract []byte, + contractId []byte, + stateSet *StateSet, + ci *types.CallInfo, + amount *big.Int, + isCreate bool, + ctrState *state.ContractState, +) *Executor { + if stateSet.callDepth > maxCallDepth { ce := &Executor{ code: contract, @@ -219,7 +259,7 @@ func newExecutor(contract []byte, contractId []byte, stateSet *StateSet, ci *typ ctrLog.Error().Err(ce.err).Str("contract", types.EncodeAddress(contractId)).Msg("new AergoLua executor") return ce } - bakupService := stateSet.service + backupService := stateSet.service stateSet.service = -1 hexId := C.CString(hex.EncodeToString(contractId)) defer C.free(unsafe.Pointer(hexId)) @@ -235,43 +275,53 @@ func newExecutor(contract []byte, contractId []byte, stateSet *StateSet, ci *typ ctrLog.Error().Err(ce.err).Str("contract", types.EncodeAddress(contractId)).Msg("failed to load code") return ce } - stateSet.service = bakupService + stateSet.service = backupService - if isCreate == false { - C.vm_remove_constructor(ce.L) - fname := C.CString(ci.Name) - defer C.free(unsafe.Pointer(fname)) - - var viewFlag, payFlag C.int - resolvedName := C.vm_resolve_function(ce.L, fname, &viewFlag, &payFlag) - if resolvedName == nil { - ce.err = fmt.Errorf("attempt to call global '%s' (a nil value)", ci.Name) - ctrLog.Error().Err(ce.err).Str("contract", types.EncodeAddress(contractId)).Msg("not found function") - return ce - } - - if err := checkPayable(ce.L, resolvedName, &payFlag, amount); err != nil { + if isCreate { + f, err := resolveFunction(ctrState, "constructor", isCreate) + if err != nil { ce.err = err - ctrLog.Error().Err(ce.err).Str("contract", types.EncodeAddress(contractId)).Msg("check payable function") + ctrLog.Error().Err(ce.err).Str("contract", types.EncodeAddress(contractId)).Msg("not found function") return ce } - if viewFlag != C.int(0) { - ce.isView = true + if f == nil { + f = &types.Function{ + Name: "constructor", + Payable: false, + } } - C.vm_get_abi_function(ce.L, resolvedName) - ce.numArgs = C.int(len(ci.Args) + 1) - } else { - if err := checkPayable(ce.L, C.construct_name, nil, 
amount); err != nil { - ce.err = errVmConstructorIsNotPayable + err = checkPayable(f, amount) + if err != nil { + ce.err = err ctrLog.Error().Err(ce.err).Str("contract", types.EncodeAddress(contractId)).Msg("check payable function") return ce } + ce.isView = f.View C.vm_get_constructor(ce.L) if C.vm_isnil(ce.L, C.int(-1)) == 1 { ce.close() return nil } ce.numArgs = C.int(len(ci.Args)) + } else { + C.vm_remove_constructor(ce.L) + f, err := resolveFunction(ctrState, ci.Name, isCreate) + if err != nil { + ce.err = err + ctrLog.Error().Err(ce.err).Str("contract", types.EncodeAddress(contractId)).Msg("not found function") + return ce + } + err = checkPayable(f, amount) + if err != nil { + ce.err = err + ctrLog.Error().Err(ce.err).Str("contract", types.EncodeAddress(contractId)).Msg("check payable function") + return ce + } + ce.isView = f.View + resolvedName := C.CString(f.Name) + C.vm_get_abi_function(ce.L, resolvedName) + C.free(unsafe.Pointer(resolvedName)) + ce.numArgs = C.int(len(ci.Args) + 1) } ce.processArgs(ci) if ce.err != nil { @@ -387,20 +437,11 @@ func toLuaTable(L *LState, tab map[string]interface{}) error { return nil } -func checkPayable(L *LState, fname *C.char, flag *C.int, amount *big.Int) error { - if amount.Cmp(big.NewInt(0)) <= 0 { +func checkPayable(callee *types.Function, amount *big.Int) error { + if amount.Cmp(big.NewInt(0)) <= 0 || callee.Payable { return nil } - var payableFlag C.int - if flag == nil { - payableFlag = C.vm_is_payable_function(L, fname) - } else { - payableFlag = *flag - } - if payableFlag == C.int(0) { - return fmt.Errorf("'%v' is not payable", C.GoString(fname)) - } - return nil + return fmt.Errorf("'%s' is not payable", callee.Name) } func (ce *Executor) call(target *LState) C.int { @@ -558,26 +599,25 @@ func Call(contractState *state.ContractState, code, contractAddress []byte, } curStateSet[stateSet.service] = stateSet - ce := newExecutor(contract, contractAddress, stateSet, &ci, stateSet.curContract.amount, false) + ce := newExecutor(contract, contractAddress, stateSet, &ci, stateSet.curContract.amount, false, contractState) defer ce.close() - ce.setCountHook(callMaxInstLimit) ce.call(nil) err = ce.err - if err == nil { - err = ce.commitCalledContract() - if err != nil { - logger.Error().Err(err).Str("contract", types.EncodeAddress(contractAddress)).Msg("commit state") - } - } else { + if err != nil { if dbErr := ce.rollbackToSavepoint(); dbErr != nil { - logger.Error().Err(err).Str("contract", types.EncodeAddress(contractAddress)).Msg("rollback state") + logger.Error().Err(dbErr).Str("contract", types.EncodeAddress(contractAddress)).Msg("rollback state") err = dbErr } + return "", ce.getEvents(), stateSet.usedFee(), err } - - return ce.jsonRet, ce.getEvents(), stateSet.usedFee(), err + err = ce.commitCalledContract() + if err != nil { + logger.Error().Err(err).Str("contract", types.EncodeAddress(contractAddress)).Msg("commit state") + return "", ce.getEvents(), stateSet.usedFee(), err + } + return ce.jsonRet, ce.getEvents(), stateSet.usedFee(), nil } func setRandomSeed(stateSet *StateSet) { @@ -666,7 +706,7 @@ func PreloadEx(bs *state.BlockState, contractState *state.ContractState, contrac if ctrLog.IsDebugEnabled() { ctrLog.Debug().Str("abi", string(code)).Str("contract", types.EncodeAddress(contractAddress)).Msg("preload") } - ce := newExecutor(contractCode, contractAddress, stateSet, &ci, stateSet.curContract.amount, false) + ce := newExecutor(contractCode, contractAddress, stateSet, &ci, stateSet.curContract.amount, false, contractState) 
ce.setCountHook(callMaxInstLimit) return ce, nil @@ -739,34 +779,30 @@ func Create(contractState *state.ContractState, code, contractAddress []byte, return "", nil, stateSet.usedFee(), newDbSystemError(errors.New("can't open a database connection")) } - var ce *Executor - ce = newExecutor(contract, contractAddress, stateSet, &ci, stateSet.curContract.amount, true) + ce := newExecutor(contract, contractAddress, stateSet, &ci, stateSet.curContract.amount, true, contractState) if ce == nil { return "", nil, stateSet.usedFee(), nil } defer ce.close() - ce.setCountHook(callMaxInstLimit) + ce.call(nil) err = ce.err - if err != nil { - logger.Warn().Err(err).Msg("constructor is failed") - ret, _ := json.Marshal("constructor call error:" + err.Error()) + logger.Warn().Msg("constructor is failed") if dbErr := ce.rollbackToSavepoint(); dbErr != nil { - logger.Error().Err(dbErr).Msg("constructor is failed") - return string(ret), ce.getEvents(), stateSet.usedFee(), dbErr + logger.Error().Err(dbErr).Msg("rollback state") + err = dbErr } - return string(ret), ce.getEvents(), stateSet.usedFee(), err + return "", ce.getEvents(), stateSet.usedFee(), err } err = ce.commitCalledContract() if err != nil { - ret, _ := json.Marshal("constructor call error:" + err.Error()) - logger.Error().Err(err).Msg("constructor is failed") - return string(ret), ce.getEvents(), stateSet.usedFee(), err + logger.Warn().Msg("constructor is failed") + logger.Error().Err(err).Msg("commit state") + return "", ce.getEvents(), stateSet.usedFee(), err } - - return ce.jsonRet, ce.getEvents(), stateSet.usedFee(), err + return ce.jsonRet, ce.getEvents(), stateSet.usedFee(), nil } func setQueryContext(stateSet *StateSet) { @@ -793,7 +829,7 @@ func setQueryContext(stateSet *StateSet) { } } -func Query(contractAddress []byte, bs *state.BlockState, contractState *state.ContractState, queryInfo []byte) (res []byte, err error) { +func Query(contractAddress []byte, bs *state.BlockState, cdb ChainAccessor, contractState *state.ContractState, queryInfo []byte) (res []byte, err error) { var ci types.CallInfo contract := getContract(contractState, nil) if contract != nil { @@ -807,16 +843,14 @@ func Query(contractAddress []byte, bs *state.BlockState, contractState *state.Co return } - var ce *Executor - - stateSet := NewContextQuery(bs, contractAddress, contractState, "", true, + stateSet := NewContextQuery(bs, cdb, contractAddress, contractState, "", true, contractState.SqlRecoveryPoint) setQueryContext(stateSet) if ctrLog.IsDebugEnabled() { ctrLog.Debug().Str("abi", string(queryInfo)).Str("contract", types.EncodeAddress(contractAddress)).Msg("query") } - ce = newExecutor(contract, contractAddress, stateSet, &ci, stateSet.curContract.amount, false) + ce := newExecutor(contract, contractAddress, stateSet, &ci, stateSet.curContract.amount, false, contractState) defer ce.close() defer func() { if dbErr := ce.rollbackToSavepoint(); dbErr != nil { diff --git a/contract/vm_callback.go b/contract/vm_callback.go index 98e21d566..12786360a 100644 --- a/contract/vm_callback.go +++ b/contract/vm_callback.go @@ -15,6 +15,7 @@ import ( "encoding/hex" "errors" "fmt" + "github.com/aergoio/aergo/internal/common" "index/suffixarray" "math/big" "regexp" @@ -73,11 +74,52 @@ func LuaSetDB(L *LState, service *C.int, key *C.char, value *C.char) *C.char { } //export LuaGetDB -func LuaGetDB(L *LState, service *C.int, key *C.char) (*C.char, *C.char) { +func LuaGetDB(L *LState, service *C.int, key *C.char, blkno *C.char) (*C.char, *C.char) { stateSet := 
curStateSet[*service] if stateSet == nil { return nil, C.CString("[System.LuaGetDB] contract state not found") } + if blkno != nil { + bigNo, _ := new(big.Int).SetString(strings.TrimSpace(C.GoString(blkno)), 10) + if bigNo == nil || bigNo.Sign() < 0 { + return nil, C.CString("[System.LuaGetDB] invalid blockheight value :"+C.GoString(blkno)) + } + blkNo := bigNo.Uint64() + + chainBlockHeight := stateSet.blockHeight + if chainBlockHeight == 0 { + bestBlock, err := stateSet.cdb.GetBestBlock() + if err != nil { + return nil, C.CString("[System.LuaGetDB] get best block error") + } + chainBlockHeight = bestBlock.GetHeader().GetBlockNo() + } + if blkNo < chainBlockHeight { + blk, err := stateSet.cdb.GetBlockByNo(blkNo) + if err != nil { + return nil, C.CString(err.Error()) + } + accountId := types.ToAccountID(stateSet.curContract.contractId) + contractProof, err := stateSet.bs.GetAccountAndProof(accountId[:], blk.GetHeader().GetBlocksRootHash(), false) + if err != nil { + return nil, C.CString("[System.LuaGetDB] failed to get snapshot state for account") + } else if contractProof.Inclusion { + trieKey := common.Hasher([]byte(C.GoString(key))) + varProof, err := stateSet.bs.GetVarAndProof(trieKey, contractProof.GetState().GetStorageRoot(), false) + if err != nil { + return nil, C.CString("[System.LuaGetDB] failed to get snapshot state variable in contract") + } + if varProof.Inclusion { + if len(varProof.GetValue()) == 0 { + return nil, nil + } + return C.CString(string(varProof.GetValue())), nil + } + } + return nil, nil + } + } + data, err := stateSet.curContract.callState.ctrState.GetData([]byte(C.GoString(key))) if err != nil { return nil, C.CString(err.Error()) @@ -192,7 +234,7 @@ func LuaCallContract(L *LState, service *C.int, contractId *C.char, fname *C.cha return -1, C.CString("[Contract.LuaCallContract] invalid arguments: " + err.Error()) } - ce := newExecutor(callee, cid, stateSet, &ci, amountBig, false) + ce := newExecutor(callee, cid, stateSet, &ci, amountBig, false, callState.ctrState) defer ce.close() if ce.err != nil { @@ -278,7 +320,7 @@ func LuaDelegateCallContract(L *LState, service *C.int, contractId *C.char, return -1, C.CString("[Contract.LuaDelegateCallContract] invalid arguments: " + err.Error()) } - ce := newExecutor(contract, cid, stateSet, &ci, zeroBig, false) + ce := newExecutor(contract, cid, stateSet, &ci, zeroBig, false, contractState) defer ce.close() if ce.err != nil { @@ -364,7 +406,7 @@ func LuaSendAmount(L *LState, service *C.int, contractId *C.char, amount *C.char return C.CString("[Contract.LuaSendAmount] cannot find contract:" + C.GoString(contractId)) } - ce := newExecutor(code, cid, stateSet, &ci, amountBig, false) + ce := newExecutor(code, cid, stateSet, &ci, amountBig, false, callState.ctrState) defer ce.close() if ce.err != nil { return C.CString("[Contract.LuaSendAmount] newExecutor error: " + ce.err.Error()) @@ -919,7 +961,7 @@ func LuaDeployContract( return -1, C.CString("[Contract.LuaDeployContract]:" + err.Error()) } - ce := newExecutor(runCode, newContract.ID(), stateSet, &ci, amountBig, true) + ce := newExecutor(runCode, newContract.ID(), stateSet, &ci, amountBig, true, contractState) if ce != nil { defer ce.close() if ce.err != nil { diff --git a/contract/vm_dummy.go b/contract/vm_dummy.go index 474625d60..64e5aabb8 100644 --- a/contract/vm_dummy.go +++ b/contract/vm_dummy.go @@ -109,8 +109,16 @@ func (bc *DummyChain) GetStaking(name string) (*types.Staking, error) { return system.GetStaking(scs, strHash(name)) } +func (bc *DummyChain) 
GetBlockByNo(blockNo types.BlockNo) (*types.Block, error) { + return bc.blocks[blockNo], nil +} + +func (bc *DummyChain) GetBestBlock() (*types.Block, error) { + return bc.bestBlock, nil +} + type luaTx interface { - run(bs *state.BlockState, blockNo uint64, ts int64, prevBlockHash []byte, receiptTx db.Transaction) error + run(bs *state.BlockState, bc *DummyChain, blockNo uint64, ts int64, prevBlockHash []byte, receiptTx db.Transaction) error } type luaTxAccount struct { @@ -132,7 +140,7 @@ func NewLuaTxAccountBig(name string, balance *big.Int) *luaTxAccount { } } -func (l *luaTxAccount) run(bs *state.BlockState, blockNo uint64, ts int64, prevBlockHash []byte, +func (l *luaTxAccount) run(bs *state.BlockState, bc *DummyChain, blockNo uint64, ts int64, prevBlockHash []byte, receiptTx db.Transaction) error { id := types.ToAccountID(l.name) @@ -160,7 +168,7 @@ func NewLuaTxSendBig(sender, receiver string, balance *big.Int) *luaTxSend { } } -func (l *luaTxSend) run(bs *state.BlockState, blockNo uint64, ts int64, prevBlockHash []byte, +func (l *luaTxSend) run(bs *state.BlockState, bc *DummyChain, blockNo uint64, ts int64, prevBlockHash []byte, receiptTx db.Transaction) error { senderID := types.ToAccountID(l.sender) @@ -247,7 +255,7 @@ func getCompiledABI(code string) ([]byte, error) { return b[4+codeLen:], nil } -func NewRawLuaTxDef(sender, contract string, amount uint64, code string) *luaTxDef { +func NewRawLuaTxDefBig(sender, contract string, amount *big.Int, code string) *luaTxDef { byteAbi, err := getCompiledABI(code) if err != nil { @@ -266,7 +274,7 @@ func NewRawLuaTxDef(sender, contract string, amount uint64, code string) *luaTxD sender: strHash(sender), contract: strHash(contract), code: payload, - amount: new(big.Int).SetUint64(amount), + amount: amount, id: newTxId(), }, cErr: nil, @@ -350,7 +358,7 @@ func contractFrame(l *luaTxCommon, bs *state.BlockState, } -func (l *luaTxDef) run(bs *state.BlockState, blockNo uint64, ts int64, prevBlockHash []byte, +func (l *luaTxDef) run(bs *state.BlockState, bc *DummyChain, blockNo uint64, ts int64, prevBlockHash []byte, receiptTx db.Transaction) error { if l.cErr != nil { @@ -361,7 +369,7 @@ func (l *luaTxDef) run(bs *state.BlockState, blockNo uint64, ts int64, prevBlock func(sender, contract *state.V, contractId types.AccountID, eContractState *state.ContractState) error { contract.State().SqlRecoveryPoint = 1 - stateSet := NewContext(bs, sender, contract, eContractState, sender.ID(), + stateSet := NewContext(bs, nil, sender, contract, eContractState, sender.ID(), l.hash(), blockNo, ts, prevBlockHash, "", true, false, contract.State().SqlRecoveryPoint, ChainService, l.luaTxCommon.amount) @@ -419,15 +427,14 @@ func (l *luaTxCall) Fail(expectedErr string) *luaTxCall { return l } -func (l *luaTxCall) run(bs *state.BlockState, blockNo uint64, ts int64, prevBlockHash []byte, +func (l *luaTxCall) run(bs *state.BlockState, bc *DummyChain, blockNo uint64, ts int64, prevBlockHash []byte, receiptTx db.Transaction) error { err := contractFrame(&l.luaTxCommon, bs, func(sender, contract *state.V, contractId types.AccountID, eContractState *state.ContractState) error { - stateSet := NewContext(bs, sender, contract, eContractState, sender.ID(), + stateSet := NewContext(bs, bc, sender, contract, eContractState, sender.ID(), l.hash(), blockNo, ts, prevBlockHash, "", true, false, contract.State().SqlRecoveryPoint, ChainService, l.luaTxCommon.amount) rv, evs, _, err := Call(eContractState, l.code, l.contract, stateSet) - _ = bs.StageContractState(eContractState) 
if err != nil { r := types.NewReceipt(l.contract, err.Error(), "") r.TxHash = l.hash() @@ -435,6 +442,7 @@ func (l *luaTxCall) run(bs *state.BlockState, blockNo uint64, ts int64, prevBloc receiptTx.Set(l.hash(), b) return err } + _ = bs.StageContractState(eContractState) r := types.NewReceipt(l.contract, "SUCCESS", rv) r.Events = evs r.TxHash = l.hash() @@ -466,7 +474,7 @@ func (bc *DummyChain) ConnectBlock(txs ...luaTx) error { defer tx.Commit() for _, x := range txs { - if err := x.run(blockState, bc.cBlock.Header.BlockNo, bc.cBlock.Header.Timestamp, + if err := x.run(blockState, bc, bc.cBlock.Header.BlockNo, bc.cBlock.Header.Timestamp, bc.cBlock.Header.PrevBlockHash, tx); err != nil { return err } @@ -482,6 +490,7 @@ func (bc *DummyChain) ConnectBlock(txs ...luaTx) error { //FIXME newblock must be created after sdb.apply() bc.cBlock.SetBlocksRootHash(bc.sdb.GetRoot()) bc.bestBlockNo = bc.bestBlockNo + 1 + bc.bestBlock = bc.cBlock bc.bestBlockId = types.ToBlockID(bc.cBlock.BlockHash()) bc.blockIds = append(bc.blockIds, bc.bestBlockId) bc.blocks = append(bc.blocks, bc.cBlock) @@ -504,7 +513,7 @@ func (bc *DummyChain) DisConnectBlock() error { if bestBlock != nil { sroot = bestBlock.GetHeader().GetBlocksRootHash() } - return bc.sdb.Rollback(sroot) + return bc.sdb.SetRoot(sroot) } func (bc *DummyChain) Query(contract, queryInfo, expectedErr string, expectedRvs ...string) error { @@ -512,7 +521,7 @@ func (bc *DummyChain) Query(contract, queryInfo, expectedErr string, expectedRvs if err != nil { return err } - rv, err := Query(strHash(contract), bc.newBState(), cState, []byte(queryInfo)) + rv, err := Query(strHash(contract), bc.newBState(), bc, cState, []byte(queryInfo)) if expectedErr != "" { if err == nil { return fmt.Errorf("no error, expected: %s", expectedErr) @@ -541,7 +550,7 @@ func (bc *DummyChain) QueryOnly(contract, queryInfo string) (string, error) { if err != nil { return "", err } - rv, err := Query(strHash(contract), bc.newBState(), cState, []byte(queryInfo)) + rv, err := Query(strHash(contract), bc.newBState(), nil, cState, []byte(queryInfo)) if err != nil { return "", err diff --git a/contract/vm_test.go b/contract/vm_test.go index f068c2605..e3ad40ed7 100644 --- a/contract/vm_test.go +++ b/contract/vm_test.go @@ -514,10 +514,13 @@ function infiniteLoop() end return t end +function infiniteCall() + infiniteCall() +end function catch() return pcall(infiniteLoop) end -abi.register(infiniteLoop, catch)` +abi.register(infiniteLoop, infiniteCall, catch)` err = bc.ConnectBlock( NewLuaTxAccount("ktlee", 100), @@ -557,6 +560,23 @@ abi.register(infiniteLoop, catch)` if err != nil && !strings.Contains(err.Error(), errMsg) { t.Error(err) } + + err = bc.ConnectBlock( + NewLuaTxCall( + "ktlee", + "loop", + 0, + `{"Name":"infiniteCall"}`, + ), + ) + errMsg = "stack overflow" + if err == nil { + t.Errorf("expected: %s", errMsg) + } + if err != nil && !strings.Contains(err.Error(), errMsg) { + t.Error(err) + } + } func TestUpdateSize(t *testing.T) { @@ -1935,6 +1955,26 @@ func TestArray(t *testing.T) { if err != nil { t.Error(err) } + overflow := ` + state.var{ + counts = state.array(1000000000000) + } + + function get() + return "hello" + end + + abi.register(get) + ` + err = bc.ConnectBlock( + NewLuaTxDef("ktlee", "overflow", 0, overflow), + ) + errMsg := "integer expected, got number" + if err == nil { + t.Errorf("expected: '%s', but got: nil", errMsg) + } else if !strings.Contains(err.Error(), errMsg) { + t.Errorf("expected: %s, but got: %s", errMsg, err.Error()) + } } func TestPcall(t 
*testing.T) { @@ -2396,7 +2436,7 @@ func TestMapKey(t *testing.T) { err = bc.ConnectBlock( NewLuaTxCall("ktlee", "a", 0, `{"Name":"setCount", "Args":[1, 10]}`), - NewLuaTxCall("ktlee", "a", 0, `{"Name":"setCount", "Args":["1", 20]}`).Fail("number expected, got string)"), + NewLuaTxCall("ktlee", "a", 0, `{"Name":"setCount", "Args":["1", 20]}`).Fail("(number expected, got string)"), NewLuaTxCall("ktlee", "a", 0, `{"Name":"setCount", "Args":[1.1, 30]}`), ) if err != nil { @@ -2417,7 +2457,7 @@ func TestMapKey(t *testing.T) { err = bc.ConnectBlock( NewLuaTxCall("ktlee", "a", 0, `{"Name":"setCount", "Args":[true, 40]}`, - ).Fail(`bad argument #2 to '__newindex' (number expected, got boolean)`), + ).Fail(`invalid key type: 'boolean', state.map: 'counts'`), ) if err != nil { t.Error(err) @@ -2802,9 +2842,9 @@ abi.payable(save) NewLuaTxDef("ktlee", "payable", 1, src), ) if err == nil { - t.Error("expected: " + errVmConstructorIsNotPayable.Error()) + t.Error("expected: 'constructor' is not payable") } else { - if !strings.Contains(err.Error(), errVmConstructorIsNotPayable.Error()) { + if !strings.Contains(err.Error(), "'constructor' is not payable") { t.Error(err) } } @@ -3780,7 +3820,7 @@ func TestContractSend(t *testing.T) { t.Error(err) } err = bc.ConnectBlock( - NewLuaTxCall("ktlee", "test1", 0, fmt.Sprintf(`{"Name":"send", "Args":["%s"]}`, types.EncodeAddress(strHash("test3")))).Fail(`[Contract.LuaSendAmount] newExecutor error: attempt to call global 'default' (a nil value)`), + NewLuaTxCall("ktlee", "test1", 0, fmt.Sprintf(`{"Name":"send", "Args":["%s"]}`, types.EncodeAddress(strHash("test3")))).Fail(`[Contract.LuaSendAmount] newExecutor error: not found function: default`), ) if err != nil { t.Error(err) @@ -3791,6 +3831,7 @@ func TestContractSend(t *testing.T) { if err != nil { t.Error(err) } + err = bc.ConnectBlock( NewLuaTxCall("ktlee", "test1", 0, fmt.Sprintf(`{"Name":"send", "Args":["%s"]}`, types.EncodeAddress(strHash("ktlee")))), ) @@ -4006,7 +4047,7 @@ abi.register(key_table, key_func, key_statemap, key_statearray, key_statevalue, } err = bc.ConnectBlock( NewLuaTxCall("ktlee", "invalidkey", 0, `{"Name":"key_statemap"}`).Fail( - "cannot use 'table' as a key", + "cannot use 'userdata' as a key", ), ) if err != nil { @@ -4022,7 +4063,7 @@ abi.register(key_table, key_func, key_statemap, key_statearray, key_statevalue, } err = bc.ConnectBlock( NewLuaTxCall("ktlee", "invalidkey", 0, `{"Name":"key_statevalue"}`).Fail( - "cannot use 'table' as a key", + "cannot use 'userdata' as a key", ), ) if err != nil { @@ -4038,7 +4079,7 @@ abi.register(key_table, key_func, key_statemap, key_statearray, key_statevalue, } err = bc.ConnectBlock( NewLuaTxCall("ktlee", "invalidkey", 0, `{"Name":"key_nil"}`).Fail( - "bad argument #2 to '__newindex' (number or string expected)", + "invalid key type: 'nil', state.map: 'h'", ), ) if err != nil { @@ -4345,4 +4386,79 @@ abi.payable(pcall1, default, constructor) } } +func TestSnapshot(t *testing.T) { + bc, err := LoadDummyChain() + if err != nil { + t.Errorf("failed to create test database: %v", err) + } + definition := ` + state.var{ + counts = state.map(), + data = state.value(), + array = state.array(10) + } + + function inc() + a = system.getItem("key1") + if (a == nil) then + system.setItem("key1", 1) + return + end + system.setItem("key1", a + 1) + counts["key1"] = a + 1 + data:set(a+1) + array[1] = a + 1 + end + function query(a) + return system.getItem("key1", a), state.getsnap(counts, "key1", a), state.getsnap(data,a), state.getsnap(array, 1, a) + end + 
function query2() + return state.getsnap(array, 1) + end + abi.register(inc, query, query2) + abi.payable(inc)` + + err = bc.ConnectBlock( + NewLuaTxAccount("ktlee", 100), + NewLuaTxDef("ktlee", "snap", 0, definition), + ) + if err != nil { + t.Error(err) + } + err = bc.ConnectBlock( + NewLuaTxCall("ktlee", "snap", 0, `{"Name": "inc", "Args":[]}`), + ) + if err != nil { + t.Error(err) + } + err = bc.ConnectBlock( + NewLuaTxCall("ktlee", "snap", 0, `{"Name": "inc", "Args":[]}`), + ) + if err != nil { + t.Error(err) + } + err = bc.ConnectBlock( + NewLuaTxCall("ktlee", "snap", 0, `{"Name": "inc", "Args":[]}`), + ) + if err != nil { + t.Error(err) + } + err = bc.Query("snap", `{"Name":"query"}`, "", "[3,3,3,3]") + if err != nil { + t.Error(err) + } + err = bc.Query("snap", `{"Name":"query", "Args":[2]}`, "", "[1,{},{},{}]") + if err != nil { + t.Error(err) + } + err = bc.Query("snap", `{"Name":"query", "Args":[3]}`, "", "[2,2,2,2]") + if err != nil { + t.Error(err) + } + err = bc.Query("snap", `{"Name":"query2", "Args":[]}`, + "invalid argument at getsnap, need (state.array, index, blockheight)", "") + if err != nil { + t.Error(err) + } +} // end of test-cases diff --git a/glide.lock b/glide.lock deleted file mode 100644 index db87c1bb8..000000000 --- a/glide.lock +++ /dev/null @@ -1,560 +0,0 @@ -hash: d159573adefc8682d7a81bf843426f1b152518aeef4c894b7c05b8d8ab06b24c -updated: 2019-03-08T15:40:19.009606+09:00 -imports: -- name: github.com/aergoio/aergo-actor - version: 562037d5fec70391e3c047a5293b49a443237386 - subpackages: - - actor - - eventstream - - internal/queue/goring - - internal/queue/mpsc - - mailbox - - router -- name: github.com/aergoio/aergo-lib - version: f822f6e881ffc7eba41983b88be30217cf19cb79 - subpackages: - - config - - db - - log -- name: github.com/aergoio/etcd - version: 197cce8d36e4826d252c024d4e4025d7ac78385b - subpackages: - - etcdserver/stats - - pkg/crc - - pkg/fileutil - - pkg/httputil - - pkg/ioutil - - pkg/logutil - - pkg/pbutil - - pkg/tlsutil - - pkg/transport - - pkg/types - - raft - - raft/raftpb - - rafthttp - - snap - - snap/snappb - - version - - wal - - wal/walpb -- name: github.com/agl/ed25519 - version: 5312a61534124124185d41f09206b9fef1d88403 - subpackages: - - edwards25519 - - extra25519 -- name: github.com/anaskhan96/base58check - version: b05365d494c4bd8b77208dfa671c609ad534b443 -- name: github.com/AndreasBriese/bbloom - version: 343706a395b76e5ca5c7dca46a5d937b48febc74 -- name: github.com/apache/thrift - version: a34f78385f3cd5105d9d82a8922837ffab56e281 - subpackages: - - lib/go/thrift -- name: github.com/beorn7/perks - version: 3a771d992973f24aa725d07868b467d1ddfceafb - subpackages: - - quantile -- name: github.com/btcsuite/btcd - version: 306aecffea325e97f513b3ff0cf7895a5310651d - subpackages: - - btcec -- name: github.com/c-bata/go-prompt - version: df16feb8c1cbf7cffd957596d8428fe5b2a4c738 - subpackages: - - internal/bisect - - internal/debug - - internal/strings - - internal/term -- name: github.com/coreos/go-semver - version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6 - subpackages: - - semver -- name: github.com/coreos/go-systemd - version: d2196463941895ee908e13531a23a39feb9e1243 - subpackages: - - daemon - - journal - - util -- name: github.com/coreos/pkg - version: 3ac0863d7acf3bc44daf49afef8919af12f704ef - subpackages: - - capnslog -- name: github.com/DataDog/zstd - version: 1e382f59b41eebd6f592c5db4fd1958ec38a0eba -- name: github.com/davecgh/go-spew - version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 - subpackages: - - spew -- name: 
github.com/derekparker/trie - version: e608c2733dc704cd4a73f825f4acab8f3c3d4d15 -- name: github.com/dgryski/go-farm - version: 3adb47b1fb0f6d9efcc5051a6c62e2e413ac85a9 -- name: github.com/eapache/go-resiliency - version: 842e16ec2c98ef0c59eebfe60d2d3500a793ba19 - subpackages: - - breaker -- name: github.com/eapache/go-xerial-snappy - version: 776d5712da21bc4762676d614db1d8a64f4238b0 -- name: github.com/eapache/queue - version: 093482f3f8ce946c05bcba64badd2c82369e084d -- name: github.com/emirpasic/gods - version: 729073a73ce2057955fafa2a8f0ac62b99e950c9 - subpackages: - - containers - - lists - - lists/singlylinkedlist - - stacks - - stacks/linkedliststack - - utils -- name: github.com/fd/go-nat - version: e3ba0d89e7d9f0a458bf08baae8db007eb7d242d -- name: github.com/fsnotify/fsnotify - version: ccc981bf80385c528a65fbfdd49bf2d8da22aa23 -- name: github.com/funkygao/golib - version: 90d4905c196196cce46e129063a1658fc20619de - subpackages: - - threadlocal -- name: github.com/go-logfmt/logfmt - version: 07c9b44f60d7ffdfb7d8efe1ad539965737836dc -- name: github.com/gofrs/uuid - version: 7077aa61129615a0d7f45c49101cd011ab221c27 -- name: github.com/gogo/protobuf - version: 636bf0302bc95575d69441b25a2603156ffdddf1 - subpackages: - - gogoproto - - io - - jsonpb - - proto - - protoc-gen-gogo/descriptor - - sortkeys - - types -- name: github.com/golang/mock - version: 51421b967af1f557f93a59e0057aaf15ca02e29c - subpackages: - - gomock -- name: github.com/golang/protobuf - version: b4deda0973fb4c70b50d226b1af49f3da59f5265 - subpackages: - - proto - - ptypes - - ptypes/any - - ptypes/duration - - ptypes/timestamp -- name: github.com/golang/snappy - version: 2a8bb927dd31d8daada140a5d09578521ce5c36a -- name: github.com/google/uuid - version: 9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8 -- name: github.com/gorilla/websocket - version: 4201258b820c74ac8e6922fc9e6b52f71fe46f8d -- name: github.com/grpc-ecosystem/grpc-opentracing - version: 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 - subpackages: - - go/otgrpc -- name: github.com/gxed/eventfd - version: 80a92cca79a8041496ccc9dd773fcb52a57ec6f9 -- name: github.com/gxed/GoEndian - version: 0f5c6873267e5abf306ffcdfcfa4bf77517ef4a7 -- name: github.com/gxed/hashland - version: a72cc0875a1e95edd309d3134bc7c11bf2d7360b - subpackages: - - keccakpg - - murmur3 -- name: github.com/hashicorp/golang-lru - version: 0fb14efe8c47ae851c0034ed7a448854d3d34cf3 - subpackages: - - simplelru -- name: github.com/hashicorp/hcl - version: 65a6292f0157eff210d03ed1bf6c59b190b8b906 - subpackages: - - hcl/ast - - hcl/parser - - hcl/printer - - hcl/scanner - - hcl/strconv - - hcl/token - - json/parser - - json/scanner - - json/token -- name: github.com/huin/goupnp - version: 656e61dfadd241c7cbdd22a023fa81ecb6860ea8 - subpackages: - - dcps/internetgateway1 - - dcps/internetgateway2 - - httpu - - scpd - - soap - - ssdp -- name: github.com/improbable-eng/grpc-web - version: f683dbb3b587cfcc858dcbc2fce782c610fc4979 - subpackages: - - go/grpcweb -- name: github.com/inconshreveable/mousetrap - version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -- name: github.com/ipfs/go-log - version: 8924f37936b0db0ffa60a1b0aba3ae973ed8c907 - subpackages: - - tracer - - tracer/wire - - writer -- name: github.com/jackpal/gateway - version: cbcf4e3f3baee7952fc386c8b2534af4d267c875 -- name: github.com/jackpal/go-nat-pmp - version: d89d09f6f3329bc3c2479aa3cafd76a5aa93a35c -- name: github.com/jbenet/go-temp-err-catcher - version: aac704a3f4f27190b4ccc05f303a4931fd1241ff -- name: github.com/jbenet/goprocess - version: 
b497e2f366b8624394fb2e89c10ab607bebdde0b - subpackages: - - context - - periodic - - ratelimit -- name: github.com/kr/logfmt - version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 -- name: github.com/libp2p/go-addr-util - version: c4f1391488730ec35afad51f1f6ce8fca622bc11 -- name: github.com/libp2p/go-buffer-pool - version: a8d831235797607f736331e5f8c702e92550fb7b -- name: github.com/libp2p/go-conn-security - version: b2fb4ac68c410f0365f4aa807bde5cfdcb73acdc - subpackages: - - insecure -- name: github.com/libp2p/go-conn-security-multistream - version: 104c8a4422af6d10f488a14b64a43d7c92be0475 -- name: github.com/libp2p/go-flow-metrics - version: 7e5a55af485341567f98d6847a373eb5ddcdcd43 -- name: github.com/libp2p/go-libp2p - version: 2787133b0469926b58dd819707cc4297a3ca7306 - subpackages: - - config - - p2p/host/basic - - p2p/protocol/identify - - p2p/protocol/identify/pb -- name: github.com/libp2p/go-libp2p-circuit - version: 028b1071af2e97304d2a3d9d456ba867ca5ba7d9 - subpackages: - - pb -- name: github.com/libp2p/go-libp2p-crypto - version: 274de1bb6c27780863df6b230c91324ab481dab2 - subpackages: - - pb -- name: github.com/libp2p/go-libp2p-host - version: 28ec0a42060315368874a2e09d7cd4dc6102814c -- name: github.com/libp2p/go-libp2p-interface-connmgr - version: 74fba35f582dc5026b6dc1c43a7616f01c4598a9 -- name: github.com/libp2p/go-libp2p-interface-pnet - version: d240acf619f63dfb776821a1d4d28a918f77edd5 -- name: github.com/libp2p/go-libp2p-loggables - version: 4c6f0611053242074f2d6860a80bc9eb599a1f8a -- name: github.com/libp2p/go-libp2p-metrics - version: 1f0f4db0472785ecdf0b4fe57ba6253f9b0635d2 -- name: github.com/libp2p/go-libp2p-nat - version: fcc8db1a9963a3d489abbc632f2c0f404e4ff607 -- name: github.com/libp2p/go-libp2p-net - version: c070e8fb6eade68612d96e0fbace731f89897d33 -- name: github.com/libp2p/go-libp2p-peer - version: 993d742bc29dcf4894b7730ba610fd78900be76c -- name: github.com/libp2p/go-libp2p-peerstore - version: eef8402e045f9a9513070c30d2c4ee78fd54f10b - subpackages: - - addr - - pstoremem -- name: github.com/libp2p/go-libp2p-protocol - version: e34f0d7468b3519bf9bf4e43c1d028ce651eab51 -- name: github.com/libp2p/go-libp2p-secio - version: aa2813e066f68d5ce565bb007a043a2f69ac6148 - subpackages: - - pb -- name: github.com/libp2p/go-libp2p-swarm - version: 67f7e37245d17c8308dcf6ac5f5f0f66797694ef -- name: github.com/libp2p/go-libp2p-transport - version: e8580c8a519d0fd04959a4ab25777d2819801447 -- name: github.com/libp2p/go-libp2p-transport-upgrader - version: 49139764f899e43870cd4c0485a8bbbb89f467db -- name: github.com/libp2p/go-maddr-filter - version: f2e84f9bcf48cffa2188e236ccb596891d6b574b -- name: github.com/libp2p/go-mplex - version: 8ac902b6abdf9f65dbb520de1c9bde952da98469 -- name: github.com/libp2p/go-msgio - version: f8aaa1f70c8b4d3bff511251db3d8fbac7ce2839 -- name: github.com/libp2p/go-reuseport - version: dd0c37d7767bc38280bd9813145b65f8bd560629 - subpackages: - - poll - - singlepoll -- name: github.com/libp2p/go-reuseport-transport - version: 5cdb097c8035e75fc59d12f22509aeb700a272d0 -- name: github.com/libp2p/go-sockaddr - version: a7494d4eefeb607c8bc491cf8850a6e8dbd41cab - subpackages: - - net -- name: github.com/libp2p/go-stream-muxer - version: a3f82916c8ad4fb33755ab42eea6e03f8d754839 -- name: github.com/libp2p/go-tcp-transport - version: 5e52db593970b614b25507ad331d098cdc25998b -- name: github.com/libp2p/go-ws-transport - version: 0b3b66c1345b7db6c2e163c6816e34d6ab4f595a -- name: github.com/magiconair/properties - version: 7757cc9fdb852f7579b24170bcacda2c7471bb6a - 
subpackages: - - assert -- name: github.com/mattn/go-colorable - version: 3a70a971f94a22f2fa562ffcc7a0eb45f5daf045 -- name: github.com/mattn/go-isatty - version: 369ecd8cea9851e459abb67eb171853e3986591e -- name: github.com/mattn/go-runewidth - version: 703b5e6b11ae25aeb2af9ebb5d5fdf8fa2575211 -- name: github.com/mattn/go-tty - version: e4f871175a2f903ed2e6353334fdccb507c1d09e -- name: github.com/matttproud/golang_protobuf_extensions - version: c182affec369e30f25d3eb8cd8a478dee585ae7d - subpackages: - - pbutil -- name: github.com/minio/blake2b-simd - version: 3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4 -- name: github.com/minio/sha256-simd - version: ad98a36ba0da87206e3378c556abbfeaeaa98668 -- name: github.com/mitchellh/mapstructure - version: 3536a929edddb9a5b34bd6861dc4a9647cb459fe -- name: github.com/mr-tron/base58 - version: fe73eb13120270ef478822e38664f5e56dc39547 - subpackages: - - base58 -- name: github.com/multiformats/go-multiaddr - version: 2b4e098f3e0aa2c8bc960f0e4bdc3247efc3749c -- name: github.com/multiformats/go-multiaddr-dns - version: ab7059599c0215374d05ceb716f54d6ed9bb1d5f -- name: github.com/multiformats/go-multiaddr-net - version: cba4f9fea8613343eb7ecc4ddadd8e7298a00c39 -- name: github.com/multiformats/go-multihash - version: 1a04c485626b992b36afcaa599584fdb0770c397 -- name: github.com/multiformats/go-multistream - version: 2b032632ecab1e1b98c8d2391a4f6ab9a6c9e140 -- name: github.com/opentracing-contrib/go-observer - version: a52f2342449246d5bcc273e65cbdcfa5f7d6c63c -- name: github.com/opentracing/opentracing-go - version: 25a84ff92183e2f8ac018ba1db54f8a07b3c0e04 - subpackages: - - ext - - log -- name: github.com/openzipkin-contrib/zipkin-go-opentracing - version: 26cf9707480e6b90e5eff22cf0bbf05319154232 - subpackages: - - flag - - types -- name: github.com/openzipkin/zipkin-go-opentracing - version: f0f479ad013a498e4cbfb369414e5d3880903779 - subpackages: - - flag - - thrift/gen-go/scribe - - thrift/gen-go/zipkincore - - types - - wire -- name: github.com/orcaman/concurrent-map - version: 7ed82d9cb71768a4e3656ee8837c7af568c75459 -- name: github.com/pelletier/go-toml - version: 27c6b39a135b7dc87a14afb068809132fb7a9a8f -- name: github.com/pierrec/lz4 - version: 062282ea0dcff40c9fb8525789eef9644b1fbd6e - subpackages: - - internal/xxh32 -- name: github.com/pkg/errors - version: 27936f6d90f9c8e1145f11ed52ffffbfdb9e0af7 -- name: github.com/pkg/term - version: aa71e9d9e942418fbb97d80895dcea70efed297c - subpackages: - - termios -- name: github.com/pmezard/go-difflib - version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc - subpackages: - - difflib -- name: github.com/prometheus/client_golang - version: 5cec1d0429b02e4323e042eb04dafdb079ddf568 - subpackages: - - prometheus - - prometheus/promhttp -- name: github.com/prometheus/client_model - version: 6f3806018612930941127f2a7c6c453ba2c527d2 - subpackages: - - go -- name: github.com/prometheus/common - version: e3fb1a1acd7605367a2b378bc2e2f893c05174b7 - subpackages: - - expfmt - - internal/bitbucket.org/ww/goautoneg - - model -- name: github.com/prometheus/procfs - version: a6e9df898b1336106c743392c48ee0b71f5c4efa - subpackages: - - xfs -- name: github.com/rcrowley/go-metrics - version: 3113b8401b8a98917cde58f8bbd42a1b1c03b1fd -- name: github.com/rs/cors - version: 76f58f330d76a55c5badc74f6212e8a15e742c77 -- name: github.com/rs/zerolog - version: 6d6350a51143b5c0d0a6a3b736ee2b41315f7269 - subpackages: - - internal/cbor - - internal/json -- name: github.com/serialx/hashring - version: 49a4782e9908fe098c907022a1bd7519c79803d6 -- name: 
github.com/Shopify/sarama - version: 6bc31ae56bad30b3fb2a6c7548aeb32c5051bdd2 -- name: github.com/soheilhy/cmux - version: e09e9389d85d8492d313d73d1469c029e710623f -- name: github.com/spaolacci/murmur3 - version: f09979ecbc725b9e6d41a297405f65e7e8804acc -- name: github.com/spf13/afero - version: f4711e4db9e9a1d3887343acb72b2bbfc2f686f5 - subpackages: - - mem -- name: github.com/spf13/cast - version: 8c9545af88b134710ab1cd196795e7f2388358d7 -- name: github.com/spf13/cobra - version: ef82de70bb3f60c65fb8eebacbb2d122ef517385 -- name: github.com/spf13/jwalterweatherman - version: 94f6ae3ed3bceceafa716478c5fbf8d29ca601a1 -- name: github.com/spf13/pflag - version: 298182f68c66c05229eb03ac171abe6e309ee79a -- name: github.com/spf13/viper - version: 6d33b5a963d922d182c91e8a1c88d81fd150cfd4 -- name: github.com/stretchr/objx - version: c61a9dfcced1815e7d40e214d00d1a8669a9f58c -- name: github.com/stretchr/testify - version: 3f658bd5ac42cc0b5de5b427e95a480d5726ea76 - subpackages: - - assert - - mock -- name: github.com/sunpuyo/badger - version: bb757672e2c7e90b0707ab643972dbfcf638aebf - subpackages: - - options - - protos - - skl - - table - - "y" -- name: github.com/syndtr/goleveldb - version: 4217c9f31f5816db02addc94e56061da77f288d8 - subpackages: - - leveldb - - leveldb/cache - - leveldb/comparer - - leveldb/errors - - leveldb/filter - - leveldb/iterator - - leveldb/journal - - leveldb/memdb - - leveldb/opt - - leveldb/storage - - leveldb/table - - leveldb/util -- name: github.com/whyrusleeping/go-logging - version: 0457bb6b88fc1973573aaf6b5145d8d3ae972390 -- name: github.com/whyrusleeping/go-notifier - version: 097c5d47330ff6a823f67e3515faa13566a62c6f -- name: github.com/whyrusleeping/go-smux-multiplex - version: 40e9838863a304ad821775427dda6f965803526f -- name: github.com/whyrusleeping/go-smux-multistream - version: 8e5c10881353c91b4214466d916ad8429508c056 -- name: github.com/whyrusleeping/go-smux-yamux - version: 28bea8f315d17495c5cad8547b661245486ebeee -- name: github.com/whyrusleeping/mafmt - version: faeb70738e904fdbef70bcf62ec8c38726c16d17 -- name: github.com/whyrusleeping/multiaddr-filter - version: e903e4adabd70b78bc9293b6ee4f359afb3f9f59 -- name: github.com/whyrusleeping/yamux - version: 5364a42fe4b5efa5967c11c8f9b0f049cac0c4a9 -- name: github.com/willf/bitset - version: e553b05586428962bf7058d1044519d87ca72d74 -- name: github.com/willf/bloom - version: 54e3b963ee1652b06c4562cb9b6020ebc6e36e59 -- name: github.com/Workiva/go-datastructures - version: f07cbe3f82ca2fd6e5ab94afce65fe43319f675f - subpackages: - - queue -- name: github.com/xiang90/probing - version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2 -- name: golang.org/x/crypto - version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3 - subpackages: - - bcrypt - - blake2s - - blowfish - - sha3 - - ssh/terminal -- name: golang.org/x/net - version: 66aacef3dd8a676686c7ae3716979581e8b03c47 - subpackages: - - context - - html - - html/atom - - html/charset - - http2 - - http2/hpack - - idna - - internal/timeseries - - lex/httplex - - trace -- name: golang.org/x/sys - version: ebfc5b4631820b793c9010c87fd8fef0f39eb082 - subpackages: - - unix - - windows -- name: golang.org/x/text - version: b19bf474d317b857955b12035d2c5acb57ce8b01 - subpackages: - - encoding - - encoding/charmap - - encoding/htmlindex - - encoding/internal - - encoding/internal/identifier - - encoding/japanese - - encoding/korean - - encoding/simplifiedchinese - - encoding/traditionalchinese - - encoding/unicode - - internal/tag - - internal/utf8internal - - language - - runes - - 
secure/bidirule - - transform - - unicode/bidi - - unicode/norm -- name: golang.org/x/time - version: c06e80d9300e4443158a03817b8a8cb37d230320 - subpackages: - - rate -- name: google.golang.org/genproto - version: 09f6ed296fc66555a25fe4ce95173148778dfa85 - subpackages: - - googleapis/rpc/status -- name: google.golang.org/grpc - version: 168a6198bcb0ef175f7dacec0b8691fc141dc9b8 - subpackages: - - balancer - - balancer/base - - balancer/roundrobin - - codes - - connectivity - - credentials - - encoding - - encoding/proto - - grpclog - - internal - - internal/backoff - - internal/channelz - - internal/grpcrand - - keepalive - - metadata - - naming - - peer - - resolver - - resolver/dns - - resolver/passthrough - - stats - - status - - tap - - transport -- name: gopkg.in/yaml.v2 - version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b -testImports: [] diff --git a/glide.yaml b/glide.yaml deleted file mode 100644 index 9cdae2f81..000000000 --- a/glide.yaml +++ /dev/null @@ -1,108 +0,0 @@ -package: github.com/aergoio/aergo -homepage: https://github.com/aergoio/aergo -license: MIT -excludeDirs: -- p2p/mocks -- message/mocks -import: -- package: github.com/gogo/protobuf - version: ~1.1.0 - subpackages: - - jsonpb - - proto -- package: github.com/golang/protobuf - version: ~1.1.0 - subpackages: - - proto - - ptypes/timestamp -- package: github.com/libp2p/go-libp2p - version: gx/v6.0.13 -- package: github.com/libp2p/go-libp2p-crypto - version: gx/v2.0.1 -- package: github.com/libp2p/go-libp2p-host - version: gx/v3.0.10 -- package: github.com/libp2p/go-libp2p-interface-connmgr - version: gx/v0.0.16 -- package: github.com/libp2p/go-libp2p-nat - version: gx/v0.8.7 -- package: github.com/libp2p/go-libp2p-net - version: gx/v3.0.10 -- package: github.com/libp2p/go-libp2p-peer - version: gx/v2.3.8 -- package: github.com/libp2p/go-libp2p-peerstore - version: gx/v2.0.1 -- package: github.com/libp2p/go-libp2p-protocol - version: gx/v1.0.0 -- package: github.com/libp2p/go-reuseport - version: gx/v0.1.18 -- package: github.com/libp2p/go-reuseport-transport - version: gx/v0.1.10 -- package: github.com/libp2p/go-libp2p-swarm - version: gx/v3.0.12 -- package: github.com/libp2p/go-libp2p-transport-upgrader - version: gx/v0.1.10 -- package: github.com/libp2p/go-tcp-transport - version: gx/v2.0.10 -- package: github.com/mr-tron/base58 - subpackages: - - base58 -- package: github.com/gofrs/uuid - version: ~3.1.2 -- package: github.com/spf13/cobra - version: ~0.0.3 -- package: golang.org/x/net - subpackages: - - context -- package: google.golang.org/grpc - version: ~1.13.0 - subpackages: - - codes - - status -- package: github.com/multiformats/go-multiaddr-net - version: gx/v1.6.3 -- package: github.com/multiformats/go-multistream - version: gx/v0.3.7 -- package: github.com/multiformats/go-multiaddr - version: gx/v1.3.0 -- package: github.com/multiformats/go-multiaddr-dns - version: gx/v0.2.4 -- package: github.com/davecgh/go-spew - version: ~1.1.0 -- package: github.com/rcrowley/go-metrics -- package: github.com/aergoio/aergo-actor -- package: github.com/hashicorp/golang-lru - version: 0fb14efe8c47ae851c0034ed7a448854d3d34cf3 -- package: github.com/improbable-eng/grpc-web - version: ~0.6.2 - subpackages: - - go/grpcweb -- package: github.com/soheilhy/cmux - version: ~0.1.4 -- package: github.com/aergoio/aergo-lib -- package: github.com/minio/sha256-simd - version: ad98a36ba0da87206e3378c556abbfeaeaa98668 -- package: github.com/anaskhan96/base58check -- package: github.com/derekparker/trie - version: 
e608c2733dc704cd4a73f825f4acab8f3c3d4d15 -- package: github.com/golang/mock - version: ~v1.2.0 - subpackages: - - gomock -- package: github.com/openzipkin-contrib/zipkin-go-opentracing - version: v0.3.4 -- package: github.com/grpc-ecosystem/grpc-opentracing - subpackages: - - go/otgrpc -- package: github.com/funkygao/golib - subpackages: - - threadlocal -- package: github.com/willf/bitset - version: =1.1.9 -- package: github.com/willf/bloom - version: =2.0.3 -- package: github.com/aergoio/etcd - version: aergo -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..b2caad241 --- /dev/null +++ b/go.mod @@ -0,0 +1,122 @@ +module github.com/aergoio/aergo + +go 1.12 + +require ( + github.com/BurntSushi/toml v0.3.1 // indirect + github.com/Shopify/sarama v0.0.0-20190226131337-6bc31ae56bad // indirect + github.com/Workiva/go-datastructures v1.0.50 // indirect + github.com/aergoio/aergo-actor v0.0.0-20190219030625-562037d5fec7 + github.com/aergoio/aergo-lib v0.0.0-20190207031344-f822f6e881ff + github.com/aergoio/etcd v0.0.0-20190429013412-e8b3f96f6399 + github.com/anaskhan96/base58check v0.0.0-20181220122047-b05365d494c4 + github.com/apache/thrift v0.0.0-20190225222118-a34f78385f3c // indirect + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect + github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32 + github.com/c-bata/go-prompt v0.0.0-20190223080847-df16feb8c1cb + github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf // indirect + github.com/davecgh/go-spew v1.1.1 + github.com/derekparker/trie v0.0.0-20180212171413-e608c2733dc7 + github.com/emirpasic/gods v0.0.0-20190124120704-729073a73ce2 // indirect + github.com/fd/go-nat v0.0.0-20190212120450-e3ba0d89e7d9 // indirect + github.com/fsnotify/fsnotify v1.4.7 + github.com/funkygao/assert v0.0.0-20160929004900-4a267e33bc79 // indirect + github.com/funkygao/golib v0.0.0-20180314131852-90d4905c1961 + github.com/go-logfmt/logfmt v0.4.0 // indirect + github.com/gofrs/uuid v3.2.0+incompatible + github.com/gogo/protobuf v1.2.1 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/mock v1.2.0 + github.com/golang/protobuf v1.3.0 + github.com/golang/snappy v0.0.1 // indirect + github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c // indirect + github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 + github.com/guptarohit/asciigraph v0.4.1 // indirect + github.com/gxed/GoEndian v0.0.0-20160916112711-0f5c6873267e // indirect + github.com/gxed/eventfd v0.0.0-20160916113412-80a92cca79a8 // indirect + github.com/hashicorp/golang-lru v0.5.1 + github.com/huin/goupnp v1.0.0 // indirect + github.com/improbable-eng/grpc-web v0.0.0-20181031170435-f683dbb3b587 + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jbenet/go-cienv v0.1.0 // indirect + github.com/jbenet/go-randbuf v0.0.0-20160322125720-674640a50e6a // indirect + github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2 // indirect + github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 + github.com/libp2p/go-addr-util v0.0.0-20190226202102-c4f139148873 + github.com/libp2p/go-conn-security v0.0.0-20190226201940-b2fb4ac68c41 // indirect + github.com/libp2p/go-conn-security-multistream v0.0.0-20190226202109-104c8a4422af // indirect + github.com/libp2p/go-flow-metrics v0.0.0-20180906182756-7e5a55af4853 // indirect + github.com/libp2p/go-libp2p v0.0.0-20180924121117-2787133b0469 + 
github.com/libp2p/go-libp2p-blankhost v0.0.1 // indirect + github.com/libp2p/go-libp2p-circuit v0.0.0-20190226203123-028b1071af2e // indirect + github.com/libp2p/go-libp2p-crypto v0.0.1 + github.com/libp2p/go-libp2p-host v0.0.1 + github.com/libp2p/go-libp2p-interface-connmgr v0.0.1 + github.com/libp2p/go-libp2p-interface-pnet v0.0.0-20180919000501-d240acf619f6 // indirect + github.com/libp2p/go-libp2p-loggables v0.0.0-20190226201908-4c6f06110532 // indirect + github.com/libp2p/go-libp2p-metrics v0.0.0-20190226174147-1f0f4db04727 // indirect + github.com/libp2p/go-libp2p-nat v0.0.0-20180924121107-fcc8db1a9963 // indirect + github.com/libp2p/go-libp2p-net v0.0.1 + github.com/libp2p/go-libp2p-peer v0.0.1 + github.com/libp2p/go-libp2p-peerstore v0.0.1 + github.com/libp2p/go-libp2p-protocol v0.0.1 + github.com/libp2p/go-libp2p-secio v0.0.0-20190226201947-aa2813e066f6 // indirect + github.com/libp2p/go-libp2p-swarm v0.0.0-20180924121054-67f7e37245d1 // indirect + github.com/libp2p/go-libp2p-transport v0.0.0-20190226201958-e8580c8a519d // indirect + github.com/libp2p/go-libp2p-transport-upgrader v0.0.0-20180924121042-49139764f899 // indirect + github.com/libp2p/go-maddr-filter v0.0.0-20190226202016-f2e84f9bcf48 // indirect + github.com/libp2p/go-mplex v0.0.0-20190218180303-8ac902b6abdf // indirect + github.com/libp2p/go-msgio v0.0.0-20190117001650-f8aaa1f70c8b // indirect + github.com/libp2p/go-reuseport v0.0.0-20180924121034-dd0c37d7767b // indirect + github.com/libp2p/go-reuseport-transport v0.0.0-20180924121035-5cdb097c8035 // indirect + github.com/libp2p/go-sockaddr v0.0.0-20180904171932-a7494d4eefeb // indirect + github.com/libp2p/go-tcp-transport v0.0.0-20180924121043-5e52db593970 // indirect + github.com/libp2p/go-testutil v0.0.1 // indirect + github.com/libp2p/go-ws-transport v0.0.0-20190226202206-0b3b66c1345b // indirect + github.com/magiconair/properties v1.8.0 + github.com/mattn/go-colorable v0.1.1 + github.com/mattn/go-isatty v0.0.6 // indirect + github.com/mattn/go-tty v0.0.0-20181127064339-e4f871175a2f // indirect + github.com/matttproud/golang_protobuf_extensions v0.0.0-20181231171920-c182affec369 // indirect + github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 + github.com/mr-tron/base58 v1.1.0 + github.com/multiformats/go-multiaddr v0.0.1 + github.com/multiformats/go-multiaddr-net v0.0.1 + github.com/multiformats/go-multistream v0.0.1 + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 // indirect + github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect + github.com/opentracing/opentracing-go v1.0.2 + github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.4 + github.com/openzipkin/zipkin-go-opentracing v0.3.5 // indirect + github.com/orcaman/concurrent-map v0.0.0-20190107190726-7ed82d9cb717 // indirect + github.com/pkg/errors v0.8.1 + github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942 // indirect + github.com/prometheus/client_golang v0.0.0-20171005112915-5cec1d0429b0 // indirect + github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 // indirect + github.com/prometheus/common v0.0.0-20171104095907-e3fb1a1acd76 // indirect + github.com/prometheus/procfs v0.0.0-20171017214025-a6e9df898b13 // indirect + github.com/rs/cors v0.0.0-20190116175910-76f58f330d76 // indirect + github.com/rs/zerolog v1.14.3 + github.com/serialx/hashring v0.0.0-20180504054112-49a4782e9908 // indirect + github.com/soheilhy/cmux v0.1.4 + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.2.1 // 
indirect + github.com/spf13/cobra v0.0.3 + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/viper v1.3.2 // indirect + github.com/stretchr/testify v1.3.0 + github.com/sunpuyo/badger v0.0.0-20181022123248-bb757672e2c7 // indirect + github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f // indirect + github.com/whyrusleeping/go-smux-multiplex v0.0.0-20190220031927-40e9838863a3 // indirect + github.com/whyrusleeping/go-smux-multistream v0.0.0-20190218181238-8e5c10881353 // indirect + github.com/whyrusleeping/go-smux-yamux v0.0.0-20190220032110-28bea8f315d1 // indirect + github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 // indirect + github.com/whyrusleeping/yamux v1.1.5 // indirect + github.com/willf/bitset v1.1.9 // indirect + github.com/willf/bloom v2.0.3+incompatible + github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 // indirect + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 + golang.org/x/net v0.0.0-20190311183353-d8887717615a + golang.org/x/time v0.0.0-20170420181420-c06e80d9300e // indirect + google.golang.org/grpc v1.13.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..6e25cb349 --- /dev/null +++ b/go.sum @@ -0,0 +1,395 @@ +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7 h1:PqzgE6kAMi81xWQA2QIVxjWkFHptGgC547vchpUbtFo= +github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= +github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= +github.com/Shopify/sarama v0.0.0-20190226131337-6bc31ae56bad h1:DYOWGHcBR9TDB484+flPCnkU6uk6fyS8UxxSrgpFBZk= +github.com/Shopify/sarama v0.0.0-20190226131337-6bc31ae56bad/go.mod h1:yuqtN/pe8cXRWG5zPaO7hCfNJp5MwmkoJEoLjkm5tCQ= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/Workiva/go-datastructures v1.0.50 h1:slDmfW6KCHcC7U+LP3DDBbm4fqTwZGn1beOFPfGaLvo= +github.com/Workiva/go-datastructures v1.0.50/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/aergoio/aergo-actor v0.0.0-20190219030625-562037d5fec7 h1:dYRi21hTS3fJwjSh/KZTAuLXUbV/Ijm2395Nf4b3wNs= +github.com/aergoio/aergo-actor v0.0.0-20190219030625-562037d5fec7/go.mod h1:/nqZcvcM0UipJRnUm61LrQ8rC7IyBr8mfx5F00sCbvs= +github.com/aergoio/aergo-lib v0.0.0-20190207031344-f822f6e881ff h1:bI0pLvB5UcBAQRDMwkFnV8G6zzqUa2VO/+sJNCP0NwA= +github.com/aergoio/aergo-lib v0.0.0-20190207031344-f822f6e881ff/go.mod h1:5Y33tDK3yEXHOp0l2GWrl1SEprH42/yYve5/D76zDfk= +github.com/aergoio/etcd v0.0.0-20190429013412-e8b3f96f6399 h1:dJSTOiNe0xJFreGUBSh4bY3KGDxjilUVxAKqjij1pcw= +github.com/aergoio/etcd v0.0.0-20190429013412-e8b3f96f6399/go.mod h1:Blp9ztau8P3FoDynvGUeKUD6qqW/2xQ80kNwU+ywPUM= +github.com/anaskhan96/base58check v0.0.0-20181220122047-b05365d494c4 h1:FUDNaUiPOxrVtUmsRSdx7hrvCKXpfQafPpPU0Yh27os= +github.com/anaskhan96/base58check v0.0.0-20181220122047-b05365d494c4/go.mod 
h1:glPG1rmt/bD3wEXWanFIuoPjC4MG+JEN+i7YhwEYA/Y= +github.com/apache/thrift v0.0.0-20190225222118-a34f78385f3c h1:5v8PQyL1qRhPo9ULeXubCSPaceYYwj3fEq3mHqkTmdw= +github.com/apache/thrift v0.0.0-20190225222118-a34f78385f3c/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32 h1:qkOC5Gd33k54tobS36cXdAzJbeHaduLtnLQQwNoIi78= +github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/c-bata/go-prompt v0.0.0-20190223080847-df16feb8c1cb h1:vMlJPOcrh9/hor2I5YEmRzhG+SDiveKYORbvbjWrXm8= +github.com/c-bata/go-prompt v0.0.0-20190223080847-df16feb8c1cb/go.mod h1:r3+2ndvD23nUkN89DAba+DuXLHnaDwNrpvmSl/eYGUU= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf h1:CAKfRE2YtTUIjjh1bkBtyYFaUT/WmOqsJjgtihT0vMI= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/derekparker/trie v0.0.0-20180212171413-e608c2733dc7 h1:Cab9yoTQh1TxObKfis1DzZ6vFLK5kbeenMjRES/UE3o= +github.com/derekparker/trie v0.0.0-20180212171413-e608c2733dc7/go.mod h1:D6ICZm05D9VN1n/8iOtBxLpXtoGp6HDFUJ1RNVieOSE= +github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= +github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f h1:dDxpBYafY/GYpcl+LS4Bn3ziLPuEdGRkRjYAbSlWxSA= 
+github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/emirpasic/gods v0.0.0-20190124120704-729073a73ce2 h1:vgfGhC6I4lsBEJZG3Co+PsGIdGjpHjuce1L1+M+JLyw= +github.com/emirpasic/gods v0.0.0-20190124120704-729073a73ce2/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/fd/go-nat v0.0.0-20190212120450-e3ba0d89e7d9 h1:B4wY7mIuy68KveToKwoZApuf4nSaZgHZSL3N5oUdgqg= +github.com/fd/go-nat v0.0.0-20190212120450-e3ba0d89e7d9/go.mod h1:DKGczljysEtlWmksIvj6HlBt+Ejle2ffzcCG7iTRK5k= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/funkygao/assert v0.0.0-20160929004900-4a267e33bc79 h1:pMT64UqbHiYr/i6NEYi7MQrCbySinLNS/rXM8hjJpFE= +github.com/funkygao/assert v0.0.0-20160929004900-4a267e33bc79/go.mod h1:f/3KPzEHvMhxhf0Kh6QUNJffbavpJUCyFWw1hnW6yXg= +github.com/funkygao/golib v0.0.0-20180314131852-90d4905c1961 h1:17GfB8KI6sVSDbk6P9JmXhYcXI83brT609GUOviGSSs= +github.com/funkygao/golib v0.0.0-20180314131852-90d4905c1961/go.mod h1:o83CLAArAI7NmTbznViTftc/ELn38qwnCOGsRI/DgR4= +github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= 
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c h1:Lh2aW+HnU2Nbe1gqD9SOJLJxW1jBMmQOktN2acDyJk8= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/guptarohit/asciigraph v0.4.1 h1:YHmCMN8VH81BIUIgTg2Fs3B52QDxNZw2RQ6j5pGoSxo= +github.com/guptarohit/asciigraph v0.4.1/go.mod h1:9fYEfE5IGJGxlP1B+w8wHFy7sNZMhPtn59f0RLtpRFM= +github.com/gxed/GoEndian v0.0.0-20160916112711-0f5c6873267e h1:eIhARPSF2zPr1hKxiL81XWQ392f5stEEcs38UzZVSWo= +github.com/gxed/GoEndian v0.0.0-20160916112711-0f5c6873267e/go.mod h1:vckkIQ0K+GGne8aC4LseYg586YwBQhOxXMXGAmKsCdY= +github.com/gxed/eventfd v0.0.0-20160916113412-80a92cca79a8 h1:N97hyGE4Q7bfXLQHvCtVvhLA9ofDkh5nzFcaB+1kLic= +github.com/gxed/eventfd v0.0.0-20160916113412-80a92cca79a8/go.mod h1:UNZeDpt9TUOMKVo89Fm0D2Ql3htmIN8BzxIcQcmogzs= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= +github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= +github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/improbable-eng/grpc-web v0.0.0-20181031170435-f683dbb3b587 h1:Gzeq8+RIFAozMNlwHrMEYJBCKCaT2WFvqg8m4wv0hi4= +github.com/improbable-eng/grpc-web v0.0.0-20181031170435-f683dbb3b587/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= +github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= +github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= +github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= +github.com/ipfs/go-log v0.0.1 
h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc= +github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= +github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= +github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= +github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= +github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-randbuf v0.0.0-20160322125720-674640a50e6a h1:MUZ5SN21sddytgKf4tb6rKkzO9uMLlZxIV5IpW/ZgRY= +github.com/jbenet/go-randbuf v0.0.0-20160322125720-674640a50e6a/go.mod h1:z659Yhk+3iK3C42MGnioXmP9y8uly7LuJaoDGyQyJp4= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2 h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A= +github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8 h1:bspPhN+oKYFk5fcGNuQzp6IGzYQSenLEgH3s6jkXrWw= +github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/libp2p/go-addr-util v0.0.0-20190226202102-c4f139148873 h1:4wwPGbuccerKO4WZ0uZHHakdUB7ISY105DnXDb1oQDA= +github.com/libp2p/go-addr-util v0.0.0-20190226202102-c4f139148873/go.mod h1:doh2auL/Rkfq1xHxDrWJGHZ8yDojPerOZoPu9XVwRB8= +github.com/libp2p/go-buffer-pool v0.0.1 h1:9Rrn/H46cXjaA2HQ5Y8lyhOS1NhTkZ4yuEs2r3Eechg= +github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= +github.com/libp2p/go-conn-security v0.0.0-20190226201940-b2fb4ac68c41 h1:9mAB66HuUt8oS9GWLTAmV2o3Qvd9lZJMPUCsieuNPp0= +github.com/libp2p/go-conn-security v0.0.0-20190226201940-b2fb4ac68c41/go.mod h1:lIqm8x2wrAxlG+6CzJ3z4Nk5/F73zdjkS6RKbHSbaJ4= +github.com/libp2p/go-conn-security-multistream v0.0.0-20190226202109-104c8a4422af h1:xY38sddayxZAA6CWOFSrhK/syLiCd0u9ZzNCRM4jFi4= +github.com/libp2p/go-conn-security-multistream v0.0.0-20190226202109-104c8a4422af/go.mod h1:hBY3HYjGnK86P3pX3wgr9IAfe3A/vZB830LJAMi1g88= +github.com/libp2p/go-flow-metrics v0.0.0-20180906182756-7e5a55af4853 h1:i/j3E8Pyf/qIMJ7vnQ/NYcb8fvrj/uZ6m13G1gP2e8A= 
+github.com/libp2p/go-flow-metrics v0.0.0-20180906182756-7e5a55af4853/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= +github.com/libp2p/go-libp2p v0.0.0-20180924121117-2787133b0469 h1:A3Th6p+PY1nXorWOOG0CBoicqvg0GG+krQ4wejVwrFA= +github.com/libp2p/go-libp2p v0.0.0-20180924121117-2787133b0469/go.mod h1:CyUlFa6Mw04PkmMg8gBIlHUl8j3TrEiA6oFiF4SgD8w= +github.com/libp2p/go-libp2p-blankhost v0.0.1 h1:/mZuuiwntNR8RywnCFlGHLKrKLYne+qciBpQXWqp5fk= +github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= +github.com/libp2p/go-libp2p-circuit v0.0.0-20190226203123-028b1071af2e h1:2RChWBVN/6VCq1yGjpqRnkgAssseQTaRyGhsVx5JNMw= +github.com/libp2p/go-libp2p-circuit v0.0.0-20190226203123-028b1071af2e/go.mod h1:DH3RV0Tb4cHZAdSsdNOitADXTqRiFZxJmSk7mMcCFN4= +github.com/libp2p/go-libp2p-crypto v0.0.1 h1:JNQd8CmoGTohO/akqrH16ewsqZpci2CbgYH/LmYl8gw= +github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= +github.com/libp2p/go-libp2p-host v0.0.1 h1:dnqusU+DheGcdxrE718kG4XgHNuL2n9eEv8Rg5zy8hQ= +github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.1 h1:Q9EkNSLAOF+u90L88qmE9z/fTdjLh8OsJwGw74mkwk4= +github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= +github.com/libp2p/go-libp2p-interface-pnet v0.0.0-20180919000501-d240acf619f6 h1:TiljmHO1c0NEBUZ/gEjxRKBhMl2dLy9+WL8iKdbax00= +github.com/libp2p/go-libp2p-interface-pnet v0.0.0-20180919000501-d240acf619f6/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= +github.com/libp2p/go-libp2p-loggables v0.0.0-20190226201908-4c6f06110532 h1:iLtlCD+2ZsJrtosh5CM0h2nqtgU5upMVR/KM/PyBGzs= +github.com/libp2p/go-libp2p-loggables v0.0.0-20190226201908-4c6f06110532/go.mod h1:jcP5HmpEBcQ2w5u9xhLkCv73GBPKz3weC/s6ZWhXn+k= +github.com/libp2p/go-libp2p-metrics v0.0.0-20190226174147-1f0f4db04727 h1:d+QubbWRbDzYvAf5B/xxXkQMJY8nojml2oQlVFkLPf4= +github.com/libp2p/go-libp2p-metrics v0.0.0-20190226174147-1f0f4db04727/go.mod h1:ko4lRyuvbgwwxD2TJvt2RHONahjJlkn6l7L/iEbJBf0= +github.com/libp2p/go-libp2p-nat v0.0.0-20180924121107-fcc8db1a9963 h1:7CALIwID8oZ3XuuY5TqVscv8m51GbUCeOMXvLfjcJII= +github.com/libp2p/go-libp2p-nat v0.0.0-20180924121107-fcc8db1a9963/go.mod h1:cA6+rN+EcOAzmSL3vQ53VxX/FfOL1WGeJUvZgqjm2eQ= +github.com/libp2p/go-libp2p-net v0.0.1 h1:xJ4Vh4yKF/XKb8fd1Ev0ebAGzVjMxXzrxG2kjtU+F5Q= +github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= +github.com/libp2p/go-libp2p-peer v0.0.1 h1:0qwAOljzYewINrU+Kndoc+1jAL7vzY/oY2Go4DCGfyY= +github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= +github.com/libp2p/go-libp2p-peerstore v0.0.1 h1:twKovq8YK5trLrd3nB7PD2Zu9JcyAIdm7Bz9yBWjhq8= +github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= +github.com/libp2p/go-libp2p-protocol v0.0.1 h1:+zkEmZ2yFDi5adpVE3t9dqh/N9TbpFWywowzeEzBbLM= +github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= +github.com/libp2p/go-libp2p-secio v0.0.0-20190226201947-aa2813e066f6 h1:a1vMNW2Z8bELA4Ul2DWX/vya/vIy6D6xRLqNNjK001k= +github.com/libp2p/go-libp2p-secio v0.0.0-20190226201947-aa2813e066f6/go.mod h1:U7wBlYK2sZbUiTaGe6xJd/fyNq40gwn+jBk/iEUbUrA= +github.com/libp2p/go-libp2p-swarm v0.0.0-20180924121054-67f7e37245d1 h1:SKCRx/pUB6inKHA0SF0rbxP+Hxdb5mcMXnP6tKxMTmQ= +github.com/libp2p/go-libp2p-swarm 
v0.0.0-20180924121054-67f7e37245d1/go.mod h1:NHa7cA4/y8OKFw3BHQjLL9pwPDFXkgECO/k+2gqSFuk= +github.com/libp2p/go-libp2p-transport v0.0.0-20190226201958-e8580c8a519d h1:ErBx9a936Bg6OXLfYn/XA4BzA0SnSZ/SKsFq9QPsj2M= +github.com/libp2p/go-libp2p-transport v0.0.0-20190226201958-e8580c8a519d/go.mod h1:lcwgOszllbhvQXul37Kv5YbSYXPoUhRB2Z+Nr3jaBmo= +github.com/libp2p/go-libp2p-transport-upgrader v0.0.0-20180924121042-49139764f899 h1:IhbumRCSG/wB5JO3fDsaotSu7pwY7RoJZwdXCl+yR2s= +github.com/libp2p/go-libp2p-transport-upgrader v0.0.0-20180924121042-49139764f899/go.mod h1:5r+arPlxwtCEF1aVi/fTQF/ZWGSPHLxBov1DlXDevDA= +github.com/libp2p/go-maddr-filter v0.0.0-20190226202016-f2e84f9bcf48 h1:guJ19aIUZDkepcs6jaNvhsRDw/qDoc8I11kZ6NBPWyw= +github.com/libp2p/go-maddr-filter v0.0.0-20190226202016-f2e84f9bcf48/go.mod h1:9uHkmfw086BVLQyQFAbe/peEO9Iley6dafqm3cKHIAk= +github.com/libp2p/go-mplex v0.0.0-20190218180303-8ac902b6abdf h1:c8XXqVqqZ0hzHpV5dG61KGlLXXuqNHn1xMB3/z7Oyhg= +github.com/libp2p/go-mplex v0.0.0-20190218180303-8ac902b6abdf/go.mod h1:00WmJEYVKH/Kz6wCom3VmxX7re6B/jgSZmkydBYAZ1w= +github.com/libp2p/go-msgio v0.0.0-20190117001650-f8aaa1f70c8b h1:x7AMpYQPtrQ13SAFD/IKsZ35ylOIzM94LLjLBmo3aQA= +github.com/libp2p/go-msgio v0.0.0-20190117001650-f8aaa1f70c8b/go.mod h1:u5M7EDois/gQxdPuEfNYQks5cAu9oxUGDU3dRkGping= +github.com/libp2p/go-reuseport v0.0.0-20180924121034-dd0c37d7767b h1:aMR7gRGtIrahvQNtYrdXKaLYv/RT20mnwWtT3qwhgaE= +github.com/libp2p/go-reuseport v0.0.0-20180924121034-dd0c37d7767b/go.mod h1:UeLFiw50cCfyDHBpU0sXBR8ul1MO/m51mXpRO/SYjCE= +github.com/libp2p/go-reuseport-transport v0.0.0-20180924121035-5cdb097c8035 h1:Fozoia04xKDMOCNKU65YsDmkBghKVKRGzOTgJQrACoo= +github.com/libp2p/go-reuseport-transport v0.0.0-20180924121035-5cdb097c8035/go.mod h1:twFBnjvBekr78LhW4UaBX26BfN5+0EcO7Y4KwZqDlls= +github.com/libp2p/go-sockaddr v0.0.0-20180904171932-a7494d4eefeb h1:qLOzuBZ0fK3Di7wkYgQUiDItSlA9pe+DjGIGNRY/14c= +github.com/libp2p/go-sockaddr v0.0.0-20180904171932-a7494d4eefeb/go.mod h1:N/q858DTOi0BT81GpvIRIls1x7my5oLpbxYZnbRXVBM= +github.com/libp2p/go-stream-muxer v0.0.1 h1:Ce6e2Pyu+b5MC1k3eeFtAax0pW4gc6MosYSLV05UeLw= +github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= +github.com/libp2p/go-tcp-transport v0.0.0-20180924121043-5e52db593970 h1:6R5A3TOu9B9Hum4oGqhJ4ioffoUHLKs+j5psDr7TQT4= +github.com/libp2p/go-tcp-transport v0.0.0-20180924121043-5e52db593970/go.mod h1:K6Zi9TkPvn5m/m0bktlJ6Fv9Vna2wvr/jiCtTRC7T8o= +github.com/libp2p/go-testutil v0.0.1 h1:Xg+O0G2HIMfHqBOBDcMS1iSZJ3GEcId4qOxCQvsGZHk= +github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= +github.com/libp2p/go-ws-transport v0.0.0-20190226202206-0b3b66c1345b h1:C0JJ69BlXFTQ7nYcZS+5Pa5zu8iT62CLplafqWM4ENg= +github.com/libp2p/go-ws-transport v0.0.0-20190226202206-0b3b66c1345b/go.mod h1:qx7Dcw4onTaVNI3iG6q3XOKwNQWnXYhNEHYmhgQmKKk= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 
+github.com/mattn/go-isatty v0.0.6 h1:SrwhHcpV4nWrMGdNcC2kXpMfcBVYGDuTArqyhocJgvA= +github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/mattn/go-tty v0.0.0-20181127064339-e4f871175a2f h1:4P7Ul+TAnk92vTeVkXs6VLjmf1EhrYtDRa03PCYY6VM= +github.com/mattn/go-tty v0.0.0-20181127064339-e4f871175a2f/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/matttproud/golang_protobuf_extensions v0.0.0-20181231171920-c182affec369 h1:Jwk0Nv0n8sHaOjslwzwDULZ+NEHcZaJqjTcL8VY0Lcc= +github.com/matttproud/golang_protobuf_extensions v0.0.0-20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-multiaddr v0.0.1 h1:/QUV3VBMDI6pi6xfiw7lr6xhDWWvQKn9udPn68kLSdY= +github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr-dns v0.0.1 h1:jQt9c6tDSdQLIlBo4tXYx7QUHCPjxsB1zXcag/2S7zc= +github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-net v0.0.1 h1:76O59E3FavvHqNg7jvzWzsPSW5JSi/ek0E4eiDVbg9g= +github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multistream v0.0.1 h1:JV4VfSdY9n7ECTtY59/TlSyFCzRILvYx4T4Ws8ZgihU= +github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.4 h1:EilBBu6IkBi8hO1gJl+vNHh0rGQYRhObchxTzQEi4q0= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.4/go.mod h1:uVHyebswE1cCXr2A73cRM2frx5ld1RJUCJkFNZ90ZiI= +github.com/openzipkin/zipkin-go-opentracing v0.3.5 h1:nZPvd2EmRKP+NzFdSuxZF/FG4Y4W2gn6ugXliTAu9o0= +github.com/openzipkin/zipkin-go-opentracing v0.3.5/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/orcaman/concurrent-map v0.0.0-20190107190726-7ed82d9cb717 h1:2v7IYkog9ZFN04bv5hkwjpyHkc6wujPPOVYDPp2rfwA= +github.com/orcaman/concurrent-map v0.0.0-20190107190726-7ed82d9cb717/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/term v0.0.0-20180423043932-cda20d4ac917/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942 h1:A7GG7zcGjl3jqAqGPmcNjd/D9hzL95SuoOQAaFNdLU0= +github.com/pkg/term v0.0.0-20190109203006-aa71e9d9e942/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.0.0-20171005112915-5cec1d0429b0 h1:uEiENdm9N5Nj3ezfwdvwBGc2EHLiUgD3hUTOaMfBn5E= +github.com/prometheus/client_golang v0.0.0-20171005112915-5cec1d0429b0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 h1:13pIdM2tpaDi4OVe24fgoIS7ZTqMt0QI+bwQsX5hq+g= +github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20171104095907-e3fb1a1acd76 h1:g2v6dZgmqj2wYGPgHYX5WVaQ9IwV1ylsSiD+f8RvS1Y= +github.com/prometheus/common v0.0.0-20171104095907-e3fb1a1acd76/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20171017214025-a6e9df898b13 h1:leRfx9kcgnSDkqAFhaaUcRqpAZgnFdwZkZcdRcea1h0= +github.com/prometheus/procfs v0.0.0-20171017214025-a6e9df898b13/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rs/cors v0.0.0-20190116175910-76f58f330d76 h1:kz+slcZ3xepXoLw24pyf3+fnc3WJITZ91IEa+PJTv2g= +github.com/rs/cors v0.0.0-20190116175910-76f58f330d76/go.mod 
h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.14.3 h1:4EGfSkR2hJDB0s3oFfrlPqjU1e4WLncergLil3nEKW0= +github.com/rs/zerolog v1.14.3/go.mod h1:3WXPzbXEEliJ+a6UFE4vhIxV8qR1EML6ngzP9ug4eYg= +github.com/serialx/hashring v0.0.0-20180504054112-49a4782e9908 h1:RRpyb4kheanCQVyYfOhkZoD/cwClvn12RzHex2ZmHxw= +github.com/serialx/hashring v0.0.0-20180504054112-49a4782e9908/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M= +github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/sunpuyo/badger v0.0.0-20181022123248-bb757672e2c7 h1:fUmKhjwU2g5XZnsDss0mC23S2jikvU94pr83ZC4U2hY= +github.com/sunpuyo/badger v0.0.0-20181022123248-bb757672e2c7/go.mod h1:NV8q9FNMv3hGs70YQN5GvBehBvuTj4XBGvUpJRXzM7g= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo= +github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f 
h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg= +github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= +github.com/whyrusleeping/go-smux-multiplex v0.0.0-20190220031927-40e9838863a3 h1:tFfqe5hkHwU6l3wdXRtKSBia0ltmksfOAyTe+zjrMj8= +github.com/whyrusleeping/go-smux-multiplex v0.0.0-20190220031927-40e9838863a3/go.mod h1:34LEDbeKFZInPUrAG+bjuJmUXONGdEFW7XL0SpTY1y4= +github.com/whyrusleeping/go-smux-multistream v0.0.0-20190218181238-8e5c10881353 h1:k6nm/MGaV82oaKDTmmw/vZYbsMXOXsU4YP3vnEQ3TxI= +github.com/whyrusleeping/go-smux-multistream v0.0.0-20190218181238-8e5c10881353/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ= +github.com/whyrusleeping/go-smux-yamux v0.0.0-20190220032110-28bea8f315d1 h1:bGzQr3lAlT1oV5hnCYBMTMln8RWjihn5pF6hpAnlOq0= +github.com/whyrusleeping/go-smux-yamux v0.0.0-20190220032110-28bea8f315d1/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= +github.com/whyrusleeping/mafmt v1.2.8 h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA= +github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= +github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/whyrusleeping/yamux v1.1.5 h1:4CK3aUUJQu0qpKZv5gEWJjNOQtdbdDhVVS6PJ+HimdE= +github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= +github.com/willf/bitset v1.1.9 h1:GBtFynGY9ZWZmEC9sWuu41/7VBXPFCOAbCbqTflOg9c= +github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA= +github.com/willf/bloom v2.0.3+incompatible/go.mod h1:MmAltL9pDMNTrvUkxdg0k0q5I0suxmuwp3KbyrZLOZ8= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a 
h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180620133508-ad87a3a340fa/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20170420181420-c06e80d9300e h1:J4S0GBcCoJ2pYYVewfgT7HU8SvahFdrkNK7FRuGCZdo= +golang.org/x/time v0.0.0-20170420181420-c06e80d9300e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc= +google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/libtool/src/luajit b/libtool/src/luajit index 0343aa3b2..a42d4056c 160000 --- a/libtool/src/luajit +++ b/libtool/src/luajit @@ -1 +1 @@ -Subproject commit 0343aa3b24b06b7167c8db41440c25cafc37b97b 
+Subproject commit a42d4056c0dec50932d85886215292f1580746e8 diff --git a/message/blockchainmsg.go b/message/blockchainmsg.go index 634aaa3d2..ed1fa4610 100644 --- a/message/blockchainmsg.go +++ b/message/blockchainmsg.go @@ -144,7 +144,8 @@ type GetStakingRsp struct { } type GetNameInfo struct { - Name string + Name string + BlockNo types.BlockNo } type GetNameInfoRsp struct { diff --git a/message/p2pmsg.go b/message/p2pmsg.go index c120362ae..bef35fb82 100644 --- a/message/p2pmsg.go +++ b/message/p2pmsg.go @@ -20,9 +20,9 @@ var ( PeerNotFoundError = fmt.Errorf("remote peer was not found") MissingHashError = fmt.Errorf("some block hash not found") UnexpectedBlockError = fmt.Errorf("unexpected blocks response") - TooFewBlocksError = fmt.Errorf("too few blocks received that expected") - TooManyBlocksError = fmt.Errorf("too many blocks received that expected") - TooBigBlockError = fmt.Errorf("block size limit exceeded") + TooFewBlocksError = fmt.Errorf("too few blocks received that expected") + TooManyBlocksError = fmt.Errorf("too many blocks received that expected") + TooBigBlockError = fmt.Errorf("block size limit exceeded") ) // PingMsg send types.Ping to each peer. @@ -124,6 +124,7 @@ type GetPeers struct { type PeerInfo struct { Addr *types.PeerAddress + Version string Hidden bool CheckTime time.Time LastBlockHash []byte @@ -181,4 +182,14 @@ type GetHashByNoRsp struct { } type GetSelf struct { -} \ No newline at end of file +} + +type GetCluster struct { + ReplyC chan *GetClusterRsp +} + +type GetClusterRsp struct { + ChainID BlockHash + Members []*types.MemberAttr + Err error +} diff --git a/message/rpcmsg.go b/message/rpcmsg.go index fa4352be2..f11c0539b 100644 --- a/message/rpcmsg.go +++ b/message/rpcmsg.go @@ -8,4 +8,4 @@ package message // GetServerInfo for RPC to collect and type GetServerInfo struct { Categories []string -} \ No newline at end of file +} diff --git a/message/syncermsg.go b/message/syncermsg.go index 5903f6bac..b61bc3288 100644 --- a/message/syncermsg.go +++ b/message/syncermsg.go @@ -11,6 +11,7 @@ const SyncerSvc = "SyncerSvc" type SyncStart struct { PeerID peer.ID TargetNo types.BlockNo + NotifyC chan error } type FinderResult struct { diff --git a/p2p/ancestorreceiver.go b/p2p/ancestorreceiver.go index 606126b1b..732757b1d 100644 --- a/p2p/ancestorreceiver.go +++ b/p2p/ancestorreceiver.go @@ -10,7 +10,6 @@ import ( "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/subproto" "github.com/aergoio/aergo/types" - "github.com/golang/protobuf/proto" "time" ) @@ -42,7 +41,7 @@ func (br *AncestorReceiver) StartGet() { } // ReceiveResp must be called just in read go routine -func (br *AncestorReceiver) ReceiveResp(msg p2pcommon.Message, msgBody proto.Message) (ret bool) { +func (br *AncestorReceiver) ReceiveResp(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) (ret bool) { ret = true // timeout if br.finished || br.timeout.Before(time.Now()) { diff --git a/p2p/ancestorreceiver_test.go b/p2p/ancestorreceiver_test.go index f62e93b72..756b17ab4 100644 --- a/p2p/ancestorreceiver_test.go +++ b/p2p/ancestorreceiver_test.go @@ -7,6 +7,7 @@ package p2p import ( "github.com/aergoio/aergo/message" + "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2pmock" "github.com/aergoio/aergo/p2p/subproto" "github.com/aergoio/aergo/types" @@ -113,7 +114,7 @@ func TestAncestorReceiver_ReceiveResp(t *testing.T) { br := NewAncestorReceiver(mockActor, mockPeer, seqNo, test.input, test.ttl) br.StartGet() - msg := &V030Message{subProtocol: 
subproto.GetAncestorResponse, id: sampleMsgID}
+ msg := p2pcommon.NewMessageValue(subproto.GetAncestorResponse, sampleMsgID, p2pcommon.EmptyID, time.Now().UnixNano(), nil)
 body := &types.GetAncestorResponse{AncestorHash: test.blkRsp, AncestorNo: test.blkNo, Status: test.rspStatus}
 if test.blkInterval > 0 {
 time.Sleep(test.blkInterval)
diff --git a/p2p/blkreceiver.go b/p2p/blkreceiver.go
index ec576e8c1..c236d44d6 100644
--- a/p2p/blkreceiver.go
+++ b/p2p/blkreceiver.go
@@ -15,7 +15,6 @@ import (
 "github.com/aergoio/aergo/p2p/p2pcommon"
 "github.com/aergoio/aergo/p2p/subproto"
 "github.com/aergoio/aergo/types"
- "github.com/golang/protobuf/proto"
 )
 // BlocksChunkReceiver sends p2p getBlocksRequest to target peer and receives p2p responses till all requested blocks are received
@@ -64,7 +63,7 @@ func (br *BlocksChunkReceiver) StartGet() {
 }
 // ReceiveResp must be called just in read go routine
-func (br *BlocksChunkReceiver) ReceiveResp(msg p2pcommon.Message, msgBody proto.Message) (ret bool) {
+func (br *BlocksChunkReceiver) ReceiveResp(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) (ret bool) {
 // cases in waiting
 // normal not status => wait
 // normal status (last response) => finish
@@ -86,7 +85,7 @@ func (br *BlocksChunkReceiver) ReceiveResp(msg p2pcommon.Message, msgBody proto.
 return
 }
-func (br *BlocksChunkReceiver) handleInWaiting(msg p2pcommon.Message, msgBody proto.Message) {
+func (br *BlocksChunkReceiver) handleInWaiting(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) {
 // consuming request id when timeout, no more resp expected (i.e. hasNext == false ) or malformed body.
 // timeout
 if br.timeout.Before(time.Now()) {
@@ -94,7 +93,7 @@ func (br *BlocksChunkReceiver) handleInWaiting(msg p2pcommon.Message, msgBody pr
 br.finishReceiver()
 return
 }
- // responses malformed data will not expectec remained chunk.
+ // malformed responses mean that later responses will also be malformed.
 respBody, ok := msgBody.(types.ResponseMessage)
 if !ok || respBody.GetStatus() != types.ResultStatus_OK {
 br.cancelReceiving(message.RemotePeerFailError, false)
@@ -174,7 +173,7 @@ func (br *BlocksChunkReceiver) finishReceiver() {
 }
 // ignoreMsg silently ignores following responses, which are not useful anymore.
-func (br *BlocksChunkReceiver) ignoreMsg(msg p2pcommon.Message, msgBody proto.Message) { +func (br *BlocksChunkReceiver) ignoreMsg(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { body, ok := msgBody.(*types.GetBlockResponse) if !ok { return diff --git a/p2p/blkreceiver_test.go b/p2p/blkreceiver_test.go index ac55c34ee..98ca21295 100644 --- a/p2p/blkreceiver_test.go +++ b/p2p/blkreceiver_test.go @@ -7,6 +7,7 @@ package p2p import ( "github.com/aergoio/aergo/chain" + "github.com/aergoio/aergo/p2p/p2pcommon" "testing" "time" @@ -132,7 +133,7 @@ func TestBlocksChunkReceiver_ReceiveResp(t *testing.T) { br := NewBlockReceiver(mockActor, mockPeer, seqNo, test.input, test.ttl) br.StartGet() - msg := &V030Message{subProtocol: subproto.GetBlocksResponse, id: sampleMsgID} + msg := p2pcommon.NewSimpleMsgVal(subproto.GetBlocksResponse, sampleMsgID) for i, blks := range test.blkInput { if test.blkInterval > 0 { time.Sleep(test.blkInterval) diff --git a/p2p/configs.go b/p2p/configs.go index 6b24ca7d1..c8e6ae1f2 100644 --- a/p2p/configs.go +++ b/p2p/configs.go @@ -7,12 +7,6 @@ package p2p import "time" -// constants for inter-communication of aergosvr -const ( - // other actor - DefaultActorMsgTTL = time.Second * 4 -) - // constants for peer communicating const ( // peer handshake will be failed when taken more than defaultHandshakeTTL @@ -24,10 +18,6 @@ const ( // writeMsgBufferSize is queue size of message to a peer. connection will be closed when queue is exceeded. writeMsgBufferSize = 40 - // time to determine which the remote designated peer is not working. - DesignatedNodeTTL = time.Minute * 60 - // time to determine which the remote peer is not working. - DefaultNodeTTL = time.Minute * 10 ) // constants for legacy sync algorithm. DEPRECATED newer sync loging in syncer package is used now. 
@@ -39,7 +29,7 @@ const ( // constants for node discovery const ( - DiscoveryQueryInterval = time.Minute * 5 + DiscoveryQueryInterval = time.Minute * 1 MaxAddrListSizePolaris = 200 MaxAddrListSizePeer = 50 @@ -64,10 +54,14 @@ const ( cachePlaceHolder = true ) -// constants about private key +// constants for block notice tuning const ( - DefaultPkKeyPrefix = "aergo-peer" - DefaultPkKeyExt = ".key" - DefaultPubKeyExt = ".pub" - DefaultPeerIDExt = ".id" + GapToSkipAll = 86400 + GapToSkipHourly = 3600 + GapToSkip5Min = 300 + + HourlyInterval = time.Hour + TenMiniteInterval = time.Minute * 10 + MinNewBlkNotiInterval = time.Second >> 2 ) + diff --git a/p2p/const_test.go b/p2p/const_test.go index d8d38acdf..3ef5c7f2e 100644 --- a/p2p/const_test.go +++ b/p2p/const_test.go @@ -9,7 +9,6 @@ import ( "encoding/base64" "encoding/hex" "fmt" - "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/internal/enc" "github.com/aergoio/aergo/p2p/p2pcommon" @@ -28,10 +27,6 @@ var dummyBlockHash, _ = hex.DecodeString("4f461d85e869ade8a0544f8313987c33a9c065 var dummyBlockHeight uint64 = 100215 var dummyTxHash, _ = enc.ToBytes("4H4zAkAyRV253K5SNBJtBxqUgHEbZcXbWFFc6cmQHY45") -var ( - myChainID, theirChainID *types.ChainID - myChainBytes, theirChainBytes []byte -) var samplePeerID peer.ID var sampleMeta p2pcommon.PeerMeta var sampleErr error @@ -40,14 +35,6 @@ var logger *log.Logger func init() { logger = log.NewLogger("test") - myChainID = types.NewChainID() - myChainID.Magic = "itSmain1" - myChainBytes, _ = myChainID.Bytes() - - theirChainID = types.NewChainID() - theirChainID.Read(myChainBytes) - theirChainID.Magic = "itsdiff2" - theirChainBytes, _ = theirChainID.Bytes() samplePeerID, _ = peer.IDB58Decode("16Uiu2HAkvvhjxVm2WE9yFBDdPQ9qx6pX9taF6TTwDNHs8VPi1EeR") sampleErr = fmt.Errorf("err in unittest") sampleMeta = p2pcommon.PeerMeta{ID: samplePeerID, IPAddress: "192.168.1.2", Port: 7845} @@ -63,7 +50,6 @@ const ( ) var sampleMsgID p2pcommon.MsgID -var sampleHeader p2pcommon.Message var sampleKey1Priv crypto.PrivKey var sampleKey1Pub crypto.PubKey var sampleKey1ID peer.ID @@ -98,7 +84,6 @@ func init() { sampleKey2ID, _ = peer.IDFromPublicKey(sampleKey2Pub) sampleMsgID = p2pcommon.NewMsgID() - sampleHeader = &V030Message{id: sampleMsgID} dummyPeerID = sampleKey1ID dummyPeerID2, _ = peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") diff --git a/p2p/handshake.go b/p2p/handshake.go index 76a0f3ec8..d03e0c55a 100644 --- a/p2p/handshake.go +++ b/p2p/handshake.go @@ -7,8 +7,9 @@ package p2p import ( "bufio" - "encoding/binary" + "context" "fmt" + "github.com/aergoio/aergo/p2p/v030" "io" "time" @@ -18,36 +19,29 @@ import ( "github.com/libp2p/go-libp2p-peer" ) -// HSHandlerFactory is creator of HSHandler -type HSHandlerFactory interface { - CreateHSHandler(outbound bool, pm p2pcommon.PeerManager, actor p2pcommon.ActorService, log *log.Logger, pid peer.ID) HSHandler -} - -// HSHandler will do handshake with remote peer -type HSHandler interface { - // Handle peer handshake till ttl, and return msgrw for this connection, and status of remote peer. 
- Handle(r io.Reader, w io.Writer, ttl time.Duration) (p2pcommon.MsgReadWriter, *types.Status, error) -} - type InboundHSHandler struct { - *PeerHandshaker + *LegacyWireHandshaker } func (ih *InboundHSHandler) Handle(r io.Reader, w io.Writer, ttl time.Duration) (p2pcommon.MsgReadWriter, *types.Status, error) { - return ih.handshakeInboundPeerTimeout(r, w, ttl) + ctx, cancel := context.WithTimeout(context.Background(), ttl) + defer cancel() + return ih.handshakeInboundPeer(ctx, r, w) } type OutboundHSHandler struct { - *PeerHandshaker + *LegacyWireHandshaker } func (oh *OutboundHSHandler) Handle(r io.Reader, w io.Writer, ttl time.Duration) (p2pcommon.MsgReadWriter, *types.Status, error) { - return oh.handshakeOutboundPeerTimeout(r, w, ttl) + ctx, cancel := context.WithTimeout(context.Background(), ttl) + defer cancel() + return oh.handshakeOutboundPeer(ctx, r, w) } -// PeerHandshaker works to handshake to just connected peer, it detect chain networks +// LegacyWireHandshaker works to handshake to just connected peer, it detect chain networks // and protocol versions, and then select InnerHandshaker for that protocol version. -type PeerHandshaker struct { +type LegacyWireHandshaker struct { pm p2pcommon.PeerManager actorServ p2pcommon.ActorService logger *log.Logger @@ -58,105 +52,75 @@ type PeerHandshaker struct { remoteStatus *types.Status } -// InnerHandshaker do handshake work and msgreadwriter for a protocol version -type innerHandshaker interface { - doForOutbound() (*types.Status, error) - doForInbound() (*types.Status, error) - GetMsgRW() p2pcommon.MsgReadWriter -} - type hsResult struct { rw p2pcommon.MsgReadWriter statusMsg *types.Status err error } -func newHandshaker(pm p2pcommon.PeerManager, actor p2pcommon.ActorService, log *log.Logger, chainID *types.ChainID, peerID peer.ID) *PeerHandshaker { - return &PeerHandshaker{pm: pm, actorServ: actor, logger: log, localChainID: chainID, peerID: peerID} -} - -func (h *PeerHandshaker) handshakeOutboundPeerTimeout(r io.Reader, w io.Writer, ttl time.Duration) (p2pcommon.MsgReadWriter, *types.Status, error) { - ret, err := runFuncTimeout(func(doneChan chan<- interface{}) { - rw, statusMsg, err := h.handshakeOutboundPeer(r, w) - doneChan <- &hsResult{rw: rw, statusMsg: statusMsg, err: err} - }, ttl) - if err != nil { - return nil, nil, err - } - return ret.(*hsResult).rw, ret.(*hsResult).statusMsg, ret.(*hsResult).err -} - -func (h *PeerHandshaker) handshakeInboundPeerTimeout(r io.Reader, w io.Writer, ttl time.Duration) (p2pcommon.MsgReadWriter, *types.Status, error) { - ret, err := runFuncTimeout(func(doneChan chan<- interface{}) { - rw, statusMsg, err := h.handshakeInboundPeer(r, w) - doneChan <- &hsResult{rw: rw, statusMsg: statusMsg, err: err} - }, ttl) - if err != nil { - return nil, nil, err - } - return ret.(*hsResult).rw, ret.(*hsResult).statusMsg, ret.(*hsResult).err -} - -type targetFunc func(chan<- interface{}) - -func runFuncTimeout(m targetFunc, ttl time.Duration) (interface{}, error) { - done := make(chan interface{}) - go m(done) - select { - case hsResult := <-done: - return hsResult, nil - case <-time.NewTimer(ttl).C: - return nil, TimeoutError - } +func newHandshaker(pm p2pcommon.PeerManager, actor p2pcommon.ActorService, log *log.Logger, chainID *types.ChainID, peerID peer.ID) *LegacyWireHandshaker { + return &LegacyWireHandshaker{pm: pm, actorServ: actor, logger: log, localChainID: chainID, peerID: peerID} } -func (h *PeerHandshaker) handshakeOutboundPeer(r io.Reader, w io.Writer) (p2pcommon.MsgReadWriter, *types.Status, 
error) { +func (h *LegacyWireHandshaker) handshakeOutboundPeer(ctx context.Context, r io.Reader, w io.Writer) (p2pcommon.MsgReadWriter, *types.Status, error) { bufReader, bufWriter := bufio.NewReader(r), bufio.NewWriter(w) // send initial hsmessage - hsHeader := HSHeader{Magic: p2pcommon.MAGICTest, Version: p2pcommon.P2PVersion030} + hsHeader := p2pcommon.HSHeader{Magic: p2pcommon.MAGICTest, Version: p2pcommon.P2PVersion030} sent, err := bufWriter.Write(hsHeader.Marshal()) if err != nil { return nil, nil, err } + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + default: + // go on + } if sent != len(hsHeader.Marshal()) { return nil, nil, fmt.Errorf("transport error") } - // continue to handshake with innerHandshaker - innerHS, err := h.selectProtocolVersion(hsHeader, bufReader, bufWriter) + // continue to handshake with VersionedHandshaker + innerHS, err := h.selectProtocolVersion(hsHeader.Version, bufReader, bufWriter) if err != nil { return nil, nil, err } - status, err := innerHS.doForOutbound() + status, err := innerHS.DoForOutbound(ctx) h.remoteStatus = status return innerHS.GetMsgRW(), status, err } -func (h *PeerHandshaker) handshakeInboundPeer(r io.Reader, w io.Writer) (p2pcommon.MsgReadWriter, *types.Status, error) { - var hsHeader HSHeader +func (h *LegacyWireHandshaker) handshakeInboundPeer(ctx context.Context, r io.Reader, w io.Writer) (p2pcommon.MsgReadWriter, *types.Status, error) { + var hsHeader p2pcommon.HSHeader bufReader, bufWriter := bufio.NewReader(r), bufio.NewWriter(w) // wait initial hsmessage - headBuf := make([]byte, 8) + headBuf := make([]byte, p2pcommon.V030HSHeaderLength) read, err := h.readToLen(bufReader, headBuf, 8) if err != nil { return nil, nil, err } - if read != 8 { + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + default: + // go on + } + if read != p2pcommon.V030HSHeaderLength { return nil, nil, fmt.Errorf("transport error") } hsHeader.Unmarshal(headBuf) - // continue to handshake with innerHandshaker - innerHS, err := h.selectProtocolVersion(hsHeader, bufReader, bufWriter) + // continue to handshake with VersionedHandshaker + innerHS, err := h.selectProtocolVersion(hsHeader.Version, bufReader, bufWriter) if err != nil { return nil, nil, err } - status, err := innerHS.doForInbound() + status, err := innerHS.DoForInbound(ctx) // send hsresponse h.remoteStatus = status return innerHS.GetMsgRW(), status, err } -func (h *PeerHandshaker) readToLen(rd io.Reader, bf []byte, max int) (int, error) { +func (h *LegacyWireHandshaker) readToLen(rd io.Reader, bf []byte, max int) (int, error) { remain := max offset := 0 for remain > 0 { @@ -170,57 +134,13 @@ func (h *PeerHandshaker) readToLen(rd io.Reader, bf []byte, max int) (int, error return offset, nil } -func createStatusMsg(pm p2pcommon.PeerManager, actorServ p2pcommon.ActorService, chainID *types.ChainID) (*types.Status, error) { - // find my best block - bestBlock, err := actorServ.GetChainAccessor().GetBestBlock() - if err != nil { - return nil, err - } - selfAddr := pm.SelfMeta().ToPeerAddress() - chainIDbytes, err := chainID.Bytes() - if err != nil { - return nil, err - } - // create message data - statusMsg := &types.Status{ - Sender: &selfAddr, - ChainID: chainIDbytes, - BestBlockHash: bestBlock.BlockHash(), - BestHeight: bestBlock.GetHeader().GetBlockNo(), - NoExpose: pm.SelfMeta().Hidden, - } - - return statusMsg, nil -} - -func (h *PeerHandshaker) selectProtocolVersion(head HSHeader, r *bufio.Reader, w *bufio.Writer) (innerHandshaker, error) { - switch head.Version { +func 
(h *LegacyWireHandshaker) selectProtocolVersion(version p2pcommon.P2PVersion, r *bufio.Reader, w *bufio.Writer) (p2pcommon.VersionedHandshaker, error) { + switch version { case p2pcommon.P2PVersion030: - v030 := newV030StateHS(h.pm, h.actorServ, h.logger, h.localChainID, h.peerID, r, w) - return v030, nil + v030hs := v030.NewV030StateHS(h.pm, h.actorServ, h.logger, h.localChainID, h.peerID, r, w) + return v030hs, nil default: return nil, fmt.Errorf("not supported version") } } -func (h *PeerHandshaker) checkProtocolVersion(versionStr string) error { - // TODO modify interface and put check code here - return nil -} - -type HSHeader struct { - Magic uint32 - Version uint32 -} - -func (h HSHeader) Marshal() []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint32(b, h.Magic) - binary.BigEndian.PutUint32(b[4:], h.Version) - return b -} - -func (h *HSHeader) Unmarshal(b []byte) { - h.Magic = binary.BigEndian.Uint32(b) - h.Version = binary.BigEndian.Uint32(b[4:]) -} diff --git a/p2p/handshake_test.go b/p2p/handshake_test.go index b5b2d4676..47daf436c 100644 --- a/p2p/handshake_test.go +++ b/p2p/handshake_test.go @@ -7,9 +7,13 @@ package p2p import ( "bufio" - "bytes" + "context" "fmt" + "github.com/aergoio/aergo/config" + "github.com/aergoio/aergo/p2p/p2pkey" + peer "github.com/libp2p/go-libp2p-peer" "reflect" + "strings" "testing" "time" @@ -22,42 +26,25 @@ import ( "github.com/aergoio/aergo/types" ) -func Test_runFuncTimeout(t *testing.T) { - type args struct { - m targetFunc - ttl time.Duration - } - tests := []struct { - name string - args args - want interface{} - wantErr bool - }{ - {"Tnorm", args{func(done chan<- interface{}) { - done <- "success" - }, time.Millisecond * 10}, "success", false}, - {"Tnorm2", args{func(done chan<- interface{}) { - done <- -3 - }, time.Millisecond * 10}, -3, false}, - {"Ttimeout1", args{func(done chan<- interface{}) { - time.Sleep(time.Millisecond * 11) - }, time.Millisecond * 10}, nil, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := runFuncTimeout(tt.args.m, tt.args.ttl) - if (err != nil) != tt.wantErr { - t.Errorf("runFuncTimeout() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("runFuncTimeout() = %v, want %v", got, tt.want) - } - }) - } +const ( + sampleKeyFile = "../test/sample.key" +) + +var ( + // sampleID matches the key defined in test config file + sampleID peer.ID +) + +func init() { + sampleID = "16Uiu2HAmP2iRDpPumUbKhNnEngoxAUQWBmCyn7FaYUrkaDAMXJPJ" + baseCfg := &config.BaseConfig{AuthDir: "test"} + p2pCfg := &config.P2PConfig{NPKey: sampleKeyFile} + p2pkey.InitNodeInfo(baseCfg, p2pCfg, "0.0.1-test", logger) } func TestPeerHandshaker_handshakeOutboundPeerTimeout(t *testing.T) { + var myChainID = &types.ChainID{Magic:"itSmain1"} + ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -93,14 +80,16 @@ func TestPeerHandshaker_handshakeOutboundPeerTimeout(t *testing.T) { time.Sleep(tt.delay) return -1, fmt.Errorf("must not reach") }) - - _, got, err := h.handshakeOutboundPeerTimeout(mockReader, mockWriter, time.Millisecond*50) - if err != TimeoutError { - t.Errorf("PeerHandshaker.handshakeOutboundPeer() error = %v, wantErr %v", err, TimeoutError) + ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) + defer cancel() + _, got, err := h.handshakeOutboundPeer(ctx, mockReader, mockWriter) + //_, got, err := h.handshakeOutboundPeerTimeout(mockReader, mockWriter, time.Millisecond*50) + if 
!strings.Contains(err.Error(),"context deadline exceeded") { + t.Errorf("LegacyWireHandshaker.handshakeOutboundPeer() error = %v, wantErr %v", err, "context deadline exceeded") return } if !reflect.DeepEqual(got, tt.want) { - t.Errorf("PeerHandshaker.handshakeOutboundPeer() = %v, want %v", got, tt.want) + t.Errorf("LegacyWireHandshaker.handshakeOutboundPeer() = %v, want %v", got, tt.want) } }) } @@ -116,12 +105,12 @@ func TestPeerHandshaker_Select(t *testing.T) { tests := []struct { name string - hsheader HSHeader + hsheader p2pcommon.HSHeader wantErr bool }{ - {"TVer030", HSHeader{p2pcommon.MAGICMain, p2pcommon.P2PVersion030}, false}, - {"Tver020", HSHeader{p2pcommon.MAGICMain, 0x00000200}, true}, - {"TInavlid", HSHeader{p2pcommon.MAGICMain, 0x000001}, true}, + {"TVer030", p2pcommon.HSHeader{p2pcommon.MAGICMain, p2pcommon.P2PVersion030}, false}, + {"Tver020", p2pcommon.HSHeader{p2pcommon.MAGICMain, 0x00000200}, true}, + {"TInavlid", p2pcommon.HSHeader{p2pcommon.MAGICMain, 0x000001}, true}, // TODO: test cases } for _, test := range tests { @@ -131,7 +120,7 @@ func TestPeerHandshaker_Select(t *testing.T) { h := newHandshaker(mockPM, mockActor, logger, nil, samplePeerID) - actual, err := h.selectProtocolVersion(test.hsheader, bufio.NewReader(mockReader), + actual, err := h.selectProtocolVersion(test.hsheader.Version, bufio.NewReader(mockReader), bufio.NewWriter(mockWriter)) assert.Equal(t, test.wantErr, err != nil) if !test.wantErr { @@ -140,27 +129,3 @@ func TestPeerHandshaker_Select(t *testing.T) { }) } } - -func TestHSHeader_Marshal(t *testing.T) { - tests := []struct { - name string - input []byte - expectedNewwork uint32 - expectedVersion uint32 - }{ - {"TMain030", []byte{0x047, 0x041, 0x68, 0x41, 0, 0, 3, 0}, p2pcommon.MAGICMain, p2pcommon.P2PVersion030}, - {"TMain020", []byte{0x02e, 0x041, 0x54, 0x29, 0, 1, 3, 5}, p2pcommon.MAGICTest, 0x010305}, - // TODO: test cases - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - hs := HSHeader{} - hs.Unmarshal(test.input) - assert.Equal(t, test.expectedNewwork, hs.Magic) - assert.Equal(t, test.expectedVersion, hs.Version) - - actualBytes := hs.Marshal() - assert.True(t, bytes.Equal(test.input, actualBytes)) - }) - } -} diff --git a/p2p/handshakev2.go b/p2p/handshakev2.go new file mode 100644 index 000000000..379627906 --- /dev/null +++ b/p2p/handshakev2.go @@ -0,0 +1,282 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2p + +import ( + "bufio" + "context" + "encoding/binary" + "fmt" + "github.com/aergoio/aergo-lib/log" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" + v030 "github.com/aergoio/aergo/p2p/v030" + "github.com/aergoio/aergo/types" + peer "github.com/libp2p/go-libp2p-peer" + "io" + "time" +) + +// CurrentSupported is list of versions this aergosvr supports. The first is the best recommended version. +var CurrentSupported = []p2pcommon.P2PVersion{p2pcommon.P2PVersion031, p2pcommon.P2PVersion030} + +// baseWireHandshaker works to handshake to just connected peer, it detect chain networks +// and protocol versions, and then select InnerHandshaker for that protocol version. 
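// Annotation with an illustrative sketch (not part of the patch): version selection
// against CurrentSupported is a "first common version wins" walk, with the local list
// ordered best-first. The helper below is invented for this example (its name and
// parameters are assumptions); the real logic lives in
// baseWireHandshaker.findBestProtocolVersion / VersionedManager.FindBestP2PVersion.
func pickCommonVersion(local, offered []p2pcommon.P2PVersion) p2pcommon.P2PVersion {
	for _, mine := range local { // e.g. CurrentSupported, best version first
		for _, theirs := range offered { // versions offered by the connecting peer
			if mine == theirs {
				return mine
			}
		}
	}
	// no overlap: the inbound handshaker answers this case with ErrNoMatchedVersion
	return p2pcommon.P2PVersionUnknown
}
// For example, an offer of {P2PVersion030, 0x000101} resolves to P2PVersion030,
// while a completely disjoint offer resolves to P2PVersionUnknown.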
+type baseWireHandshaker struct { + pm p2pcommon.PeerManager + actor p2pcommon.ActorService + verM p2pcommon.VersionedManager + logger *log.Logger + peerID peer.ID + // check if is it adhoc + localChainID *types.ChainID + + remoteStatus *types.Status +} + +type InboundWireHandshaker struct { + baseWireHandshaker +} + +func NewInbountHSHandler(pm p2pcommon.PeerManager, actor p2pcommon.ActorService, verManager p2pcommon.VersionedManager, log *log.Logger, chainID *types.ChainID, peerID peer.ID) p2pcommon.HSHandler { + return &InboundWireHandshaker{baseWireHandshaker{pm: pm, actor: actor, verM:verManager, logger: log, localChainID: chainID, peerID: peerID}} +} + +func (h *InboundWireHandshaker) Handle(r io.Reader, w io.Writer, ttl time.Duration) (p2pcommon.MsgReadWriter, *types.Status, error) { + ctx, cancel := context.WithTimeout(context.Background(), ttl) + defer cancel() + bufReader, bufWriter := bufio.NewReader(r), bufio.NewWriter(w) + return h.handleInboundPeer(ctx, bufReader, bufWriter) +} + +func (h *InboundWireHandshaker) handleInboundPeer(ctx context.Context, rd io.Reader, wr p2pcommon.FlushableWriter) (p2pcommon.MsgReadWriter, *types.Status, error) { + // wait initial hsmessage + hsReq, err := h.readWireHSRequest(rd) + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + default: + // go on + } + if err != nil { + return h.writeErrAndReturn(err, p2pcommon.ErrWrongHSReq, wr) + } + // check magic + if hsReq.Magic != p2pcommon.MAGICMain { + return h.writeErrAndReturn(fmt.Errorf("wrong magic %v",hsReq.Magic), p2pcommon.ErrWrongHSReq, wr) + } + + // continue to handshake with VersionedHandshaker + bestVer := h.verM.FindBestP2PVersion(hsReq.Versions) + if bestVer == p2pcommon.P2PVersionUnknown { + return h.writeErrAndReturn(fmt.Errorf("no matchied p2p version for %v", hsReq.Versions), p2pcommon.ErrNoMatchedVersion,wr) + } else { + resp := p2pcommon.HSHeadResp{hsReq.Magic, bestVer.Uint32()} + err = h.writeWireHSResponse(resp, wr) + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + default: + // go on + } + if err != nil { + return nil, nil, err + } + } + innerHS, err := h.verM.GetVersionedHandshaker(bestVer, h.peerID, rd, wr) + if err != nil { + return nil, nil, err + } + status, err := innerHS.DoForInbound(ctx) + // send hsresponse + h.remoteStatus = status + return innerHS.GetMsgRW(), status, err +} + +type OutboundWireHandshaker struct { + baseWireHandshaker +} + +func NewOutbountHSHandler(pm p2pcommon.PeerManager, actor p2pcommon.ActorService, verManager p2pcommon.VersionedManager, log *log.Logger, chainID *types.ChainID, peerID peer.ID) p2pcommon.HSHandler { + return &OutboundWireHandshaker{baseWireHandshaker{pm: pm, actor: actor, verM:verManager, logger: log, localChainID: chainID, peerID: peerID}} +} + +func (h *OutboundWireHandshaker) Handle(r io.Reader, w io.Writer, ttl time.Duration) (p2pcommon.MsgReadWriter, *types.Status, error) { + ctx, cancel := context.WithTimeout(context.Background(), ttl) + defer cancel() + bufReader, bufWriter := bufio.NewReader(r), bufio.NewWriter(w) + return h.handleOutboundPeer(ctx, bufReader, bufWriter) +} + +func (h *OutboundWireHandshaker) handleOutboundPeer(ctx context.Context, bufReader io.Reader, bufWriter p2pcommon.FlushableWriter) (p2pcommon.MsgReadWriter, *types.Status, error) { + // send initial hsmessage + versions := []p2pcommon.P2PVersion{ + p2pcommon.P2PVersion031, + p2pcommon.P2PVersion030, + } + hsHeader := p2pcommon.HSHeadReq{Magic: p2pcommon.MAGICMain, Versions: versions} + err := h.writeWireHSRequest(hsHeader, 
bufWriter) + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + default: + // go on + } + if err != nil { + return nil, nil, err + } + + // read response + respHeader, err := h.readWireHSResp(bufReader) + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + default: + // go on + } + if err != nil { + return nil, nil, err + } + // check response + if respHeader.Magic != hsHeader.Magic { + return nil, nil, fmt.Errorf("remote peer failed: %v", respHeader.RespCode) + } + bestVersion := p2pcommon.P2PVersion(respHeader.RespCode) + // continue to handshake with VersionedHandshaker + innerHS, err := h.verM.GetVersionedHandshaker(bestVersion, h.peerID, bufReader, bufWriter) + if err != nil { + return nil, nil, err + } + status, err := innerHS.DoForOutbound(ctx) + h.remoteStatus = status + return innerHS.GetMsgRW(), status, err +} + +func (h *baseWireHandshaker) writeWireHSRequest(hsHeader p2pcommon.HSHeadReq, wr p2pcommon.FlushableWriter) (err error) { + bytes := hsHeader.Marshal() + sent, err := wr.Write(bytes) + if err != nil { + return + } + err = wr.Flush() + if err != nil { + return + } + if sent != len(bytes) { + return fmt.Errorf("wrong sent size") + } + return +} + +func (h *baseWireHandshaker) readWireHSRequest(rd io.Reader) (header p2pcommon.HSHeadReq, err error) { + buf := make([]byte, p2pcommon.HSMagicLength) + readn, err := p2putil.ReadToLen(rd, buf[:p2pcommon.HSMagicLength]) + if err != nil { + return + } + if readn != p2pcommon.HSMagicLength { + err = fmt.Errorf("transport error") + return + } + header.Magic = binary.BigEndian.Uint32(buf) + readn, err = p2putil.ReadToLen(rd, buf[:p2pcommon.HSVerCntLength]) + if err != nil { + return + } + if readn != p2pcommon.HSVerCntLength { + err = fmt.Errorf("transport error") + return + } + verCount := int(binary.BigEndian.Uint32(buf)) + if verCount <= 0 || verCount > p2pcommon.HSMaxVersionCnt { + err = fmt.Errorf("invalid version count: %d", verCount) + return + } + versions := make([]p2pcommon.P2PVersion, verCount) + for i := 0; i < verCount; i++ { + readn, err = p2putil.ReadToLen(rd, buf[:p2pcommon.HSVersionLength]) + if err != nil { + return + } + if readn != p2pcommon.HSVersionLength { + err = fmt.Errorf("transport error") + return + } + versions[i] = p2pcommon.P2PVersion(binary.BigEndian.Uint32(buf)) + } + header.Versions = versions + return +} + +func (h *baseWireHandshaker) writeWireHSResponse(hsHeader p2pcommon.HSHeadResp, wr p2pcommon.FlushableWriter) (err error) { + bytes := hsHeader.Marshal() + sent, err := wr.Write(bytes) + if err != nil { + return + } + err = wr.Flush() + if err != nil { + return + } + if sent != len(bytes) { + return fmt.Errorf("wrong sent size") + } + return +} + +func (h *baseWireHandshaker) writeErrAndReturn(err error, errCode uint32, wr p2pcommon.FlushableWriter) (p2pcommon.MsgReadWriter, *types.Status, error) { + errResp := p2pcommon.HSHeadResp{p2pcommon.HSError, errCode} + _ = h.writeWireHSResponse(errResp, wr) + return nil, nil, err +} +func (h *baseWireHandshaker) readWireHSResp(rd io.Reader) (header p2pcommon.HSHeadResp, err error) { + bytebuf := make([]byte, p2pcommon.HSMagicLength) + readn, err := p2putil.ReadToLen(rd, bytebuf[:p2pcommon.HSMagicLength]) + if err != nil { + return + } + if readn != p2pcommon.HSMagicLength { + err = fmt.Errorf("transport error") + return + } + header.Magic = binary.BigEndian.Uint32(bytebuf) + readn, err = p2putil.ReadToLen(rd, bytebuf[:p2pcommon.HSVersionLength]) + if err != nil { + return + } + if readn != p2pcommon.HSVersionLength { + err = 
fmt.Errorf("transport error") + return + } + header.RespCode = binary.BigEndian.Uint32(bytebuf) + return +} + +func (h *baseWireHandshaker) selectVersionedHandshaker(version p2pcommon.P2PVersion, r io.Reader, w io.Writer) (p2pcommon.VersionedHandshaker, error) { + switch version { + // p2p version 0.3.1 is just changed in initial connecting and version verification. chain verification is same as 0.3.0 + case p2pcommon.P2PVersion031: + // TODO: + v030hs := v030.NewV030StateHS(h.pm, h.actor, h.logger, h.localChainID, h.peerID, r, w) + return v030hs, nil + case p2pcommon.P2PVersion030: + v030hs := v030.NewV030StateHS(h.pm, h.actor, h.logger, h.localChainID, h.peerID, r, w) + return v030hs, nil + default: + return nil, fmt.Errorf("not supported version") + } +} + +func (h *baseWireHandshaker) findBestProtocolVersion(versions []p2pcommon.P2PVersion) p2pcommon.P2PVersion { + for _, suppored := range CurrentSupported { + for _, reqVer := range versions { + if suppored == reqVer { + return reqVer + } + } + } + return p2pcommon.P2PVersionUnknown +} diff --git a/p2p/handshakev2_test.go b/p2p/handshakev2_test.go new file mode 100644 index 000000000..283a99eb5 --- /dev/null +++ b/p2p/handshakev2_test.go @@ -0,0 +1,311 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2p + +import ( + "bufio" + "bytes" + "context" + "reflect" + "sync/atomic" + "testing" + "time" + + "github.com/aergoio/aergo-lib/log" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pmock" + "github.com/aergoio/aergo/types" + "github.com/golang/mock/gomock" + "github.com/pkg/errors" +) + +func Test_baseWireHandshaker_writeWireHSRequest(t *testing.T) { + tests := []struct { + name string + args p2pcommon.HSHeadReq + wantErr bool + wantSize int + wantErr2 bool + }{ + {"TEmpty", p2pcommon.HSHeadReq{p2pcommon.MAGICMain, nil}, false, 8, true}, + {"TSingle", p2pcommon.HSHeadReq{p2pcommon.MAGICMain, []p2pcommon.P2PVersion{p2pcommon.P2PVersion031}}, false, 12, false}, + {"TMulti", p2pcommon.HSHeadReq{p2pcommon.MAGICMain, []p2pcommon.P2PVersion{0x033333, 0x092fa10, p2pcommon.P2PVersion031, p2pcommon.P2PVersion030}}, false, 24, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := &baseWireHandshaker{} + buffer := bytes.NewBuffer(nil) + wr := bufio.NewWriter(buffer) + err := h.writeWireHSRequest(tt.args, wr) + if (err != nil) != tt.wantErr { + t.Errorf("baseWireHandshaker.writeWireHSRequest() error = %v, wantErr %v", err, tt.wantErr) + } + if buffer.Len() != tt.wantSize { + t.Errorf("baseWireHandshaker.writeWireHSRequest() error = %v, wantErr %v", buffer.Len(), tt.wantSize) + } + + got, err2 := h.readWireHSRequest(buffer) + if (err2 != nil) != tt.wantErr2 { + t.Errorf("baseWireHandshaker.readWireHSRequest() error = %v, wantErr %v", err2, tt.wantErr2) + } + if !reflect.DeepEqual(tt.args, got) { + t.Errorf("baseWireHandshaker.readWireHSRequest() = %v, want %v", got, tt.args) + } + if buffer.Len() != 0 { + t.Errorf("baseWireHandshaker.readWireHSRequest() error = %v, wantErr %v", buffer.Len(), 0) + } + + }) + } +} + +func Test_baseWireHandshaker_writeWireHSResponse(t *testing.T) { + tests := []struct { + name string + args p2pcommon.HSHeadResp + wantErr bool + wantSize int + wantErr2 bool + }{ + {"TSingle", p2pcommon.HSHeadResp{p2pcommon.MAGICMain, p2pcommon.P2PVersion030.Uint32()}, false, 8, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := &baseWireHandshaker{} + buffer := bytes.NewBuffer(nil) + wr := bufio.NewWriter(buffer) + 
err := h.writeWireHSResponse(tt.args, wr) + if (err != nil) != tt.wantErr { + t.Errorf("baseWireHandshaker.writeWireHSRequest() error = %v, wantErr %v", err, tt.wantErr) + } + if buffer.Len() != tt.wantSize { + t.Errorf("baseWireHandshaker.writeWireHSRequest() error = %v, wantErr %v", buffer.Len(), tt.wantSize) + } + + got, err2 := h.readWireHSResp(buffer) + if (err2 != nil) != tt.wantErr2 { + t.Errorf("baseWireHandshaker.readWireHSRequest() error = %v, wantErr %v", err2, tt.wantErr2) + } + if !reflect.DeepEqual(tt.args, got) { + t.Errorf("baseWireHandshaker.readWireHSRequest() = %v, want %v", got, tt.args) + } + if buffer.Len() != 0 { + t.Errorf("baseWireHandshaker.readWireHSRequest() error = %v, wantErr %v", buffer.Len(), 0) + } + + }) + } +} + +func TestInboundWireHandshker_handleInboundPeer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + sampleChainID := &types.ChainID{} + sampleStatus := &types.Status{} + logger := log.NewLogger("p2p.test") + sampleEmptyHSReq := p2pcommon.HSHeadReq{p2pcommon.MAGICMain, nil} + sampleEmptyHSResp := p2pcommon.HSHeadResp{p2pcommon.HSError, p2pcommon.ErrWrongHSReq} + + type args struct { + r []byte + } + tests := []struct { + name string + in []byte + + bestVer p2pcommon.P2PVersion + ctxCancel int // 0 is not , 1 is during read, 2 is during write + vhErr bool // version handshaker failed + + wantW []byte // sent header + wantErr bool + }{ + // All valid + {"TCurrentVersion", p2pcommon.HSHeadReq{p2pcommon.MAGICMain, []p2pcommon.P2PVersion{p2pcommon.P2PVersion031, p2pcommon.P2PVersion030, 0x000101}}.Marshal(), p2pcommon.P2PVersion031, 0, false, p2pcommon.HSHeadResp{p2pcommon.MAGICMain, p2pcommon.P2PVersion031.Uint32()}.Marshal(), false}, + {"TOldVersion", p2pcommon.HSHeadReq{p2pcommon.MAGICMain, []p2pcommon.P2PVersion{0x000010, p2pcommon.P2PVersion030, 0x000101}}.Marshal(), p2pcommon.P2PVersion030, 0, false, p2pcommon.HSHeadResp{p2pcommon.MAGICMain, p2pcommon.P2PVersion030.Uint32()}.Marshal(), false}, + // wrong io read + {"TWrongRead", sampleEmptyHSReq.Marshal()[:7], p2pcommon.P2PVersion031, 0, false, sampleEmptyHSResp.Marshal(), true}, + // empty version + {"TEmptyVersion", sampleEmptyHSReq.Marshal(), p2pcommon.P2PVersion031, 0, false, sampleEmptyHSResp.Marshal(), true}, + // wrong io write + // {"TWrongWrite", sampleEmptyHSReq.Marshal()[:7], sampleEmptyHSResp.Marshal(), true }, + // wrong magic + {"TWrongMagic", p2pcommon.HSHeadReq{0x0001, []p2pcommon.P2PVersion{p2pcommon.P2PVersion031}}.Marshal(), p2pcommon.P2PVersion031, 0, false, sampleEmptyHSResp.Marshal(), true}, + // not supported version (or wrong version) + {"TNoVersion", p2pcommon.HSHeadReq{p2pcommon.MAGICMain, []p2pcommon.P2PVersion{0x000010, 0x030405, 0x000101}}.Marshal(), p2pcommon.P2PVersionUnknown, 0, false, p2pcommon.HSHeadResp{p2pcommon.HSError, p2pcommon.ErrNoMatchedVersion}.Marshal(), true}, + // protocol handshake failed + {"TVersionHSFailed", p2pcommon.HSHeadReq{p2pcommon.MAGICMain, []p2pcommon.P2PVersion{p2pcommon.P2PVersion031, p2pcommon.P2PVersion030, 0x000101}}.Marshal(), p2pcommon.P2PVersion031, 0, true, p2pcommon.HSHeadResp{p2pcommon.MAGICMain, p2pcommon.P2PVersion031.Uint32()}.Marshal(), true}, + + // timeout while read, no reply to remote + {"TTimeoutRead", p2pcommon.HSHeadReq{p2pcommon.MAGICMain, []p2pcommon.P2PVersion{p2pcommon.P2PVersion031, p2pcommon.P2PVersion030, 0x000101}}.Marshal(), p2pcommon.P2PVersion031, 1, false, []byte{}, true}, + // timeout while writing, sent but remote not receiving fast + {"TTimeoutWrite", 
p2pcommon.HSHeadReq{p2pcommon.MAGICMain, []p2pcommon.P2PVersion{p2pcommon.P2PVersion031, p2pcommon.P2PVersion030, 0x000101}}.Marshal(), p2pcommon.P2PVersion031, 2, false, p2pcommon.HSHeadResp{p2pcommon.MAGICMain, p2pcommon.P2PVersion031.Uint32()}.Marshal(), true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockPM := p2pmock.NewMockPeerManager(ctrl) + mockActor := p2pmock.NewMockActorService(ctrl) + mockVM := p2pmock.NewMockVersionedManager(ctrl) + mockVH := p2pmock.NewMockVersionedHandshaker(ctrl) + + mockCtx := NewContextTestDouble(tt.ctxCancel) // TODO make mock + wbuf := bytes.NewBuffer(nil) + dummyReader := bufio.NewReader(bytes.NewBuffer(tt.in)) + dummyWriter := bufio.NewWriter(wbuf) + dummyMsgRW := p2pmock.NewMockMsgReadWriter(ctrl) + + mockVM.EXPECT().FindBestP2PVersion(gomock.Any()).Return(tt.bestVer).MaxTimes(1) + mockVM.EXPECT().GetVersionedHandshaker(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(mockVH, nil).MaxTimes(1) + if !tt.vhErr { + mockVH.EXPECT().DoForInbound(mockCtx).Return(sampleStatus, nil).MaxTimes(1) + mockVH.EXPECT().GetMsgRW().Return(dummyMsgRW).MaxTimes(1) + } else { + mockVH.EXPECT().DoForInbound(mockCtx).Return(nil, errors.New("version hs failed")).MaxTimes(1) + mockVH.EXPECT().GetMsgRW().Return(nil).MaxTimes(1) + } + + h := NewInbountHSHandler(mockPM, mockActor, mockVM, logger, sampleChainID, samplePeerID).(*InboundWireHandshaker) + got, got1, err := h.handleInboundPeer(mockCtx, dummyReader, dummyWriter) + if (err != nil) != tt.wantErr { + t.Errorf("InboundWireHandshaker.handleInboundPeer() error = %v, wantErr %v", err, tt.wantErr) + } + if !bytes.Equal(wbuf.Bytes(), tt.wantW) { + t.Errorf("InboundWireHandshaker.handleInboundPeer() send resp %v, want %v", wbuf.Bytes(), tt.wantW) + } + if !tt.wantErr { + if got == nil { + t.Errorf("InboundWireHandshaker.handleInboundPeer() got msgrw nil, want not") + } + if got1 == nil { + t.Errorf("InboundWireHandshaker.handleInboundPeer() got status nil, want not") + } + } + }) + } +} + +func TestOutboundWireHandshaker_handleOutboundPeer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + sampleChainID := &types.ChainID{} + sampleStatus := &types.Status{} + logger := log.NewLogger("p2p.test") + outBytes := p2pcommon.HSHeadReq{p2pcommon.MAGICMain, []p2pcommon.P2PVersion{p2pcommon.P2PVersion031, p2pcommon.P2PVersion030}}.Marshal() + + type args struct { + r []byte + } + tests := []struct { + name string + + remoteBestVer p2pcommon.P2PVersion + ctxCancel int // 0 is not , 1 is during write, 2 is during read + vhErr bool // version handshaker failed + receingBuf []byte // received resp + + wantErr bool + }{ + // remote listening peer accept my best p2p version + {"TCurrentVersion", p2pcommon.P2PVersion031, 0, false, p2pcommon.HSHeadResp{p2pcommon.MAGICMain, p2pcommon.P2PVersion031.Uint32()}.Marshal(), false}, + // remote listening peer can connect, but old p2p version + {"TOldVersion", p2pcommon.P2PVersion030, 0, false, p2pcommon.HSHeadResp{p2pcommon.MAGICMain, p2pcommon.P2PVersion030.Uint32()}.Marshal(), false}, + // wrong io read + {"TWrongResp", p2pcommon.P2PVersion031, 0, false, outBytes[:6], true}, + // {"TWrongWrite", sampleEmptyHSReq.Marshal()[:7], sampleEmptyHSResp.Marshal(), true }, + // wrong magic + {"TWrongMagic", p2pcommon.P2PVersion031, 0, false, p2pcommon.HSHeadResp{p2pcommon.HSError, p2pcommon.ErrWrongHSReq}.Marshal(), true}, + // not supported version (or wrong version) + {"TNoVersion", p2pcommon.P2PVersionUnknown, 0, false, 
p2pcommon.HSHeadResp{p2pcommon.HSError, p2pcommon.ErrNoMatchedVersion}.Marshal(), true}, + // protocol handshake failed + {"TVersionHSFailed", p2pcommon.P2PVersion031, 0, true, p2pcommon.HSHeadResp{p2pcommon.MAGICMain, p2pcommon.P2PVersion031.Uint32()}.Marshal(), true}, + + // timeout while read, no reply to remote + {"TTimeoutRead", p2pcommon.P2PVersion031, 1, false, []byte{}, true}, + // timeout while writing, sent but remote not receiving fast + {"TTimeoutWrite", p2pcommon.P2PVersion031, 2, false, p2pcommon.HSHeadResp{p2pcommon.MAGICMain, p2pcommon.P2PVersion031.Uint32()}.Marshal(), true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockPM := p2pmock.NewMockPeerManager(ctrl) + mockActor := p2pmock.NewMockActorService(ctrl) + mockVM := p2pmock.NewMockVersionedManager(ctrl) + mockVH := p2pmock.NewMockVersionedHandshaker(ctrl) + + mockCtx := NewContextTestDouble(tt.ctxCancel) // TODO make mock + wbuf := bytes.NewBuffer(nil) + dummyReader := bufio.NewReader(bytes.NewBuffer(tt.receingBuf)) + dummyWriter := bufio.NewWriter(wbuf) + dummyMsgRW := p2pmock.NewMockMsgReadWriter(ctrl) + + mockVM.EXPECT().GetVersionedHandshaker(tt.remoteBestVer, gomock.Any(), gomock.Any(), gomock.Any()).Return(mockVH, nil).MaxTimes(1) + if !tt.vhErr { + mockVH.EXPECT().DoForOutbound(mockCtx).Return(sampleStatus, nil).MaxTimes(1) + mockVH.EXPECT().GetMsgRW().Return(dummyMsgRW).MaxTimes(1) + } else { + mockVH.EXPECT().DoForOutbound(mockCtx).Return(nil, errors.New("version hs failed")).MaxTimes(1) + mockVH.EXPECT().GetMsgRW().Return(nil).MaxTimes(1) + } + + h := NewOutbountHSHandler(mockPM, mockActor, mockVM, logger, sampleChainID, samplePeerID).(*OutboundWireHandshaker) + got, got1, err := h.handleOutboundPeer(mockCtx, dummyReader, dummyWriter) + if (err != nil) != tt.wantErr { + t.Errorf("OutboundWireHandshaker.handleOutboundPeer() error = %v, wantErr %v", err, tt.wantErr) + } + if !bytes.Equal(wbuf.Bytes(), outBytes) { + t.Errorf("OutboundWireHandshaker.handleOutboundPeer() send resp %v, want %v", wbuf.Bytes(), tt.receingBuf) + } + if !tt.wantErr { + if got == nil { + t.Errorf("OutboundWireHandshaker.handleOutboundPeer() got msgrw nil, want not") + } + if got1 == nil { + t.Errorf("OutboundWireHandshaker.handleOutboundPeer() got status nil, want not") + } + } + }) + } +} + +type ContextTestDouble struct { + doneChannel chan struct{} + expire uint32 + callCnt uint32 +} + +var _ context.Context = (*ContextTestDouble)(nil) + +func NewContextTestDouble(expire int) *ContextTestDouble { + if expire <= 0 { + expire = 9999999 + } + return &ContextTestDouble{expire: uint32(expire), doneChannel: make(chan struct{}, 1)} +} + +func (*ContextTestDouble) Deadline() (deadline time.Time, ok bool) { + panic("implement me") +} + +func (c *ContextTestDouble) Done() <-chan struct{} { + current := atomic.AddUint32(&c.callCnt, 1) + if current >= c.expire { + c.doneChannel <- struct{}{} + } + return c.doneChannel +} + +func (c *ContextTestDouble) Err() error { + if atomic.LoadUint32(&c.callCnt) >= c.expire { + return errors.New("timeout") + } else { + return nil + } +} + +func (*ContextTestDouble) Value(key interface{}) interface{} { + panic("implement me") +} diff --git a/p2p/hashbynoreceiver.go b/p2p/hashbynoreceiver.go index 3d9edd629..f96d81614 100644 --- a/p2p/hashbynoreceiver.go +++ b/p2p/hashbynoreceiver.go @@ -12,7 +12,6 @@ import ( "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/subproto" "github.com/aergoio/aergo/types" - "github.com/golang/protobuf/proto" ) // 
BlocksChunkReceiver is send p2p getBlocksRequest to target peer and receive p2p responses till all requestes blocks are received @@ -45,7 +44,7 @@ func (br *BlockHashByNoReceiver) StartGet() { } // ReceiveResp must be called just in read go routine -func (br *BlockHashByNoReceiver) ReceiveResp(msg p2pcommon.Message, msgBody proto.Message) (ret bool) { +func (br *BlockHashByNoReceiver) ReceiveResp(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) (ret bool) { ret = true // timeout if br.finished || br.timeout.Before(time.Now()) { diff --git a/p2p/hashbynoreceiver_test.go b/p2p/hashbynoreceiver_test.go index 56bba6aa2..c8842c2a1 100644 --- a/p2p/hashbynoreceiver_test.go +++ b/p2p/hashbynoreceiver_test.go @@ -6,6 +6,7 @@ package p2p import ( + "github.com/aergoio/aergo/p2p/p2pcommon" "testing" "time" @@ -112,7 +113,7 @@ func TestBlockHashByNoReceiver_ReceiveResp(t *testing.T) { br := NewBlockHashByNoReceiver(mockActor, mockPeer, seqNo, test.input, test.ttl) br.StartGet() - msg := &V030Message{subProtocol: subproto.GetHashByNoResponse, id: sampleMsgID} + msg := p2pcommon.NewSimpleMsgVal(subproto.GetHashByNoResponse, sampleMsgID) body := &types.GetHashByNoResponse{BlockHash: test.blkRsp, Status: test.rspStatus} if test.blkInterval > 0 { time.Sleep(test.blkInterval) diff --git a/p2p/hashreceiver.go b/p2p/hashreceiver.go index 9fe3105fd..67885a873 100644 --- a/p2p/hashreceiver.go +++ b/p2p/hashreceiver.go @@ -12,11 +12,10 @@ import ( "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/subproto" "github.com/aergoio/aergo/types" - "github.com/golang/protobuf/proto" ) -// BlocksChunkReceiver is send p2p getBlocksRequest to target peer and receive p2p responses till all requestes blocks are received -// It will send response actor message if all blocks are received or failed to receive, but not send response if timeout expired. +// BlockHashesReceiver is send p2p GetHashesRequest to target peer and receive p2p responses till all requested hashes are received +// It will send response actor message if all hashes are received or failed to receive, but not send response if timeout expired. type BlockHashesReceiver struct { syncerSeq uint64 requestID p2pcommon.MsgID @@ -28,14 +27,16 @@ type BlockHashesReceiver struct { count int timeout time.Time finished bool + status receiverStatus got []message.BlockHash offset int + senderFinished chan interface{} } func NewBlockHashesReceiver(actor p2pcommon.ActorService, peer p2pcommon.RemotePeer, seq uint64, req *message.GetHashes, ttl time.Duration) *BlockHashesReceiver { timeout := time.Now().Add(ttl) - return &BlockHashesReceiver{syncerSeq:seq, actor: actor, peer: peer, prevBlock: req.PrevInfo, count: int(req.Count), timeout: timeout, got: make([]message.BlockHash, 0, int(req.Count))} + return &BlockHashesReceiver{syncerSeq:seq, actor: actor, peer: peer, prevBlock: req.PrevInfo, count: int(req.Count), timeout: timeout, got: make([]message.BlockHash, int(req.Count))} } func (br *BlockHashesReceiver) StartGet() { @@ -47,48 +48,108 @@ func (br *BlockHashesReceiver) StartGet() { } // ReceiveResp must be called just in read go routine -func (br *BlockHashesReceiver) ReceiveResp(msg p2pcommon.Message, msgBody proto.Message) (ret bool) { +func (br *BlockHashesReceiver) ReceiveResp(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) (ret bool) { + // TODO this code is exact copy of BlocksChunkReceiver, so be lots of other codes in this file. 
consider refactoring ret = true + switch br.status { + case receiverStatusWaiting: + br.handleInWaiting(msg, msgBody) + case receiverStatusCanceled: + br.ignoreMsg(msg, msgBody) + return + case receiverStatusFinished: + fallthrough + default: + return + } + return +} + +func (br *BlockHashesReceiver) handleInWaiting(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { + // consuming request id when timeoutm, no more resp expected (i.e. hasNext == false ) or malformed body. // timeout - if br.finished || br.timeout.Before(time.Now()) { - // silently ignore already finished job - //br.actor.TellRequest(message.SyncerSvc,&message.GetBlockChunksRsp{ToWhom:br.peer.ID(), Err:message.RemotePeerFailError}) - br.finished = true - br.peer.ConsumeRequest(br.requestID) + if br.timeout.Before(time.Now()) { + // silently ignore already status job + br.finishReceiver() + return + } + // malformed responses means that later responses will be also malformed.. + respBody, ok := msgBody.(types.ResponseMessage) + if !ok || respBody.GetStatus() != types.ResultStatus_OK { + br.cancelReceiving(message.RemotePeerFailError, false) return } + // remote peer response failure - body := msgBody.(*types.GetHashesResponse) - if body.Status != types.ResultStatus_OK || len(body.Hashes) == 0 { - br.actor.TellRequest(message.SyncerSvc, &message.GetHashesRsp{Seq:br.syncerSeq, Hashes: nil, PrevInfo: br.prevBlock, Count: 0, Err: message.RemotePeerFailError}) - br.finished = true - br.peer.ConsumeRequest(br.requestID) + body, ok := msgBody.(*types.GetHashesResponse) + if !ok || len(body.Hashes) == 0 { + br.cancelReceiving(message.MissingHashError, false) return } // add to Got for _, block := range body.Hashes { - // unexpected block - br.got = append(br.got, block) - br.offset++ - // check overflow - if br.offset >= int(br.count) { - br.actor.TellRequest(message.SyncerSvc, &message.GetHashesRsp{Seq:br.syncerSeq, Hashes: br.got, PrevInfo: br.prevBlock, Count: uint64(br.offset)}) - br.finished = true - br.peer.ConsumeRequest(br.requestID) + // It also error that response has more hashes than expected(=requested). + if br.offset >= len(br.got) { + br.cancelReceiving(message.TooManyBlocksError, body.HasNext) return } + br.got[br.offset] = block + br.offset++ } - // is it end? + // remote peer hopefully sent last part if !body.HasNext { - if br.offset < br.count { - br.actor.TellRequest(message.SyncerSvc, &message.GetHashesRsp{Seq:br.syncerSeq, Hashes: br.got, PrevInfo: br.prevBlock, Count: 0, Err: message.MissingHashError}) - // not all blocks were filled. this is error - } else { - br.actor.TellRequest(message.SyncerSvc, &message.GetHashesRsp{Seq:br.syncerSeq, Hashes: br.got, PrevInfo: br.prevBlock, Count: uint64(len(br.got))}) - } - br.finished = true - br.peer.ConsumeRequest(br.requestID) + br.actor.TellRequest(message.SyncerSvc, &message.GetHashesRsp{Seq:br.syncerSeq, Hashes: br.got, PrevInfo: br.prevBlock, Count: uint64(len(br.got))}) + br.finishReceiver() } return } + +// cancelReceiving is cancel wait for receiving and send syncer the failure result. +// not all part of response is received, it wait remaining (and useless) response. It is assumed cancelings are not frequently occur +func (br *BlockHashesReceiver) cancelReceiving(err error, hasNext bool) { + br.status = receiverStatusCanceled + br.actor.TellRequest(message.SyncerSvc, + &message.GetHashesRsp{Seq: br.syncerSeq, PrevInfo:br.prevBlock, Err: err}) + + // check time again. since negative duration of timer will not fire channel. 
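// (annotation) cancelReceiving reports the failure to the syncer immediately, but it
// releases the request ID right away only when no further parts are expected or the
// deadline has already passed; otherwise the goroutine below keeps the request ID
// reserved until the timeout fires or ignoreMsg sees the final part (HasNext == false),
// so the remaining partial responses are still routed here and silently drained.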
+ interval := br.timeout.Sub(time.Now()) + if !hasNext || interval <= 0 { + // if remote peer will not send partial response anymore. it it actually same as finish. + br.finishReceiver() + } else { + // canceling in the middle of responses + br.senderFinished = make(chan interface{}) + go func() { + timer := time.NewTimer(interval) + select { + case <-timer.C: + break + case <-br.senderFinished: + break + } + br.peer.ConsumeRequest(br.requestID) + }() + } +} + +// finishReceiver is to cancel works, assuming cancelings are not frequently occur +func (br *BlockHashesReceiver) finishReceiver() { + br.status = receiverStatusFinished + br.peer.ConsumeRequest(br.requestID) +} + +// ignoreMsg is silently ignore following responses, which is not useless anymore. +func (br *BlockHashesReceiver) ignoreMsg(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { + body, ok := msgBody.(*types.GetBlockResponse) + if !ok { + return + } + if !body.HasNext { + // really status from remote peer + select { + case br.senderFinished <- struct{}{}: + default: + } + } +} diff --git a/p2p/hashreceiver_test.go b/p2p/hashreceiver_test.go new file mode 100644 index 000000000..cb3669911 --- /dev/null +++ b/p2p/hashreceiver_test.go @@ -0,0 +1,140 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2p + +import ( + "github.com/aergoio/aergo/chain" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/funkygao/golib/rand" + "testing" + "time" + + "github.com/aergoio/aergo/message" + "github.com/aergoio/aergo/p2p/p2pmock" + "github.com/aergoio/aergo/p2p/subproto" + "github.com/aergoio/aergo/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" +) + +func TestBlockHashesReceiver_StartGet(t *testing.T) { + sampleBlk := &types.BlockInfo{Hash:dummyBlockHash, No:10000} + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + inputHashes := make([]message.BlockHash, len(sampleBlks)) + for i, hash := range sampleBlks { + inputHashes[i] = hash + } + tests := []struct { + name string + input *message.GetHashes + ttl time.Duration + }{ + {"TSimple", &message.GetHashes{100, dummyPeerID, sampleBlk, 100}, time.Millisecond * 10}, + // TODO: test cases + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + //mockContext := new(mockContext) + mockActor := p2pmock.NewMockActorService(ctrl) + //mockActor.On("SendRequest", message.P2PSvc, mock.AnythingOfType("*types.GetBlock")) + //mockActor.On("TellRequest", message.SyncerSvc, mock.AnythingOfType("*types.GetBlock")) + mockMF := p2pmock.NewMockMoFactory(ctrl) + mockMo := createDummyMo(ctrl) + mockMF.EXPECT().NewMsgBlockRequestOrder(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockMo) + mockPeer := p2pmock.NewMockRemotePeer(ctrl) + mockPeer.EXPECT().MF().Return(mockMF) + mockPeer.EXPECT().SendMessage(mockMo).Times(1) + + expire := time.Now().Add(test.ttl) + br := NewBlockHashesReceiver(mockActor, mockPeer, test.input.Seq, test.input, test.ttl) + + br.StartGet() + + assert.False(t, expire.After(br.timeout)) + }) + } +} + +func TestBlockHashesReceiver_ReceiveResp(t *testing.T) { + //t.Skip("make check by status. 
and make another test to check handleInWaiting method") + sampleBlk := &types.BlockInfo{Hash:dummyBlockHash, No:10000} + limit := uint64(10) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + chain.Init(1<<20 , "", false, 1, 1 ) + + totalInCnt := 10 + seqNo := uint64(8723) + inputHashes := make([][]byte, totalInCnt) + for i:= 0 ; i < totalInCnt ; i++ { + inputHashes[i] = rand.RandomByteSlice(hashSize) + } + tests := []struct { + name string + input *message.GetHashes + ttl time.Duration + hashInterval time.Duration + hashInput [][][]byte + + // to verify + consumed int + sentResp int + respError bool + }{ + {"TSingleResp", &message.GetHashes{seqNo,dummyPeerID, sampleBlk, limit}, time.Minute, 0, [][][]byte{inputHashes}, 1, 1, false}, + {"TMultiResp", &message.GetHashes{seqNo,dummyPeerID, sampleBlk, limit}, time.Minute, 0, [][][]byte{inputHashes[:1], inputHashes[1:3], inputHashes[3:]}, 1, 1, false}, + // Fail1 remote err + {"TRemoteFail", &message.GetHashes{seqNo,dummyPeerID, sampleBlk, limit}, time.Minute, 0, [][][]byte{inputHashes[:0]}, 1, 1, true}, + {"TTooManyBlks", &message.GetHashes{seqNo,dummyPeerID, sampleBlk, limit-2}, time.Minute*4,0,[][][]byte{inputHashes[:1],inputHashes[1:3],inputHashes[3:]},1,1, true}, + // Fail4 response sent after timeout + {"TTimeout", &message.GetHashes{seqNo,dummyPeerID, sampleBlk, limit}, time.Millisecond * 10, time.Millisecond * 20, [][][]byte{inputHashes[:1], inputHashes[1:3], inputHashes[3:]}, 1, 0, false}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + //mockContext := new(mockContext) + mockActor := p2pmock.NewMockActorService(ctrl) + if test.sentResp > 0 { + mockActor.EXPECT().TellRequest(message.SyncerSvc, gomock.AssignableToTypeOf(&message.GetHashesRsp{})). + DoAndReturn(func(a string, arg *message.GetHashesRsp) { + if !((arg.Err != nil) == test.respError) { + t.Fatalf("Wrong error (have %v)\n", arg.Err) + } + if arg.Seq != seqNo { + t.Fatalf("Wrong seqNo %d, want %d)\n", arg.Seq, seqNo) + } + }).Times(test.sentResp) + } + + mockMF := p2pmock.NewMockMoFactory(ctrl) + mockMo := createDummyMo(ctrl) + mockMF.EXPECT().NewMsgBlockRequestOrder(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockMo) + mockPeer := p2pmock.NewMockRemotePeer(ctrl) + mockPeer.EXPECT().ID().Return(dummyPeerID).AnyTimes() + mockPeer.EXPECT().MF().Return(mockMF) + mockPeer.EXPECT().SendMessage(gomock.Any()).Times(1) + mockPeer.EXPECT().ConsumeRequest(gomock.Any()).Times(test.consumed) //mock.AnythingOfType("p2pcommon.MsgID")) + + //expire := time.Now().Add(test.ttl) + br := NewBlockHashesReceiver(mockActor, mockPeer, seqNo, test.input, test.ttl) + br.StartGet() + + msg := p2pcommon.NewSimpleMsgVal(subproto.GetHashesRequest, sampleMsgID) + for i, hashes := range test.hashInput { + if test.hashInterval > 0 { + time.Sleep(test.hashInterval) + } + body := &types.GetHashesResponse{Hashes: hashes, HasNext: i < len(test.hashInput)-1} + br.ReceiveResp(msg, body) + if br.status == receiverStatusFinished { + break + } + } + + }) + } +} diff --git a/p2p/mockMsgSigner_test.go b/p2p/mockMsgSigner_test.go deleted file mode 100644 index 557db2ba3..000000000 --- a/p2p/mockMsgSigner_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package p2p - -import ( - "github.com/libp2p/go-libp2p-peer" -) -import "github.com/stretchr/testify/mock" - -import "github.com/aergoio/aergo/types" - -// mockMsgSigner is an autogenerated mock type for the msgSigner type -type mockMsgSigner struct { - mock.Mock -} - -// signMsg provides a mock function with given fields: msg -func (_m 
*mockMsgSigner) signMsg(msg *types.P2PMessage) error { - ret := _m.Called(msg) - - var r0 error - if rf, ok := ret.Get(0).(func(*types.P2PMessage) error); ok { - r0 = rf(msg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// verifyMsg provides a mock function with given fields: msg, pubKey -func (_m *mockMsgSigner) verifyMsg(msg *types.P2PMessage, senderID peer.ID) error { - ret := _m.Called(msg, senderID) - - var r0 error - if rf, ok := ret.Get(0).(func(*types.P2PMessage, peer.ID) error); ok { - r0 = rf(msg, senderID) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/p2p/mofactory.go b/p2p/mofactory.go new file mode 100644 index 000000000..d9c152d38 --- /dev/null +++ b/p2p/mofactory.go @@ -0,0 +1,104 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2p + +import ( + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" + "github.com/aergoio/aergo/p2p/subproto" + "time" + + "github.com/aergoio/aergo/types" + "github.com/gofrs/uuid" +) + +type baseMOFactory struct { +} + +func (mf *baseMOFactory) NewMsgRequestOrder(expectResponse bool, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.MsgOrder { + rmo := &pbRequestOrder{} + msgID := uuid.Must(uuid.NewV4()) + if mf.newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, protocolID, message) { + return rmo + } + return nil +} + +func (mf *baseMOFactory) NewMsgBlockRequestOrder(respReceiver p2pcommon.ResponseReceiver, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.MsgOrder { + rmo := &pbRequestOrder{} + msgID := uuid.Must(uuid.NewV4()) + if mf.newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, protocolID, message) { + rmo.respReceiver = respReceiver + return rmo + } + return nil +} + +func (mf *baseMOFactory) NewMsgResponseOrder(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.MsgOrder { + rmo := &pbResponseOrder{} + msgID := uuid.Must(uuid.NewV4()) + if mf.newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.FromBytesOrNil(reqID[:]), protocolID, message) { + return rmo + } + return nil +} + +func (mf *baseMOFactory) NewMsgBlkBroadcastOrder(noticeMsg *types.NewBlockNotice) p2pcommon.MsgOrder { + rmo := &pbBlkNoticeOrder{} + msgID := uuid.Must(uuid.NewV4()) + if mf.newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, subproto.NewBlockNotice, noticeMsg) { + rmo.blkHash = noticeMsg.BlockHash + rmo.blkNo = noticeMsg.BlockNo + return rmo + } + return nil +} + +func (mf *baseMOFactory) NewMsgTxBroadcastOrder(message *types.NewTransactionsNotice) p2pcommon.MsgOrder { + rmo := &pbTxNoticeOrder{} + reqID := uuid.Must(uuid.NewV4()) + if mf.newV030MsgOrder(&rmo.pbMessageOrder, reqID, uuid.Nil, subproto.NewTxNotice, message) { + rmo.txHashes = message.TxHashes + return rmo + } + return nil +} + +func (mf *baseMOFactory) NewMsgBPBroadcastOrder(noticeMsg *types.BlockProducedNotice) p2pcommon.MsgOrder { + rmo := &pbBpNoticeOrder{} + msgID := uuid.Must(uuid.NewV4()) + if mf.newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, subproto.BlockProducedNotice, noticeMsg) { + rmo.block = noticeMsg.Block + return rmo + } + return nil +} + +func (mf *baseMOFactory) newHandshakeMessage(protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.Message { + // TODO define handshake specific datatype + rmo := &pbRequestOrder{} + msgID := uuid.Must(uuid.NewV4()) + if mf.newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, protocolID, message) { + return rmo.message + } + return 
nil +} + +// newPbMsgOrder is base form of making sendrequest struct +func (mf *baseMOFactory)newV030MsgOrder(mo *pbMessageOrder, msgID, orgID uuid.UUID, protocolID p2pcommon.SubProtocol, messageBody p2pcommon.MessageBody) bool { + id :=p2pcommon.MsgID(msgID) + originalid := p2pcommon.MsgID(orgID) + bytes, err := p2putil.MarshalMessageBody(messageBody) + if err != nil { + return false + } + msg := p2pcommon.NewMessageValue(protocolID, id, originalid, time.Now().UnixNano(), bytes) + mo.protocolID = protocolID + mo.needSign = true + mo.message = msg + + return true +} diff --git a/p2p/networktransport_test.go b/p2p/networktransport_test.go deleted file mode 100644 index 5863b2769..000000000 --- a/p2p/networktransport_test.go +++ /dev/null @@ -1,103 +0,0 @@ -/* - * @file - * @copyright defined in aergo/LICENSE.txt - */ - -package p2p - -import ( - "fmt" - "testing" - "time" - - "github.com/aergoio/aergo/p2p/p2pcommon" - "github.com/aergoio/aergo/p2p/p2pmock" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" - - "github.com/aergoio/aergo-lib/log" - "github.com/aergoio/aergo/config" - cfg "github.com/aergoio/aergo/config" - "github.com/aergoio/aergo/message" - "github.com/aergoio/aergo/types" - "github.com/libp2p/go-libp2p-peer" -) - -// TODO split this test into two... one is to attempt make connection and the other is test peermanager if same peerid is given -// Ignoring test for now, for lack of abstraction on AergoPeer struct -func IgrenoreTestP2PServiceRunAddPeer(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockActor := p2pmock.NewMockActorService(ctrl) - dummyBlock := types.Block{Hash: dummyBlockHash, Header: &types.BlockHeader{BlockNo: dummyBlockHeight}} - mockActor.EXPECT().CallRequest(gomock.Any(), gomock.Any(), gomock.Any()).Return(message.GetBlockRsp{Block: &dummyBlock}, nil) - //mockMF := new(MockMoFactory) - target := &networkTransport{conf: config.NewServerContext("", "").GetDefaultConfig().(*config.Config).P2P, - logger: log.NewLogger("test.p2p")} - - //target.Host = &mockHost{peerstore.NewPeerstore(pstoremem.NewKeyBook(), pstoremem.NewAddrBook(), pstoremem.NewPeerMetadata())} - target.Host = p2pmock.NewMockHost(ctrl) - target.selfMeta.ID = peer.ID("gwegw") - - sampleAddr1 := p2pcommon.PeerMeta{ID: "ddd", IPAddress: "192.168.0.1", Port: 33888, Outbound: true} - sampleAddr2 := p2pcommon.PeerMeta{ID: "fff", IPAddress: "192.168.0.2", Port: 33888, Outbound: true} - target.GetOrCreateStream(sampleAddr1, p2pcommon.AergoP2PSub) - target.GetOrCreateStream(sampleAddr1, p2pcommon.AergoP2PSub) - time.Sleep(time.Second) - if len(target.Peerstore().Peers()) != 1 { - t.Errorf("Peer count : Expected %d, Actually %d", 1, len(target.Peerstore().Peers())) - } - target.GetOrCreateStream(sampleAddr2, p2pcommon.AergoP2PSub) - time.Sleep(time.Second * 1) - if len(target.Peerstore().Peers()) != 2 { - t.Errorf("Peer count : Expected %d, Actually %d", 2, len(target.Peerstore().Peers())) - } -} - -func Test_networkTransport_initSelfMeta(t *testing.T) { - type args struct { - peerID peer.ID - noExpose bool - } - tests := []struct { - name string - conf *cfg.P2PConfig - - args args - - wantSameAddr bool - wantPort uint32 - wantID peer.ID - wantHidden bool - }{ - {"TIP6", &cfg.P2PConfig{NetProtocolAddr: "fe80::dcbf:beff:fe87:e30a", NetProtocolPort: 7845}, args{dummyPeerID, false}, true, 7845, dummyPeerID, false}, - {"TIP4", &cfg.P2PConfig{NetProtocolAddr: "211.1.1.2", NetProtocolPort: 7845}, args{dummyPeerID, false}, true, 7845, dummyPeerID, false}, - 
{"TDN", &cfg.P2PConfig{NetProtocolAddr: "www.aergo.io", NetProtocolPort: 7845}, args{dummyPeerID, false}, true, 7845, dummyPeerID, false}, - {"TDefault", &cfg.P2PConfig{NetProtocolAddr: "", NetProtocolPort: 7845}, args{dummyPeerID, false}, false, 7845, dummyPeerID, false}, - {"THidden", &cfg.P2PConfig{NetProtocolAddr: "211.1.1.2", NetProtocolPort: 7845}, args{dummyPeerID, true}, true, 7845, dummyPeerID, true}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sl := &networkTransport{ - conf: tt.conf, - logger: logger, - } - - sl.initSelfMeta(tt.args.peerID, tt.args.noExpose) - - if tt.wantSameAddr { - assert.Equal(t, tt.conf.NetProtocolAddr, sl.selfMeta.IPAddress) - } else { - assert.NotEqual(t, tt.conf.NetProtocolAddr, sl.selfMeta.IPAddress) - } - assert.Equal(t, tt.wantPort, sl.selfMeta.Port) - assert.Equal(t, tt.wantID, sl.selfMeta.ID) - assert.Equal(t, tt.wantHidden, sl.selfMeta.Hidden) - - assert.NotNil(t, sl.bindAddress) - fmt.Println("ProtocolAddress: ", sl.selfMeta.IPAddress) - fmt.Println("bindAddress: ", sl.bindAddress.String()) - }) - } -} diff --git a/p2p/p2p.go b/p2p/p2p.go index 1d3fa1840..ab927b213 100644 --- a/p2p/p2p.go +++ b/p2p/p2p.go @@ -6,35 +6,28 @@ package p2p import ( - "os" - "path/filepath" + "github.com/aergoio/aergo/p2p/p2pkey" + "github.com/aergoio/aergo/p2p/raftsupport" + "github.com/aergoio/aergo/p2p/transport" "sync" "time" + "github.com/aergoio/aergo/consensus" "github.com/aergoio/aergo/p2p/metric" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2putil" - subproto "github.com/aergoio/aergo/p2p/subproto" + "github.com/aergoio/aergo/p2p/subproto" "github.com/aergoio/aergo-actor/actor" "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/chain" "github.com/aergoio/aergo/config" - "github.com/aergoio/aergo/internal/enc" "github.com/aergoio/aergo/message" "github.com/aergoio/aergo/pkg/component" "github.com/aergoio/aergo/types" - crypto "github.com/libp2p/go-libp2p-crypto" peer "github.com/libp2p/go-libp2p-peer" ) -type nodeInfo struct { - id peer.ID - sid string - pubKey crypto.PubKey - privKey crypto.PrivKey -} - // P2P is actor component for p2p type P2P struct { *component.BaseComponent @@ -43,98 +36,27 @@ type P2P struct { chainID *types.ChainID nt p2pcommon.NetworkTransport pm p2pcommon.PeerManager + vm p2pcommon.VersionedManager sm p2pcommon.SyncManager mm metric.MetricsManager mf p2pcommon.MoFactory signer p2pcommon.MsgSigner ca types.ChainAccessor + consacc consensus.ConsensusAccessor mutex sync.Mutex } -type HandlerFactory interface { - insertHandlers(peer *remotePeerImpl) -} - var ( - _ p2pcommon.ActorService = (*P2P)(nil) - _ HSHandlerFactory = (*P2P)(nil) - ni *nodeInfo + _ p2pcommon.ActorService = (*P2P)(nil) + _ p2pcommon.HSHandlerFactory = (*P2P)(nil) ) -// InitNodeInfo initializes node-specific informations like node id. -// Caution: this must be called before all the goroutines are started. 
-func InitNodeInfo(baseCfg *config.BaseConfig, p2pCfg *config.P2PConfig, logger *log.Logger) { - // check Key and address - var ( - priv crypto.PrivKey - pub crypto.PubKey - err error - ) - - if p2pCfg.NPKey != "" { - priv, pub, err = LoadKeyFile(p2pCfg.NPKey) - if err != nil { - panic("Failed to load Keyfile '" + p2pCfg.NPKey + "' " + err.Error()) - } - } else { - logger.Info().Msg("No private key file is configured, so use auto-generated pk file instead.") - - autogenFilePath := filepath.Join(baseCfg.AuthDir, DefaultPkKeyPrefix+DefaultPkKeyExt) - if _, err := os.Stat(autogenFilePath); os.IsNotExist(err) { - logger.Info().Str("pk_file", autogenFilePath).Msg("Generate new private key file.") - priv, pub, err = GenerateKeyFile(baseCfg.AuthDir, DefaultPkKeyPrefix) - if err != nil { - panic("Failed to generate new pk file: " + err.Error()) - } - } else { - logger.Info().Str("pk_file", autogenFilePath).Msg("Load existing generated private key file.") - priv, pub, err = LoadKeyFile(autogenFilePath) - if err != nil { - panic("Failed to load generated pk file '" + autogenFilePath + "' " + err.Error()) - } - } - } - id, _ := peer.IDFromPublicKey(pub) - - ni = &nodeInfo{ - id: id, - sid: enc.ToString([]byte(id)), - pubKey: pub, - privKey: priv, - } - - p2putil.UseFullID = p2pCfg.LogFullPeerID -} - -// NodeID returns the node id. -func NodeID() peer.ID { - return ni.id -} - -// NodeSID returns the string representation of the node id. -func NodeSID() string { - if ni == nil { - return "" - } - return ni.sid -} - -// NodePrivKey returns the private key of the node. -func NodePrivKey() crypto.PrivKey { - return ni.privKey -} - -// NodePubKey returns the public key of the node. -func NodePubKey() crypto.PubKey { - return ni.pubKey -} - // NewP2P create a new ActorService for p2p func NewP2P(cfg *config.Config, chainsvc *chain.ChainService) *P2P { p2psvc := &P2P{} p2psvc.BaseComponent = component.NewBaseComponent(message.P2PSvc, p2psvc, log.NewLogger("p2p")) - p2psvc.init(cfg, chainsvc) + p2psvc.initP2P(cfg, chainsvc) return p2psvc } @@ -180,15 +102,19 @@ func (p2ps *P2P) GetNetworkTransport() p2pcommon.NetworkTransport { return p2ps.nt } -func (p2ps *P2P) GetPeerAccessor() types.PeerAccessor { +func (p2ps *P2P) GetPeerAccessor() p2pcommon.PeerAccessor { return p2ps.pm } +func (p2ps *P2P) SetConsensusAccessor(ca consensus.ConsensusAccessor) { + p2ps.consacc = ca +} + func (p2ps *P2P) ChainID() *types.ChainID { return p2ps.chainID } -func (p2ps *P2P) init(cfg *config.Config, chainsvc *chain.ChainService) { +func (p2ps *P2P) initP2P(cfg *config.Config, chainsvc *chain.ChainService) { p2ps.ca = chainsvc // check genesis block and get meta informations from it @@ -204,14 +130,19 @@ func (p2ps *P2P) init(cfg *config.Config, chainsvc *chain.ChainService) { } p2ps.chainID = chainID - netTransport := NewNetworkTransport(cfg.P2P, p2ps.Logger) - signer := newDefaultMsgSigner(ni.privKey, ni.pubKey, ni.id) - mf := &v030MOFactory{} + useRaft := genesis.ConsensusType() == consensus.ConsensusName[consensus.ConsensusRAFT] + + netTransport := transport.NewNetworkTransport(cfg.P2P, p2ps.Logger) + signer := newDefaultMsgSigner(p2pkey.NodePrivKey(), p2pkey.NodePubKey(), p2pkey.NodeID()) + + // TODO: it should be refactored to support multi version + mf := &baseMOFactory{} + //reconMan := newReconnectManager(p2ps.Logger) metricMan := metric.NewMetricManager(10) - peerMan := NewPeerManager(p2ps, p2ps, p2ps, cfg, signer, netTransport, metricMan, p2ps.Logger, mf) + peerMan := NewPeerManager(p2ps, p2ps, p2ps, cfg, signer, 
netTransport, metricMan, p2ps.Logger, mf, useRaft) syncMan := newSyncManager(p2ps, peerMan, p2ps.Logger) - + versionMan := newDefaultVersionManager(peerMan, p2ps, p2ps.Logger, p2ps.chainID) // connect managers each other //reconMan.pm = peerMan @@ -220,6 +151,7 @@ func (p2ps *P2P) init(cfg *config.Config, chainsvc *chain.ChainService) { p2ps.nt = netTransport p2ps.mf = mf p2ps.pm = peerMan + p2ps.vm = versionMan p2ps.sm = syncMan //p2ps.rm = reconMan p2ps.mm = metricMan @@ -278,10 +210,14 @@ func (p2ps *P2P) Receive(context actor.Context) { p2ps.checkAndAddPeerAddresses(msg.Peers) } } + case *message.GetCluster: + peers := p2ps.pm.GetPeers() + clusterReceiver := raftsupport.NewClusterInfoReceiver(p2ps, p2ps.mf, peers, time.Second*5, msg) + clusterReceiver.StartGet() } } -// TODO need refactoring. this code is copied from subprotcoladdrs.go +// TODO need refactoring. this code is copied from subproto/addrs.go func (p2ps *P2P) checkAndAddPeerAddresses(peers []*types.PeerAddress) { selfPeerID := p2ps.pm.SelfNodeID() peerMetas := make([]p2pcommon.PeerMeta, 0, len(peers)) @@ -318,7 +254,7 @@ func (p2ps *P2P) FutureRequest(actor string, msg interface{}, timeout time.Durat // FutureRequestDefaultTimeout implement interface method of ActorService func (p2ps *P2P) FutureRequestDefaultTimeout(actor string, msg interface{}) *actor.Future { - return p2ps.RequestToFuture(actor, msg, DefaultActorMsgTTL) + return p2ps.RequestToFuture(actor, msg, p2pcommon.DefaultActorMsgTTL) } // CallRequest implement interface method of ActorService @@ -329,7 +265,7 @@ func (p2ps *P2P) CallRequest(actor string, msg interface{}, timeout time.Duratio // CallRequest implement interface method of ActorService func (p2ps *P2P) CallRequestDefaultTimeout(actor string, msg interface{}) (interface{}, error) { - future := p2ps.RequestToFuture(actor, msg, DefaultActorMsgTTL) + future := p2ps.RequestToFuture(actor, msg, p2pcommon.DefaultActorMsgTTL) return future.Result() } @@ -338,44 +274,56 @@ func (p2ps *P2P) GetChainAccessor() types.ChainAccessor { return p2ps.ca } -func (p2ps *P2P) insertHandlers(peer *remotePeerImpl) { +func (p2ps *P2P) InsertHandlers(peer p2pcommon.RemotePeer) { logger := p2ps.Logger // PingHandlers - peer.handlers[subproto.PingRequest] = subproto.NewPingReqHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.PingResponse] = subproto.NewPingRespHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.GoAway] = subproto.NewGoAwayHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.AddressesRequest] = subproto.NewAddressesReqHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.AddressesResponse] = subproto.NewAddressesRespHandler(p2ps.pm, peer, logger, p2ps) + peer.AddMessageHandler(subproto.PingRequest, subproto.NewPingReqHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.PingResponse, subproto.NewPingRespHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.GoAway, subproto.NewGoAwayHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.AddressesRequest, subproto.NewAddressesReqHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.AddressesResponse, subproto.NewAddressesRespHandler(p2ps.pm, peer, logger, p2ps)) // BlockHandlers - peer.handlers[subproto.GetBlocksRequest] = subproto.NewBlockReqHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.GetBlocksResponse] = subproto.NewBlockRespHandler(p2ps.pm, peer, logger, p2ps, p2ps.sm) - peer.handlers[subproto.GetBlockHeadersRequest] = 
subproto.NewListBlockHeadersReqHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.GetBlockHeadersResponse] = subproto.NewListBlockRespHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.NewBlockNotice] = subproto.NewNewBlockNoticeHandler(p2ps.pm, peer, logger, p2ps, p2ps.sm) - peer.handlers[subproto.GetAncestorRequest] = subproto.NewGetAncestorReqHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.GetAncestorResponse] = subproto.NewGetAncestorRespHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.GetHashesRequest] = subproto.NewGetHashesReqHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.GetHashesResponse] = subproto.NewGetHashesRespHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.GetHashByNoRequest] = subproto.NewGetHashByNoReqHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.GetHashByNoResponse] = subproto.NewGetHashByNoRespHandler(p2ps.pm, peer, logger, p2ps) + peer.AddMessageHandler(subproto.GetBlocksRequest, subproto.NewBlockReqHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.GetBlocksResponse, subproto.NewBlockRespHandler(p2ps.pm, peer, logger, p2ps, p2ps.sm)) + peer.AddMessageHandler(subproto.GetBlockHeadersRequest, subproto.NewListBlockHeadersReqHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.GetBlockHeadersResponse, subproto.NewListBlockRespHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.NewBlockNotice, subproto.NewNewBlockNoticeHandler(p2ps.pm, peer, logger, p2ps, p2ps.sm)) + peer.AddMessageHandler(subproto.GetAncestorRequest, subproto.NewGetAncestorReqHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.GetAncestorResponse, subproto.NewGetAncestorRespHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.GetHashesRequest, subproto.NewGetHashesReqHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.GetHashesResponse, subproto.NewGetHashesRespHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.GetHashByNoRequest, subproto.NewGetHashByNoReqHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.GetHashByNoResponse, subproto.NewGetHashByNoRespHandler(p2ps.pm, peer, logger, p2ps)) // TxHandlers - peer.handlers[subproto.GetTXsRequest] = subproto.NewTxReqHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.GetTXsResponse] = subproto.NewTxRespHandler(p2ps.pm, peer, logger, p2ps) - peer.handlers[subproto.NewTxNotice] = subproto.NewNewTxNoticeHandler(p2ps.pm, peer, logger, p2ps, p2ps.sm) + peer.AddMessageHandler(subproto.GetTXsRequest, subproto.NewTxReqHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.GetTXsResponse, subproto.NewTxRespHandler(p2ps.pm, peer, logger, p2ps)) + peer.AddMessageHandler(subproto.NewTxNotice, subproto.NewNewTxNoticeHandler(p2ps.pm, peer, logger, p2ps, p2ps.sm)) // BP protocol handlers - peer.handlers[subproto.BlockProducedNotice] = subproto.NewBlockProducedNoticeHandler(p2ps.pm, peer, logger, p2ps, p2ps.sm) + peer.AddMessageHandler(subproto.BlockProducedNotice, subproto.NewBlockProducedNoticeHandler(p2ps.pm, peer, logger, p2ps, p2ps.sm)) + + // Raft support + peer.AddMessageHandler(subproto.GetClusterRequest, subproto.NewGetClusterReqHandler(p2ps.pm, peer, logger, p2ps, p2ps.consacc)) + peer.AddMessageHandler(subproto.GetClusterResponse, subproto.NewGetClusterRespHandler(p2ps.pm, peer, logger, p2ps)) } -func (p2ps *P2P) CreateHSHandler(outbound bool, pm p2pcommon.PeerManager, actor 
p2pcommon.ActorService, log *log.Logger, pid peer.ID) HSHandler { - handshakeHandler := &PeerHandshaker{pm: pm, actorServ: actor, logger: log, localChainID: p2ps.chainID, peerID: pid} - if outbound { - return &OutboundHSHandler{PeerHandshaker: handshakeHandler} +func (p2ps *P2P) CreateHSHandler(p2pVersion p2pcommon.P2PVersion, outbound bool, pid peer.ID) p2pcommon.HSHandler { + if p2pVersion == p2pcommon.P2PVersion030 { + handshakeHandler := newHandshaker(p2ps.pm, p2ps, p2ps.Logger, p2ps.chainID, pid) + if outbound { + return &OutboundHSHandler{LegacyWireHandshaker: handshakeHandler} + } else { + return &InboundHSHandler{LegacyWireHandshaker: handshakeHandler} + } } else { - return &InboundHSHandler{PeerHandshaker: handshakeHandler} + if outbound { + return NewOutbountHSHandler(p2ps.pm, p2ps, p2ps.vm, p2ps.Logger, p2ps.chainID, pid) + } else { + return NewInbountHSHandler(p2ps.pm, p2ps, p2ps.vm, p2ps.Logger, p2ps.chainID, pid) + } } } diff --git a/p2p/p2pcommon/consts.go b/p2p/p2pcommon/consts.go index 15058d4c8..873d81514 100644 --- a/p2p/p2pcommon/consts.go +++ b/p2p/p2pcommon/consts.go @@ -6,7 +6,9 @@ package p2pcommon import ( + "fmt" protocol "github.com/libp2p/go-libp2p-protocol" + "time" ) // constants of p2p protocol since v0.3 @@ -15,8 +17,6 @@ const ( MAGICMain uint32 = 0x47416841 MAGICTest uint32 = 0x2e415429 - P2PVersion030 uint32 = 0x00000300 - SigLength = 16 MaxPayloadLength = 1 << 23 // 8MB @@ -25,7 +25,66 @@ const ( MaxBlockResponseCount = 2000 ) +// P2PVersion is verion of p2p wire protocol. This version affects p2p handshake, data format transferred, etc +type P2PVersion uint32 + +func (v P2PVersion) Uint32() uint32 { + return uint32(v) +} + +func (v P2PVersion) String() string { + return fmt.Sprintf("%d.%d.%d", v&0x7fff0000, v&0x0000ff00, v&0x000000ff) +} + +const ( + P2PVersionUnknown P2PVersion = 0x00000000 + P2PVersion030 P2PVersion = 0x00000300 + P2PVersion031 P2PVersion = 0x00000301 // pseudo version for supporting multiversion +) + // context of multiaddr, as higher type of p2p message const ( - AergoP2PSub protocol.ID = "/aergop2p/0.3" + LegacyP2PSubAddr protocol.ID = "/aergop2p/0.3" + P2PSubAddr protocol.ID = "/aergop2p" +) + +// constatns for hanshake. for cacluating byte offset of wire handshake +const ( + V030HSHeaderLength = 8 + HSMagicLength = 4 + HSVersionLength = 4 + HSVerCntLength = 4 +) +const HSMaxVersionCnt = 16 + +const HSError uint32 = 0 + +// Codes in wire handshake +const ( + _ uint32 = iota + ErrWrongHSReq + ErrNoMatchedVersion // + +) + +// constants about private key +const ( + DefaultPkKeyPrefix = "aergo-peer" + DefaultPkKeyExt = ".key" + DefaultPubKeyExt = ".pub" + DefaultPeerIDExt = ".id" +) + +// constants for inter-communication of aergosvr +const ( + // other actor + DefaultActorMsgTTL = time.Second * 4 +) + +const ( + // DesignatedNodeTTL is time to determine which the remote designated peer is not working. + DesignatedNodeTTL = time.Minute * 60 + + // DefaultNodeTTL is time to determine which the remote peer is not working. 
+ DefaultNodeTTL = time.Minute * 10 ) diff --git a/p2p/p2pcommon/handshake.go b/p2p/p2pcommon/handshake.go new file mode 100644 index 000000000..5a41d4cff --- /dev/null +++ b/p2p/p2pcommon/handshake.go @@ -0,0 +1,101 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2pcommon + +import ( + "context" + "encoding/binary" + "github.com/aergoio/aergo/types" + "github.com/libp2p/go-libp2p-peer" + "io" + "time" +) + + +// HSHandlerFactory is creator of HSHandler +type HSHandlerFactory interface { + CreateHSHandler(p2pVersion P2PVersion, outbound bool, pid peer.ID) HSHandler +} + +// HSHandler handles whole process of connect, handshake, create of remote Peerseer +type HSHandler interface { + // Handle peer handshake till ttl, and return msgrw for this connection, and status of remote peer. + Handle(r io.Reader, w io.Writer, ttl time.Duration) (MsgReadWriter, *types.Status, error) +} + +type VersionedManager interface { + FindBestP2PVersion(versions []P2PVersion) P2PVersion + GetVersionedHandshaker(version P2PVersion, peerID peer.ID, r io.Reader, w io.Writer) (VersionedHandshaker, error) + + InjectHandlers(version P2PVersion, peer RemotePeer) +} + +// VersionedHandshaker do handshake related to chain, and return msgreadwriter for a protocol version +type VersionedHandshaker interface { + DoForOutbound(ctx context.Context) (*types.Status, error) + DoForInbound(ctx context.Context) (*types.Status, error) + GetMsgRW() MsgReadWriter +} + +// HSHeader is legacy type of data which peer send first to listening peer in wire handshake +type HSHeader struct { + Magic uint32 + Version P2PVersion +} + +func (h HSHeader) Marshal() []byte { + b := make([]byte, V030HSHeaderLength) + binary.BigEndian.PutUint32(b, h.Magic) + binary.BigEndian.PutUint32(b[4:], uint32(h.Version)) + return b +} + +func (h *HSHeader) Unmarshal(b []byte) { + h.Magic = binary.BigEndian.Uint32(b) + h.Version = P2PVersion(binary.BigEndian.Uint32(b[4:])) +} + +// HSHeadReq is data which peer send first to listening peer in wire handshake +type HSHeadReq struct { + Magic uint32 + // Versions are p2p versions which the connecting peer can support. + Versions []P2PVersion +} + +func (h HSHeadReq) Marshal() []byte { + verCount := len(h.Versions) + b := make([]byte, HSMagicLength+HSVerCntLength+HSVersionLength*verCount) + offset := 0 + binary.BigEndian.PutUint32(b[offset:], h.Magic) + offset += HSMagicLength + binary.BigEndian.PutUint32(b[offset:], uint32(verCount)) + offset += HSVerCntLength + for _, version := range h.Versions { + binary.BigEndian.PutUint32(b[offset:], version.Uint32()) + offset += HSVersionLength + } + return b +} + +// HSHeadResp is data which listening peer send back to connecting peer as response +type HSHeadResp struct { + // Magic will be same as the magic in HSHeadReq if wire handshake is successful, or 0 if not. + Magic uint32 + // RespCode is different meaning by value of Magic. It is p2p version which listening peer will use, if wire handshake is succesful, or errCode otherwise. 
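As a minimal illustration (not part of this change set) of the wire layout that the new HSHeadReq.Marshal produces, the sketch below marshals a request advertising both the pseudo version 0.3.1 and the legacy 0.3.0; all constants and types come from p2pcommon/consts.go and p2pcommon/handshake.go in this diff.

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/aergoio/aergo/p2p/p2pcommon"
)

func main() {
	// The connecting peer lists every p2p version it can speak.
	req := p2pcommon.HSHeadReq{
		Magic:    p2pcommon.MAGICMain,
		Versions: []p2pcommon.P2PVersion{p2pcommon.P2PVersion031, p2pcommon.P2PVersion030},
	}
	b := req.Marshal()
	// 4-byte magic + 4-byte version count + 4 bytes per version = 16 bytes:
	// 47416841 00000002 00000301 00000300
	fmt.Println(len(b), hex.EncodeToString(b))
}
```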
+ RespCode uint32 +} + +func (h HSHeadResp) Marshal() []byte { + b := make([]byte, V030HSHeaderLength) + binary.BigEndian.PutUint32(b, h.Magic) + binary.BigEndian.PutUint32(b[4:], h.RespCode) + return b +} + +func (h *HSHeadResp) Unmarshal(b []byte) { + h.Magic = binary.BigEndian.Uint32(b) + h.RespCode = binary.BigEndian.Uint32(b[4:]) +} diff --git a/p2p/p2pcommon/handshake_test.go b/p2p/p2pcommon/handshake_test.go new file mode 100644 index 000000000..2bc18ad73 --- /dev/null +++ b/p2p/p2pcommon/handshake_test.go @@ -0,0 +1,105 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2pcommon + +import ( + "bytes" + "github.com/stretchr/testify/assert" + "reflect" + "testing" +) + +func TestHSHeader_Marshal(t *testing.T) { + type fields struct { + Magic uint32 + Version P2PVersion + } + tests := []struct { + name string + fields fields + wantLen int + }{ + {"T1", fields{MAGICMain, P2PVersion030}, 8}, + {"T2", fields{MAGICTest, P2PVersion031}, 8}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := HSHeader{ + Magic: tt.fields.Magic, + Version: tt.fields.Version, + } + got := h.Marshal() + if !reflect.DeepEqual(len(got), tt.wantLen) { + t.Errorf("HSHeader.Marshal() = %v, want %v", len(got), tt.wantLen) + } + got2 := HSHeader{} + got2.Unmarshal(got) + + if !reflect.DeepEqual(got2, h) { + t.Errorf("HSHeader.Unmarshal() = %v, want %v", got2, h) + } + + }) + } +} + +func TestHSHeader_Marshal2(t *testing.T) { + tests := []struct { + name string + input []byte + expectedNewwork uint32 + expectedVersion P2PVersion + }{ + {"TMain030", []byte{0x047, 0x041, 0x68, 0x41, 0, 0, 3, 0}, MAGICMain, P2PVersion030}, + {"TMain020", []byte{0x02e, 0x041, 0x54, 0x29, 0, 1, 3, 5}, MAGICTest, 0x010305}, + // TODO: test cases + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hs := HSHeader{} + hs.Unmarshal(test.input) + assert.Equal(t, test.expectedNewwork, hs.Magic) + assert.Equal(t, test.expectedVersion, hs.Version) + + actualBytes := hs.Marshal() + assert.True(t, bytes.Equal(test.input, actualBytes)) + }) + } +} + +func TestOutHSHeader_Marshal(t *testing.T) { + type fields struct { + Magic uint32 + Versions []P2PVersion + } + tests := []struct { + name string + fields fields + wantLen int + }{ + {"TEmpty", fields{MAGICMain, nil}, 8}, + {"TSingle", fields{MAGICMain, []P2PVersion{P2PVersion030}}, 12}, + {"TSingle", fields{MAGICMain, []P2PVersion{0x033333, 0x092fa10, P2PVersion031,P2PVersion030}}, 24}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := HSHeadReq{ + Magic: tt.fields.Magic, + Versions: tt.fields.Versions, + } + + got := h.Marshal() + if !reflect.DeepEqual(len(got), tt.wantLen) { + t.Errorf("HSHeader.Marshal() = %v, want %v", len(got), tt.wantLen) + } + //got2 := HSHeadReq{} + //got2.Unmarshal(got) + //if !reflect.DeepEqual(got2, got) { + // t.Errorf("HSHeader.Unmarshal() = %v, want %v", got2, got) + //} + }) + } +} diff --git a/p2p/p2pcommon/message.go b/p2p/p2pcommon/message.go index 7002d2da7..786a8d652 100644 --- a/p2p/p2pcommon/message.go +++ b/p2p/p2pcommon/message.go @@ -5,18 +5,62 @@ package p2pcommon -// +import ( + "github.com/aergoio/aergo/types" + "github.com/golang/protobuf/proto" + "github.com/libp2p/go-libp2p-peer" +) + +// Message is unit structure transferred from a peer to another peer. 
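The comments on HSHeadResp state the response rules, but the version-selection logic itself lives in the handshaker; the helper below is only a hypothetical sketch of those rules, reusing the constants introduced in this diff.

```go
package main

import (
	"fmt"

	"github.com/aergoio/aergo/p2p/p2pcommon"
)

// buildResp sketches how a listening peer could fill HSHeadResp: echo the
// request magic and carry the chosen version on success, or send HSError with
// a reason code when no common version exists. (Hypothetical helper.)
func buildResp(req p2pcommon.HSHeadReq, chosen p2pcommon.P2PVersion) p2pcommon.HSHeadResp {
	if chosen == p2pcommon.P2PVersionUnknown {
		return p2pcommon.HSHeadResp{Magic: p2pcommon.HSError, RespCode: p2pcommon.ErrNoMatchedVersion}
	}
	return p2pcommon.HSHeadResp{Magic: req.Magic, RespCode: chosen.Uint32()}
}

func main() {
	req := p2pcommon.HSHeadReq{Magic: p2pcommon.MAGICMain, Versions: []p2pcommon.P2PVersion{p2pcommon.P2PVersion031}}
	fmt.Printf("%x\n", buildResp(req, p2pcommon.P2PVersion031).Marshal())     // 4741684100000301
	fmt.Printf("%x\n", buildResp(req, p2pcommon.P2PVersionUnknown).Marshal()) // 0000000000000002
}
```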
type Message interface { Subprotocol() SubProtocol - // Length is lenght of payload + // Length is lentgh of payload Length() uint32 + + // Timestamp is when this message was created with unixnano format Timestamp() int64 + // ID is 16 bytes unique identifier ID() MsgID + // OriginalID is message id of request which trigger this message. it will be all zero, if message is request or notice. OriginalID() MsgID - // marshaled by google protocol buffer v3. object is determined by Subprotocol + // Payload is MessageBody struct, marshaled by google protocol buffer v3. object is determined by Subprotocol Payload() []byte } + +// MessageBody is content of p2p message. +// The actual data types are varied by subprotocol, so +// For version 0.3.x, it is just wrapper of proto.Message +type MessageBody interface { + proto.Message +} + +type HandlerFactory interface { + InsertHandlers(peer RemotePeer) +} + +// MessageHandler handle incoming message +type MessageHandler interface { + ParsePayload([]byte) (MessageBody, error) + CheckAuth(msg Message, msgBody MessageBody) error + Handle(msg Message, msgBody MessageBody) + PreHandle() + PostHandle(msg Message, msgBody MessageBody) +} + +// MsgSigner sign or verify p2p message +// this is not used since v0.3, but interface is not removed for future version. +type MsgSigner interface { + // signMsg calulate signature and fill related fields in msg(peerid, pubkey, signature or etc) + SignMsg(msg *types.P2PMessage) error + // verifyMsg check signature is valid + VerifyMsg(msg *types.P2PMessage, senderID peer.ID) error +} + +// ResponseReceiver is handler function for the corresponding response message. +// It returns true when receiver handled it, or false if this receiver is not the expected handler. +type ResponseReceiver func(Message, MessageBody) bool + diff --git a/p2p/p2pcommon/messagevalue.go b/p2p/p2pcommon/messagevalue.go new file mode 100644 index 000000000..d7bc179a5 --- /dev/null +++ b/p2p/p2pcommon/messagevalue.go @@ -0,0 +1,77 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2pcommon + +import "time" + +// MessageValue is basic implementation of Message. It is used since p2p v0.3 +type MessageValue struct { + subProtocol SubProtocol + // Length is lenght of payload + length uint32 + // timestamp is unix time (precision of second) + timestamp int64 + // ID is 16 bytes unique identifier + id MsgID + // OriginalID is message id of request which trigger this message. it will be all zero, if message is request or notice. + originalID MsgID + + // marshaled by google protocol buffer v3. 
object is determined by Subprotocol + payload []byte +} + +// NewLiteMessageValue create MessageValue object which payload is empty +func NewLiteMessageValue(protocol SubProtocol, msgID, originalID MsgID, timestamp int64,) *MessageValue { + return &MessageValue{id: msgID, originalID: originalID, timestamp: timestamp, subProtocol: protocol} +} + +// NewMessageValue create a new object +func NewMessageValue(protocol SubProtocol, msgID, originalID MsgID, timestamp int64, payload []byte) *MessageValue { + msg := NewLiteMessageValue(protocol, msgID, originalID, timestamp) + msg.SetPayload(payload) + return msg +} + +func NewSimpleMsgVal(protocol SubProtocol, msgID MsgID) *MessageValue { + return NewLiteMessageValue(protocol, msgID, EmptyID, time.Now().UnixNano()) +} + +func NewSimpleRespMsgVal(protocol SubProtocol, msgID MsgID, originalID MsgID) *MessageValue { + return NewLiteMessageValue(protocol, msgID, originalID, time.Now().UnixNano()) +} + +func (m *MessageValue) Subprotocol() SubProtocol { + return m.subProtocol +} + +func (m *MessageValue) Length() uint32 { + return m.length + +} + +func (m *MessageValue) Timestamp() int64 { + return m.timestamp +} + +func (m *MessageValue) ID() MsgID { + return m.id +} + +func (m *MessageValue) OriginalID() MsgID { + return m.originalID +} + +func (m *MessageValue) Payload() []byte { + return m.payload +} + +func (m *MessageValue) SetPayload(payload []byte) { + m.payload = payload + m.length = uint32(len(payload)) +} + +var _ Message = (*MessageValue)(nil) + diff --git a/p2p/p2pcommon/msgid.go b/p2p/p2pcommon/msgid.go index 21d0c6fcf..d93c43852 100644 --- a/p2p/p2pcommon/msgid.go +++ b/p2p/p2pcommon/msgid.go @@ -46,5 +46,5 @@ func (id MsgID) UUID() uuid.UUID { } func (id MsgID) String() string { - return uuid.Must(uuid.FromBytes(id[:])).String() + return id.UUID().String() } diff --git a/p2p/p2pcommon/msgid_test.go b/p2p/p2pcommon/msgid_test.go index 904e6952f..4257c73fa 100644 --- a/p2p/p2pcommon/msgid_test.go +++ b/p2p/p2pcommon/msgid_test.go @@ -6,16 +6,18 @@ package p2pcommon import ( + "reflect" + "testing" + "github.com/gofrs/uuid" "github.com/magiconair/properties/assert" - "testing" ) -func TestMustParseBytes(t *testing.T) { +func TestParseBytesToMsgID(t *testing.T) { sampleUUID := uuid.Must(uuid.NewV4()) tests := []struct { - name string - in []byte + name string + in []byte expectErr bool }{ {"TSucc", sampleUUID[:], false}, @@ -23,10 +25,54 @@ func TestMustParseBytes(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - _, err := ParseBytesToMsgID(test.in) - assert.Equal(t, test.expectErr, err != nil) + got, err := ParseBytesToMsgID(test.in) + assert.Equal(t, test.expectErr, err != nil, "parse byte") + + got2, gotPanic := checkPanic(test.in) + assert.Equal(t, test.expectErr, gotPanic, "got panic") + if !test.expectErr && got != got2 { + t.Errorf("ParseBytes() and MustParse() were differ: %v , %v", got, got2) + } }) } } +func checkPanic(in []byte) (msg MsgID, gotPanic bool) { + defer func() { + if r := recover(); r != nil { + gotPanic = true + } + }() + + msg = MustParseBytes(in) + return +} + +func TestNewMsgID(t *testing.T) { + idMap := make(map[string]MsgID) + for i := 0; i < 100; i++ { + gotM := NewMsgID() + if _, exist := idMap[gotM.String()]; exist { + t.Errorf("NewMsgID() made duplication = %v", gotM.String()) + t.FailNow() + } + } +} + +func TestMsgID_UUID(t *testing.T) { + tests := []struct { + name string + id MsgID + want uuid.UUID + }{ + {"TEmpty", EmptyID, uuid.FromBytesOrNil(nil)}, + } + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.id.UUID(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("MsgID.UUID() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/p2p/p2pcommon/msgio.go b/p2p/p2pcommon/msgio.go index 8a1995557..3c1ae377a 100644 --- a/p2p/p2pcommon/msgio.go +++ b/p2p/p2pcommon/msgio.go @@ -5,9 +5,9 @@ package p2pcommon -// MsgReader read stream and return message object +// MsgReader read byte stream, parse stream with respect to protocol version and return message object used in p2p module type MsgReader interface { - // ReadMsg return types.MsgHeader as header, proto.Message as data + // ReadMsg return types.MsgHeader as header, MessageBody as data // The header and/or data can be nil if error is not nil ReadMsg() (Message, error) } diff --git a/p2p/p2pcommon/others.go b/p2p/p2pcommon/others.go index 9a8db4ffb..db34ab2ee 100644 --- a/p2p/p2pcommon/others.go +++ b/p2p/p2pcommon/others.go @@ -1,53 +1,26 @@ package p2pcommon import ( + "io" "time" "github.com/aergoio/aergo-actor/actor" "github.com/aergoio/aergo/message" "github.com/aergoio/aergo/types" - "github.com/golang/protobuf/proto" - crypto "github.com/libp2p/go-libp2p-crypto" host "github.com/libp2p/go-libp2p-host" inet "github.com/libp2p/go-libp2p-net" peer "github.com/libp2p/go-libp2p-peer" protocol "github.com/libp2p/go-libp2p-protocol" ) -type RemotePeer interface { - ID() peer.ID - Meta() PeerMeta - ManageNumber() uint32 - Name() string - - State() types.PeerState - LastNotice() *types.LastBlockStatus - - RunPeer() - Stop() - - SendMessage(msg MsgOrder) - SendAndWaitMessage(msg MsgOrder, ttl time.Duration) error - - PushTxsNotice(txHashes []types.TxID) - // utility method - - ConsumeRequest(msgID MsgID) - GetReceiver(id MsgID) ResponseReceiver - - // updateBlkCache add hash to block cache and return true if this hash already exists. - UpdateBlkCache(blkHash []byte, blkNumber uint64) bool - // updateTxCache add hashes to transaction cache and return newly added hashes. - UpdateTxCache(hashes []types.TxID) []types.TxID - // updateLastNotice change estimate of the last status of remote peer - UpdateLastNotice(blkHash []byte, blkNumber uint64) - - // TODO - MF() MoFactory +// PeerAccessor is an interface for a another actor module to get info of peers +type PeerAccessor interface { + GetPeerBlockInfos() []types.PeerBlockInfo + GetPeer(ID peer.ID) (RemotePeer, bool) } -// msgOrder is abstraction information about the message that will be sent to peer -// some type of msgOrder, such as notice mo, should thread-safe and re-entrant +// MsgOrder is abstraction of information about the message that will be sent to peer. 
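To make the relationship between these pieces concrete, here is a small sketch (not from the diff) that builds a request message with the constructors added in p2pcommon/messagevalue.go and msgid.go above; subproto.PingRequest is one of the existing subprotocol constants, and the payload bytes are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aergoio/aergo/p2p/p2pcommon"
	"github.com/aergoio/aergo/p2p/subproto"
)

func main() {
	// payload stands in for a marshaled MessageBody (protobuf bytes).
	payload := []byte{0x0a, 0x03, 0x66, 0x6f, 0x6f}
	msg := p2pcommon.NewMessageValue(subproto.PingRequest, p2pcommon.NewMsgID(),
		p2pcommon.EmptyID, time.Now().UnixNano(), payload)

	// Length is derived from the payload; OriginalID stays empty for a request.
	fmt.Println(msg.ID(), msg.Subprotocol(), msg.Length(), msg.OriginalID() == p2pcommon.EmptyID)
}
```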
+// Some type of msgOrder, such as notice mo, should thread-safe and re-entrant type MsgOrder interface { GetMsgID() MsgID // Timestamp is unit time value @@ -60,15 +33,10 @@ type MsgOrder interface { SendTo(p RemotePeer) error } -type ResponseReceiver func(Message, proto.Message) bool -type PbMessage interface { - proto.Message -} - type MoFactory interface { - NewMsgRequestOrder(expecteResponse bool, protocolID SubProtocol, message PbMessage) MsgOrder - NewMsgBlockRequestOrder(respReceiver ResponseReceiver, protocolID SubProtocol, message PbMessage) MsgOrder - NewMsgResponseOrder(reqID MsgID, protocolID SubProtocol, message PbMessage) MsgOrder + NewMsgRequestOrder(expecteResponse bool, protocolID SubProtocol, message MessageBody) MsgOrder + NewMsgBlockRequestOrder(respReceiver ResponseReceiver, protocolID SubProtocol, message MessageBody) MsgOrder + NewMsgResponseOrder(reqID MsgID, protocolID SubProtocol, message MessageBody) MsgOrder NewMsgBlkBroadcastOrder(noticeMsg *types.NewBlockNotice) MsgOrder NewMsgTxBroadcastOrder(noticeMsg *types.NewTransactionsNotice) MsgOrder NewMsgBPBroadcastOrder(noticeMsg *types.BlockProducedNotice) MsgOrder @@ -86,8 +54,7 @@ type PeerManager interface { AddNewPeer(peer PeerMeta) // Remove peer from peer list. Peer dispose relative resources and stop itself, and then call RemovePeer to peermanager RemovePeer(peer RemotePeer) - // NotifyPeerHandshake is called after remote peer is completed handshake and ready to receive or send - NotifyPeerHandshake(peerID peer.ID) + NotifyPeerAddressReceived([]PeerMeta) // GetPeer return registered(handshaked) remote peer object @@ -127,23 +94,6 @@ type ActorService interface { GetChainAccessor() types.ChainAccessor } -// MessageHandler handle incoming subprotocol message -type MessageHandler interface { - ParsePayload([]byte) (proto.Message, error) - CheckAuth(msgHeader Message, msgBody proto.Message) error - Handle(msgHeader Message, msgBody proto.Message) - PreHandle() - PostHandle(msgHeader Message, msgBody proto.Message) -} - -// signHandler sign or verify p2p message -type MsgSigner interface { - // signMsg calulate signature and fill related fields in msg(peerid, pubkey, signature or etc) - SignMsg(msg *types.P2PMessage) error - // verifyMsg check signature is valid - VerifyMsg(msg *types.P2PMessage, senderID peer.ID) error -} - // NTContainer can provide NetworkTransport interface. type NTContainer interface { GetNetworkTransport() NetworkTransport @@ -159,19 +109,23 @@ type NetworkTransport interface { Start() error Stop() error - PrivateKey() crypto.PrivKey - PublicKey() crypto.PubKey SelfMeta() PeerMeta - SelfNodeID() peer.ID GetAddressesOfPeer(peerID peer.ID) []string // AddStreamHandler wrapper function which call host.SetStreamHandler after transport is initialized, this method is for preventing nil error. AddStreamHandler(pid protocol.ID, handler inet.StreamHandler) - GetOrCreateStream(meta PeerMeta, protocolID protocol.ID) (inet.Stream, error) - GetOrCreateStreamWithTTL(meta PeerMeta, protocolID protocol.ID, ttl time.Duration) (inet.Stream, error) + GetOrCreateStream(meta PeerMeta, protocolIDs ...protocol.ID) (inet.Stream, error) + GetOrCreateStreamWithTTL(meta PeerMeta, ttl time.Duration, protocolIDs ...protocol.ID) (inet.Stream, error) FindPeer(peerID peer.ID) bool ClosePeerConnection(peerID peer.ID) bool } + +// FlushableWriter is writer which have Flush method, such as bufio.Writer +type FlushableWriter interface { + io.Writer + // Flush writes any buffered data to the underlying io.Writer. 
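The transport methods now take a variadic list of protocol IDs, so a caller can offer the new `/aergop2p` id together with the legacy `/aergop2p/0.3` id in a single dial. The helper below is a hypothetical sketch of that usage, not code from the diff.

```go
package p2psketch

import (
	"fmt"

	"github.com/aergoio/aergo/p2p/p2pcommon"
)

// dialPeer shows the variadic GetOrCreateStream: the new protocol id is listed
// before the legacy one, and the returned stream reports which id was
// negotiated. (Hypothetical helper for illustration.)
func dialPeer(nt p2pcommon.NetworkTransport, meta p2pcommon.PeerMeta) error {
	s, err := nt.GetOrCreateStream(meta, p2pcommon.P2PSubAddr, p2pcommon.LegacyP2PSubAddr)
	if err != nil {
		return err
	}
	fmt.Println("negotiated protocol:", s.Protocol())
	return nil
}
```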
+ Flush() error +} \ No newline at end of file diff --git a/p2p/p2pcommon/peermeta.go b/p2p/p2pcommon/peermeta.go index e6c42267d..0fe13a163 100644 --- a/p2p/p2pcommon/peermeta.go +++ b/p2p/p2pcommon/peermeta.go @@ -10,6 +10,9 @@ import ( peer "github.com/libp2p/go-libp2p-peer" ) +const ( + UnknownVersion = "" +) // PeerMeta contains non changeable information of peer node during connected state // TODO: PeerMeta is almost same as PeerAddress, so TODO to unify them. type PeerMeta struct { @@ -19,15 +22,25 @@ type PeerMeta struct { Port uint32 Designated bool // Designated means this peer is designated in config file and connect to in startup phase + Version string Hidden bool // Hidden means that meta info of this peer will not be sent to other peers when getting peer list Outbound bool } +func (m *PeerMeta) GetVersion() string { + if m.Version == "" { + return "(old)" + } else { + return m.Version + } +} + // FromStatusToMeta create peerMeta from Status message func NewMetaFromStatus(status *types.Status, outbound bool) PeerMeta { meta := FromPeerAddress(status.Sender) meta.Hidden = status.NoExpose meta.Outbound = outbound + meta.Version = status.Version return meta } diff --git a/p2p/p2pcommon/peermeta_test.go b/p2p/p2pcommon/peermeta_test.go index 828ba0863..0be9e24c0 100644 --- a/p2p/p2pcommon/peermeta_test.go +++ b/p2p/p2pcommon/peermeta_test.go @@ -45,9 +45,9 @@ func TestFromPeerAddress(t *testing.T) { func TestNewMetaFromStatus(t *testing.T) { type args struct { - ip string - port uint32 - id string + ip string + port uint32 + id string noExpose bool outbound bool } @@ -57,12 +57,12 @@ func TestNewMetaFromStatus(t *testing.T) { }{ {"TExpose", args{"192.168.1.2", 2, "id0002", false, false}}, {"TNoExpose", args{"0.0.0.0", 2223, "id2223", true, false}}, - {"TOutbound", args{"2001:0db8:85a3:08d3:1319:8a2e:0370:7334", 444, "id0002",false, true}}, + {"TOutbound", args{"2001:0db8:85a3:08d3:1319:8a2e:0370:7334", 444, "id0002", false, true}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sender := &types.PeerAddress{Address:tt.args.ip, Port:tt.args.port, PeerID:[]byte(tt.args.id)} - status := &types.Status{Sender:sender, NoExpose:tt.args.noExpose} + sender := &types.PeerAddress{Address: tt.args.ip, Port: tt.args.port, PeerID: []byte(tt.args.id)} + status := &types.Status{Sender: sender, NoExpose: tt.args.noExpose} actual := NewMetaFromStatus(status, tt.args.outbound) assert.Equal(t, tt.args.ip, actual.IPAddress) assert.Equal(t, tt.args.port, actual.Port) diff --git a/p2p/p2pcommon/pool.go b/p2p/p2pcommon/pool.go new file mode 100644 index 000000000..25654414e --- /dev/null +++ b/p2p/p2pcommon/pool.go @@ -0,0 +1,79 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2pcommon + +import ( + "errors" + net "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "time" +) + +const ( + WaitingPeerManagerInterval = time.Minute + + PolarisQueryInterval = time.Minute * 10 + PeerQueryInterval = time.Hour + PeerFirstInterval = time.Second * 4 + + MaxConcurrentHandshake = 5 + +) + +var ( + ErrNoWaitings = errors.New("no waiting peer exists") +) + +type PeerEventListener interface { + OnPeerConnect(pid peer.ID) + OnPeerDisconnect(peer RemotePeer) +} + +// PeerFinder works for collecting peer candidate. +// It queries to Polaris or other connected peer efficiently. +// It determine if peer is +// NOTE that this object is not thread safe by itself. 
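Since the handshake status now carries the remote peer's build version, PeerMeta exposes it through GetVersion and falls back to a placeholder for legacy peers. A short sketch (the version string and address values are made up):

```go
package main

import (
	"fmt"

	"github.com/aergoio/aergo/p2p/p2pcommon"
	"github.com/aergoio/aergo/types"
)

func main() {
	sender := &types.PeerAddress{Address: "192.168.1.2", Port: 2223, PeerID: []byte("id2223")}

	// A legacy peer leaves Status.Version empty, so GetVersion reports "(old)".
	legacy := p2pcommon.NewMetaFromStatus(&types.Status{Sender: sender}, false)
	fmt.Println(legacy.GetVersion())

	// A newer peer sends its build version in the handshake status.
	recent := p2pcommon.NewMetaFromStatus(&types.Status{Sender: sender, Version: "v1.0.0"}, true)
	fmt.Println(recent.GetVersion())
}
```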
+type PeerFinder interface { + PeerEventListener + + // Check if it need to discover more peers and send query request to polaris or other peers if needed. + CheckAndFill() +} + +// WaitingPeerManager manage waiting peer pool and role to connect and handshaking of remote peer. +type WaitingPeerManager interface { + PeerEventListener + // OnDiscoveredPeers is called when response of discover query came from polaris or other peer. + // It returns the count of previously unknown peers. + OnDiscoveredPeers(metas []PeerMeta) int + // OnWorkDone + OnWorkDone(result ConnWorkResult) + + CheckAndConnect() + + OnInboundConn(s net.Stream) + + OnInboundConnLegacy(s net.Stream) +} + +type WaitingPeer struct { + Meta PeerMeta + TrialCnt int + NextTrial time.Time + + LastResult error +} + +type ConnWorkResult struct { + Inbound bool + Seq uint32 + // TargetPeer is nil if Inbound is true + TargetPeer *WaitingPeer + Meta PeerMeta + + P2PVer uint32 + Result error +} \ No newline at end of file diff --git a/p2p/p2pcommon/remotepeer.go b/p2p/p2pcommon/remotepeer.go new file mode 100644 index 000000000..378336ac5 --- /dev/null +++ b/p2p/p2pcommon/remotepeer.go @@ -0,0 +1,48 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2pcommon + +import ( + "github.com/aergoio/aergo/types" + "github.com/libp2p/go-libp2p-peer" + "time" +) + +type RemotePeer interface { + ID() peer.ID + Meta() PeerMeta + ManageNumber() uint32 + Name() string + Version() string + + AddMessageHandler(subProtocol SubProtocol, handler MessageHandler) + + State() types.PeerState + // LastStatus returns last observed status of remote peer. this value will be changed by notice, or ping + LastStatus() *types.LastBlockStatus + + RunPeer() + Stop() + + SendMessage(msg MsgOrder) + SendAndWaitMessage(msg MsgOrder, ttl time.Duration) error + + PushTxsNotice(txHashes []types.TxID) + // utility method + + ConsumeRequest(msgID MsgID) + GetReceiver(id MsgID) ResponseReceiver + + // updateBlkCache add hash to block cache and return true if this hash already exists. + UpdateBlkCache(blkHash []byte, blkNumber uint64) bool + // updateTxCache add hashes to transaction cache and return newly added hashes. + UpdateTxCache(hashes []types.TxID) []types.TxID + // updateLastNotice change estimate of the last status of remote peer + UpdateLastNotice(blkHash []byte, blkNumber uint64) + + // TODO + MF() MoFactory +} diff --git a/p2p/p2pkey/nodekey.go b/p2p/p2pkey/nodekey.go new file mode 100644 index 000000000..2b3f391bf --- /dev/null +++ b/p2p/p2pkey/nodekey.go @@ -0,0 +1,127 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2pkey + +import ( + "github.com/aergoio/aergo-lib/log" + "github.com/aergoio/aergo/config" + "github.com/aergoio/aergo/internal/enc" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" + "github.com/aergoio/aergo/types" + "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-peer" + "os" + "path/filepath" + "time" +) + +type nodeInfo struct { + id peer.ID + sid string + pubKey crypto.PubKey + privKey crypto.PrivKey + + version string + startTime time.Time + +} + +var ni *nodeInfo + +// InitNodeInfo initializes node-specific informations like node id. +// Caution: this must be called before all the goroutines are started. 
+func InitNodeInfo(baseCfg *config.BaseConfig, p2pCfg *config.P2PConfig, version string, logger *log.Logger) { + // check Key and address + var ( + priv crypto.PrivKey + pub crypto.PubKey + err error + ) + + if p2pCfg.NPKey != "" { + priv, pub, err = p2putil.LoadKeyFile(p2pCfg.NPKey) + if err != nil { + panic("Failed to load Keyfile '" + p2pCfg.NPKey + "' " + err.Error()) + } + } else { + logger.Info().Msg("No private key file is configured, so use auto-generated pk file instead.") + + autogenFilePath := filepath.Join(baseCfg.AuthDir, p2pcommon.DefaultPkKeyPrefix+p2pcommon.DefaultPkKeyExt) + if _, err := os.Stat(autogenFilePath); os.IsNotExist(err) { + logger.Info().Str("pk_file", autogenFilePath).Msg("Generate new private key file.") + priv, pub, err = p2putil.GenerateKeyFile(baseCfg.AuthDir, p2pcommon.DefaultPkKeyPrefix) + if err != nil { + panic("Failed to generate new pk file: " + err.Error()) + } + } else { + logger.Info().Str("pk_file", autogenFilePath).Msg("Load existing generated private key file.") + priv, pub, err = p2putil.LoadKeyFile(autogenFilePath) + if err != nil { + panic("Failed to load generated pk file '" + autogenFilePath + "' " + err.Error()) + } + } + } + id, _ := peer.IDFromPublicKey(pub) + + ni = &nodeInfo{ + id: id, + sid: enc.ToString([]byte(id)), + pubKey: pub, + privKey: priv, + version: version, + startTime: time.Now(), + } + + p2putil.UseFullID = p2pCfg.LogFullPeerID +} + +// NodeID returns the node id. +func NodeID() peer.ID { + return ni.id +} + +// NodeSID returns the string representation of the node id. +func NodeSID() string { + if ni == nil { + return "" + } + return ni.sid +} + +// NodePrivKey returns the private key of the node. +func NodePrivKey() crypto.PrivKey { + return ni.privKey +} + +// NodePubKey returns the public key of the node. +func NodePubKey() crypto.PubKey { + return ni.pubKey +} + +// NodeVersion returns the version of this binary. 
TODO: It's not good that version info is in p2pkey package +func NodeVersion() string { + return ni.version +} + +func StartTime() time.Time { + return ni.startTime +} + +func GetHostAccessor() types.HostAccessor { + return simpleHostAccessor{} +} + +type simpleHostAccessor struct {} + +func (simpleHostAccessor) Version() string { + return ni.version +} + +func (simpleHostAccessor) StartTime() time.Time { + return ni.startTime +} + diff --git a/p2p/p2pmock/mock_chainaccessor.go b/p2p/p2pmock/mock_chainaccessor.go index b8f652ca9..d33e1357e 100644 --- a/p2p/p2pmock/mock_chainaccessor.go +++ b/p2p/p2pmock/mock_chainaccessor.go @@ -62,6 +62,20 @@ func (mr *MockChainAccessorMockRecorder) GetConsensusInfo() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConsensusInfo", reflect.TypeOf((*MockChainAccessor)(nil).GetConsensusInfo)) } +// GetChainStats mocks base method +func (m *MockChainAccessor) GetChainStats() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChainStats") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetConsensusInfo indicates an expected call of GetConsensusInfo +func (mr *MockChainAccessorMockRecorder) GetChainStats() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChainStats", reflect.TypeOf((*MockChainAccessor)(nil).GetChainStats)) +} + // GetBestBlock mocks base method func (m *MockChainAccessor) GetBestBlock() (*types.Block, error) { m.ctrl.T.Helper() diff --git a/p2p/p2pmock/mock_handshake.go b/p2p/p2pmock/mock_handshake.go new file mode 100644 index 000000000..cbd4f3d6b --- /dev/null +++ b/p2p/p2pmock/mock_handshake.go @@ -0,0 +1,207 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: p2p/p2pcommon/handshake.go + +// Package p2pmock is a generated GoMock package. 
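The new p2pkey package keeps the node identity in a package-level singleton, so call order matters: InitNodeInfo must run once before anything reads the identity. The wrapper below is a hypothetical sketch of that wiring; the config and logger are assumed to come from the server startup code.

```go
package p2psketch

import (
	"github.com/aergoio/aergo-lib/log"
	"github.com/aergoio/aergo/config"
	"github.com/aergoio/aergo/p2p/p2pkey"
)

// initP2PIdentity is a hypothetical wrapper: InitNodeInfo must be called once,
// before any goroutine touches the node identity; after that the accessors
// such as NodeSID and NodeVersion are safe to use.
func initP2PIdentity(baseCfg *config.BaseConfig, p2pCfg *config.P2PConfig, version string, logger *log.Logger) {
	p2pkey.InitNodeInfo(baseCfg, p2pCfg, version, logger)
	logger.Info().Str("peer_id", p2pkey.NodeSID()).Str("version", p2pkey.NodeVersion()).Msg("p2p identity ready")
}
```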
+package p2pmock + +import ( + context "context" + p2pcommon "github.com/aergoio/aergo/p2p/p2pcommon" + types "github.com/aergoio/aergo/types" + gomock "github.com/golang/mock/gomock" + go_libp2p_peer "github.com/libp2p/go-libp2p-peer" + io "io" + reflect "reflect" + time "time" +) + +// MockHSHandlerFactory is a mock of HSHandlerFactory interface +type MockHSHandlerFactory struct { + ctrl *gomock.Controller + recorder *MockHSHandlerFactoryMockRecorder +} + +// MockHSHandlerFactoryMockRecorder is the mock recorder for MockHSHandlerFactory +type MockHSHandlerFactoryMockRecorder struct { + mock *MockHSHandlerFactory +} + +// NewMockHSHandlerFactory creates a new mock instance +func NewMockHSHandlerFactory(ctrl *gomock.Controller) *MockHSHandlerFactory { + mock := &MockHSHandlerFactory{ctrl: ctrl} + mock.recorder = &MockHSHandlerFactoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockHSHandlerFactory) EXPECT() *MockHSHandlerFactoryMockRecorder { + return m.recorder +} + +// CreateHSHandler mocks base method +func (m *MockHSHandlerFactory) CreateHSHandler(p2pVersion p2pcommon.P2PVersion, outbound bool, pid go_libp2p_peer.ID) p2pcommon.HSHandler { + ret := m.ctrl.Call(m, "CreateHSHandler", p2pVersion, outbound, pid) + ret0, _ := ret[0].(p2pcommon.HSHandler) + return ret0 +} + +// CreateHSHandler indicates an expected call of CreateHSHandler +func (mr *MockHSHandlerFactoryMockRecorder) CreateHSHandler(p2pVersion, outbound, pid interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHSHandler", reflect.TypeOf((*MockHSHandlerFactory)(nil).CreateHSHandler), p2pVersion, outbound, pid) +} + +// MockHSHandler is a mock of HSHandler interface +type MockHSHandler struct { + ctrl *gomock.Controller + recorder *MockHSHandlerMockRecorder +} + +// MockHSHandlerMockRecorder is the mock recorder for MockHSHandler +type MockHSHandlerMockRecorder struct { + mock *MockHSHandler +} + +// NewMockHSHandler creates a new mock instance +func NewMockHSHandler(ctrl *gomock.Controller) *MockHSHandler { + mock := &MockHSHandler{ctrl: ctrl} + mock.recorder = &MockHSHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockHSHandler) EXPECT() *MockHSHandlerMockRecorder { + return m.recorder +} + +// Handle mocks base method +func (m *MockHSHandler) Handle(r io.Reader, w io.Writer, ttl time.Duration) (p2pcommon.MsgReadWriter, *types.Status, error) { + ret := m.ctrl.Call(m, "Handle", r, w, ttl) + ret0, _ := ret[0].(p2pcommon.MsgReadWriter) + ret1, _ := ret[1].(*types.Status) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// Handle indicates an expected call of Handle +func (mr *MockHSHandlerMockRecorder) Handle(r, w, ttl interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Handle", reflect.TypeOf((*MockHSHandler)(nil).Handle), r, w, ttl) +} + +// MockVersionedManager is a mock of VersionedManager interface +type MockVersionedManager struct { + ctrl *gomock.Controller + recorder *MockVersionedManagerMockRecorder +} + +// MockVersionedManagerMockRecorder is the mock recorder for MockVersionedManager +type MockVersionedManagerMockRecorder struct { + mock *MockVersionedManager +} + +// NewMockVersionedManager creates a new mock instance +func NewMockVersionedManager(ctrl *gomock.Controller) *MockVersionedManager { + mock := &MockVersionedManager{ctrl: ctrl} + mock.recorder = 
&MockVersionedManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockVersionedManager) EXPECT() *MockVersionedManagerMockRecorder { + return m.recorder +} + +// FindBestP2PVersion mocks base method +func (m *MockVersionedManager) FindBestP2PVersion(versions []p2pcommon.P2PVersion) p2pcommon.P2PVersion { + ret := m.ctrl.Call(m, "FindBestP2PVersion", versions) + ret0, _ := ret[0].(p2pcommon.P2PVersion) + return ret0 +} + +// FindBestP2PVersion indicates an expected call of FindBestP2PVersion +func (mr *MockVersionedManagerMockRecorder) FindBestP2PVersion(versions interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindBestP2PVersion", reflect.TypeOf((*MockVersionedManager)(nil).FindBestP2PVersion), versions) +} + +// GetVersionedHandshaker mocks base method +func (m *MockVersionedManager) GetVersionedHandshaker(version p2pcommon.P2PVersion, peerID go_libp2p_peer.ID, r io.Reader, w io.Writer) (p2pcommon.VersionedHandshaker, error) { + ret := m.ctrl.Call(m, "GetVersionedHandshaker", version, peerID, r, w) + ret0, _ := ret[0].(p2pcommon.VersionedHandshaker) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVersionedHandshaker indicates an expected call of GetVersionedHandshaker +func (mr *MockVersionedManagerMockRecorder) GetVersionedHandshaker(version, peerID, r, w interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVersionedHandshaker", reflect.TypeOf((*MockVersionedManager)(nil).GetVersionedHandshaker), version, peerID, r, w) +} + +// InjectHandlers mocks base method +func (m *MockVersionedManager) InjectHandlers(version p2pcommon.P2PVersion, peer p2pcommon.RemotePeer) { + m.ctrl.Call(m, "InjectHandlers", version, peer) +} + +// InjectHandlers indicates an expected call of InjectHandlers +func (mr *MockVersionedManagerMockRecorder) InjectHandlers(version, peer interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InjectHandlers", reflect.TypeOf((*MockVersionedManager)(nil).InjectHandlers), version, peer) +} + +// MockVersionedHandshaker is a mock of VersionedHandshaker interface +type MockVersionedHandshaker struct { + ctrl *gomock.Controller + recorder *MockVersionedHandshakerMockRecorder +} + +// MockVersionedHandshakerMockRecorder is the mock recorder for MockVersionedHandshaker +type MockVersionedHandshakerMockRecorder struct { + mock *MockVersionedHandshaker +} + +// NewMockVersionedHandshaker creates a new mock instance +func NewMockVersionedHandshaker(ctrl *gomock.Controller) *MockVersionedHandshaker { + mock := &MockVersionedHandshaker{ctrl: ctrl} + mock.recorder = &MockVersionedHandshakerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockVersionedHandshaker) EXPECT() *MockVersionedHandshakerMockRecorder { + return m.recorder +} + +// DoForOutbound mocks base method +func (m *MockVersionedHandshaker) DoForOutbound(ctx context.Context) (*types.Status, error) { + ret := m.ctrl.Call(m, "DoForOutbound", ctx) + ret0, _ := ret[0].(*types.Status) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DoForOutbound indicates an expected call of DoForOutbound +func (mr *MockVersionedHandshakerMockRecorder) DoForOutbound(ctx interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoForOutbound", reflect.TypeOf((*MockVersionedHandshaker)(nil).DoForOutbound), ctx) +} + +// DoForInbound mocks base 
method +func (m *MockVersionedHandshaker) DoForInbound(ctx context.Context) (*types.Status, error) { + ret := m.ctrl.Call(m, "DoForInbound", ctx) + ret0, _ := ret[0].(*types.Status) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DoForInbound indicates an expected call of DoForInbound +func (mr *MockVersionedHandshakerMockRecorder) DoForInbound(ctx interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoForInbound", reflect.TypeOf((*MockVersionedHandshaker)(nil).DoForInbound), ctx) +} + +// GetMsgRW mocks base method +func (m *MockVersionedHandshaker) GetMsgRW() p2pcommon.MsgReadWriter { + ret := m.ctrl.Call(m, "GetMsgRW") + ret0, _ := ret[0].(p2pcommon.MsgReadWriter) + return ret0 +} + +// GetMsgRW indicates an expected call of GetMsgRW +func (mr *MockVersionedHandshakerMockRecorder) GetMsgRW() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMsgRW", reflect.TypeOf((*MockVersionedHandshaker)(nil).GetMsgRW)) +} diff --git a/p2p/p2pmock/mock_message.go b/p2p/p2pmock/mock_message.go index d113c412d..e1d2b9c0e 100644 --- a/p2p/p2pmock/mock_message.go +++ b/p2p/p2pmock/mock_message.go @@ -1,14 +1,13 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/aergoio/aergo/p2p/p2pcommon (interfaces: Message) +// Source: p2p/p2pcommon/message.go -// Package mock_p2pcommon is a generated GoMock package. +// Package p2pmock is a generated GoMock package. package p2pmock import ( - reflect "reflect" - p2pcommon "github.com/aergoio/aergo/p2p/p2pcommon" gomock "github.com/golang/mock/gomock" + reflect "reflect" ) // MockMessage is a mock of Message interface @@ -34,23 +33,20 @@ func (m *MockMessage) EXPECT() *MockMessageMockRecorder { return m.recorder } -// ID mocks base method -func (m *MockMessage) ID() p2pcommon.MsgID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ID") - ret0, _ := ret[0].(p2pcommon.MsgID) +// Subprotocol mocks base method +func (m *MockMessage) Subprotocol() p2pcommon.SubProtocol { + ret := m.ctrl.Call(m, "Subprotocol") + ret0, _ := ret[0].(p2pcommon.SubProtocol) return ret0 } -// ID indicates an expected call of ID -func (mr *MockMessageMockRecorder) ID() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockMessage)(nil).ID)) +// Subprotocol indicates an expected call of Subprotocol +func (mr *MockMessageMockRecorder) Subprotocol() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subprotocol", reflect.TypeOf((*MockMessage)(nil).Subprotocol)) } // Length mocks base method func (m *MockMessage) Length() uint32 { - m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Length") ret0, _ := ret[0].(uint32) return ret0 @@ -58,13 +54,35 @@ func (m *MockMessage) Length() uint32 { // Length indicates an expected call of Length func (mr *MockMessageMockRecorder) Length() *gomock.Call { - mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Length", reflect.TypeOf((*MockMessage)(nil).Length)) } +// Timestamp mocks base method +func (m *MockMessage) Timestamp() int64 { + ret := m.ctrl.Call(m, "Timestamp") + ret0, _ := ret[0].(int64) + return ret0 +} + +// Timestamp indicates an expected call of Timestamp +func (mr *MockMessageMockRecorder) Timestamp() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timestamp", reflect.TypeOf((*MockMessage)(nil).Timestamp)) +} + +// ID mocks base method +func (m *MockMessage) ID() p2pcommon.MsgID { + ret := m.ctrl.Call(m, "ID") + ret0, _ := 
ret[0].(p2pcommon.MsgID) + return ret0 +} + +// ID indicates an expected call of ID +func (mr *MockMessageMockRecorder) ID() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockMessage)(nil).ID)) +} + // OriginalID mocks base method func (m *MockMessage) OriginalID() p2pcommon.MsgID { - m.ctrl.T.Helper() ret := m.ctrl.Call(m, "OriginalID") ret0, _ := ret[0].(p2pcommon.MsgID) return ret0 @@ -72,13 +90,11 @@ func (m *MockMessage) OriginalID() p2pcommon.MsgID { // OriginalID indicates an expected call of OriginalID func (mr *MockMessageMockRecorder) OriginalID() *gomock.Call { - mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OriginalID", reflect.TypeOf((*MockMessage)(nil).OriginalID)) } // Payload mocks base method func (m *MockMessage) Payload() []byte { - m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Payload") ret0, _ := ret[0].([]byte) return ret0 @@ -86,34 +102,38 @@ func (m *MockMessage) Payload() []byte { // Payload indicates an expected call of Payload func (mr *MockMessageMockRecorder) Payload() *gomock.Call { - mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Payload", reflect.TypeOf((*MockMessage)(nil).Payload)) } -// Subprotocol mocks base method -func (m *MockMessage) Subprotocol() p2pcommon.SubProtocol { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Subprotocol") - ret0, _ := ret[0].(p2pcommon.SubProtocol) - return ret0 +// MockHandlerFactory is a mock of HandlerFactory interface +type MockHandlerFactory struct { + ctrl *gomock.Controller + recorder *MockHandlerFactoryMockRecorder } -// Subprotocol indicates an expected call of Subprotocol -func (mr *MockMessageMockRecorder) Subprotocol() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subprotocol", reflect.TypeOf((*MockMessage)(nil).Subprotocol)) +// MockHandlerFactoryMockRecorder is the mock recorder for MockHandlerFactory +type MockHandlerFactoryMockRecorder struct { + mock *MockHandlerFactory } -// Timestamp mocks base method -func (m *MockMessage) Timestamp() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Timestamp") - ret0, _ := ret[0].(int64) - return ret0 +// NewMockHandlerFactory creates a new mock instance +func NewMockHandlerFactory(ctrl *gomock.Controller) *MockHandlerFactory { + mock := &MockHandlerFactory{ctrl: ctrl} + mock.recorder = &MockHandlerFactoryMockRecorder{mock} + return mock } -// Timestamp indicates an expected call of Timestamp -func (mr *MockMessageMockRecorder) Timestamp() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timestamp", reflect.TypeOf((*MockMessage)(nil).Timestamp)) +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockHandlerFactory) EXPECT() *MockHandlerFactoryMockRecorder { + return m.recorder +} + +// InsertHandlers mocks base method +func (m *MockHandlerFactory) InsertHandlers(peer p2pcommon.RemotePeer) { + m.ctrl.Call(m, "InsertHandlers", peer) +} + +// InsertHandlers indicates an expected call of InsertHandlers +func (mr *MockHandlerFactoryMockRecorder) InsertHandlers(peer interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InsertHandlers", reflect.TypeOf((*MockHandlerFactory)(nil).InsertHandlers), peer) } diff --git a/p2p/p2pmock/mock_messagehandler.go b/p2p/p2pmock/mock_messagehandler.go index c46c8612c..cf0b1c288 100644 --- a/p2p/p2pmock/mock_messagehandler.go +++ b/p2p/p2pmock/mock_messagehandler.go @@ 
-5,11 +5,10 @@ package p2pmock import ( - reflect "reflect" + "reflect" - p2pcommon "github.com/aergoio/aergo/p2p/p2pcommon" - gomock "github.com/golang/mock/gomock" - proto "github.com/golang/protobuf/proto" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/golang/mock/gomock" ) // MockMessageHandler is a mock of MessageHandler interface @@ -36,7 +35,7 @@ func (m *MockMessageHandler) EXPECT() *MockMessageHandlerMockRecorder { } // CheckAuth mocks base method -func (m *MockMessageHandler) CheckAuth(arg0 p2pcommon.Message, arg1 proto.Message) error { +func (m *MockMessageHandler) CheckAuth(arg0 p2pcommon.Message, arg1 p2pcommon.MessageBody) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CheckAuth", arg0, arg1) ret0, _ := ret[0].(error) @@ -50,7 +49,7 @@ func (mr *MockMessageHandlerMockRecorder) CheckAuth(arg0, arg1 interface{}) *gom } // Handle mocks base method -func (m *MockMessageHandler) Handle(arg0 p2pcommon.Message, arg1 proto.Message) { +func (m *MockMessageHandler) Handle(arg0 p2pcommon.Message, arg1 p2pcommon.MessageBody) { m.ctrl.T.Helper() m.ctrl.Call(m, "Handle", arg0, arg1) } @@ -62,10 +61,10 @@ func (mr *MockMessageHandlerMockRecorder) Handle(arg0, arg1 interface{}) *gomock } // ParsePayload mocks base method -func (m *MockMessageHandler) ParsePayload(arg0 []byte) (proto.Message, error) { +func (m *MockMessageHandler) ParsePayload(arg0 []byte) (p2pcommon.MessageBody, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ParsePayload", arg0) - ret0, _ := ret[0].(proto.Message) + ret0, _ := ret[0].(p2pcommon.MessageBody) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -77,7 +76,7 @@ func (mr *MockMessageHandlerMockRecorder) ParsePayload(arg0 interface{}) *gomock } // PostHandle mocks base method -func (m *MockMessageHandler) PostHandle(arg0 p2pcommon.Message, arg1 proto.Message) { +func (m *MockMessageHandler) PostHandle(arg0 p2pcommon.Message, arg1 p2pcommon.MessageBody) { m.ctrl.T.Helper() m.ctrl.Call(m, "PostHandle", arg0, arg1) } diff --git a/p2p/p2pmock/mock_mofactory.go b/p2p/p2pmock/mock_mofactory.go index 4b8994fca..20899029c 100644 --- a/p2p/p2pmock/mock_mofactory.go +++ b/p2p/p2pmock/mock_mofactory.go @@ -63,7 +63,7 @@ func (mr *MockMoFactoryMockRecorder) NewMsgBlkBroadcastOrder(arg0 interface{}) * } // NewMsgBlockRequestOrder mocks base method -func (m *MockMoFactory) NewMsgBlockRequestOrder(arg0 p2pcommon.ResponseReceiver, arg1 p2pcommon.SubProtocol, arg2 p2pcommon.PbMessage) p2pcommon.MsgOrder { +func (m *MockMoFactory) NewMsgBlockRequestOrder(arg0 p2pcommon.ResponseReceiver, arg1 p2pcommon.SubProtocol, arg2 p2pcommon.MessageBody) p2pcommon.MsgOrder { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewMsgBlockRequestOrder", arg0, arg1, arg2) ret0, _ := ret[0].(p2pcommon.MsgOrder) @@ -77,7 +77,7 @@ func (mr *MockMoFactoryMockRecorder) NewMsgBlockRequestOrder(arg0, arg1, arg2 in } // NewMsgRequestOrder mocks base method -func (m *MockMoFactory) NewMsgRequestOrder(arg0 bool, arg1 p2pcommon.SubProtocol, arg2 p2pcommon.PbMessage) p2pcommon.MsgOrder { +func (m *MockMoFactory) NewMsgRequestOrder(arg0 bool, arg1 p2pcommon.SubProtocol, arg2 p2pcommon.MessageBody) p2pcommon.MsgOrder { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewMsgRequestOrder", arg0, arg1, arg2) ret0, _ := ret[0].(p2pcommon.MsgOrder) @@ -91,7 +91,7 @@ func (mr *MockMoFactoryMockRecorder) NewMsgRequestOrder(arg0, arg1, arg2 interfa } // NewMsgResponseOrder mocks base method -func (m *MockMoFactory) NewMsgResponseOrder(arg0 p2pcommon.MsgID, arg1 p2pcommon.SubProtocol, arg2 p2pcommon.PbMessage) 
p2pcommon.MsgOrder { +func (m *MockMoFactory) NewMsgResponseOrder(arg0 p2pcommon.MsgID, arg1 p2pcommon.SubProtocol, arg2 p2pcommon.MessageBody) p2pcommon.MsgOrder { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewMsgResponseOrder", arg0, arg1, arg2) ret0, _ := ret[0].(p2pcommon.MsgOrder) diff --git a/p2p/p2pmock/mock_networktransport.go b/p2p/p2pmock/mock_networktransport.go index 499cb8cf9..4189e9389 100644 --- a/p2p/p2pmock/mock_networktransport.go +++ b/p2p/p2pmock/mock_networktransport.go @@ -331,8 +331,8 @@ func (mr *MockNetworkTransportMockRecorder) AddStreamHandler(pid, handler interf } // GetOrCreateStream mocks base method -func (m *MockNetworkTransport) GetOrCreateStream(meta p2pcommon.PeerMeta, protocolID go_libp2p_protocol.ID) (go_libp2p_net.Stream, error) { - ret := m.ctrl.Call(m, "GetOrCreateStream", meta, protocolID) +func (m *MockNetworkTransport) GetOrCreateStream(meta p2pcommon.PeerMeta, protocolIDs ...go_libp2p_protocol.ID) (go_libp2p_net.Stream, error) { + ret := m.ctrl.Call(m, "GetOrCreateStream", meta, protocolIDs) ret0, _ := ret[0].(go_libp2p_net.Stream) ret1, _ := ret[1].(error) return ret0, ret1 @@ -344,8 +344,8 @@ func (mr *MockNetworkTransportMockRecorder) GetOrCreateStream(meta, protocolID i } // GetOrCreateStreamWithTTL mocks base method -func (m *MockNetworkTransport) GetOrCreateStreamWithTTL(meta p2pcommon.PeerMeta, protocolID go_libp2p_protocol.ID, ttl time.Duration) (go_libp2p_net.Stream, error) { - ret := m.ctrl.Call(m, "GetOrCreateStreamWithTTL", meta, protocolID, ttl) +func (m *MockNetworkTransport) GetOrCreateStreamWithTTL(meta p2pcommon.PeerMeta, ttl time.Duration, protocolIDs ...go_libp2p_protocol.ID) (go_libp2p_net.Stream, error) { + ret := m.ctrl.Call(m, "GetOrCreateStreamWithTTL", meta, protocolIDs, ttl) ret0, _ := ret[0].(go_libp2p_net.Stream) ret1, _ := ret[1].(error) return ret0, ret1 diff --git a/p2p/p2pmock/mock_peerfinder.go b/p2p/p2pmock/mock_peerfinder.go new file mode 100644 index 000000000..601cedbc8 --- /dev/null +++ b/p2p/p2pmock/mock_peerfinder.go @@ -0,0 +1,204 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: p2p/p2pcommon/pool.go + +// Package p2pmock is a generated GoMock package. 
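These files are generated by mockgen from the interfaces above, so they all follow the same usage pattern; below is a minimal sketch of how one of them is driven from a test (the test name and scenario are made up).

```go
package p2pmock_test

import (
	"testing"

	"github.com/aergoio/aergo/p2p/p2pmock"
	"github.com/golang/mock/gomock"
)

// TestInsertHandlersExpectation sketches the common mock workflow: create a
// controller, record an expectation, exercise the code, and let Finish verify it.
func TestInsertHandlersExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	factory := p2pmock.NewMockHandlerFactory(ctrl)
	peer := p2pmock.NewMockRemotePeer(ctrl)
	factory.EXPECT().InsertHandlers(peer).Times(1)

	// The code under test would normally receive factory as a p2pcommon.HandlerFactory.
	factory.InsertHandlers(peer)
}
```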
+package p2pmock + +import ( + p2pcommon "github.com/aergoio/aergo/p2p/p2pcommon" + gomock "github.com/golang/mock/gomock" + go_libp2p_net "github.com/libp2p/go-libp2p-net" + go_libp2p_peer "github.com/libp2p/go-libp2p-peer" + reflect "reflect" +) + +// MockPeerEventListener is a mock of PeerEventListener interface +type MockPeerEventListener struct { + ctrl *gomock.Controller + recorder *MockPeerEventListenerMockRecorder +} + +// MockPeerEventListenerMockRecorder is the mock recorder for MockPeerEventListener +type MockPeerEventListenerMockRecorder struct { + mock *MockPeerEventListener +} + +// NewMockPeerEventListener creates a new mock instance +func NewMockPeerEventListener(ctrl *gomock.Controller) *MockPeerEventListener { + mock := &MockPeerEventListener{ctrl: ctrl} + mock.recorder = &MockPeerEventListenerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockPeerEventListener) EXPECT() *MockPeerEventListenerMockRecorder { + return m.recorder +} + +// OnPeerConnect mocks base method +func (m *MockPeerEventListener) OnPeerConnect(pid go_libp2p_peer.ID) { + m.ctrl.Call(m, "OnPeerConnect", pid) +} + +// OnPeerConnect indicates an expected call of OnPeerConnect +func (mr *MockPeerEventListenerMockRecorder) OnPeerConnect(pid interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnPeerConnect", reflect.TypeOf((*MockPeerEventListener)(nil).OnPeerConnect), pid) +} + +// OnPeerDisconnect mocks base method +func (m *MockPeerEventListener) OnPeerDisconnect(peer p2pcommon.RemotePeer) { + m.ctrl.Call(m, "OnPeerDisconnect", peer) +} + +// OnPeerDisconnect indicates an expected call of OnPeerDisconnect +func (mr *MockPeerEventListenerMockRecorder) OnPeerDisconnect(peer interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnPeerDisconnect", reflect.TypeOf((*MockPeerEventListener)(nil).OnPeerDisconnect), peer) +} + +// MockPeerFinder is a mock of PeerFinder interface +type MockPeerFinder struct { + ctrl *gomock.Controller + recorder *MockPeerFinderMockRecorder +} + +// MockPeerFinderMockRecorder is the mock recorder for MockPeerFinder +type MockPeerFinderMockRecorder struct { + mock *MockPeerFinder +} + +// NewMockPeerFinder creates a new mock instance +func NewMockPeerFinder(ctrl *gomock.Controller) *MockPeerFinder { + mock := &MockPeerFinder{ctrl: ctrl} + mock.recorder = &MockPeerFinderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockPeerFinder) EXPECT() *MockPeerFinderMockRecorder { + return m.recorder +} + +// OnPeerConnect mocks base method +func (m *MockPeerFinder) OnPeerConnect(pid go_libp2p_peer.ID) { + m.ctrl.Call(m, "OnPeerConnect", pid) +} + +// OnPeerConnect indicates an expected call of OnPeerConnect +func (mr *MockPeerFinderMockRecorder) OnPeerConnect(pid interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnPeerConnect", reflect.TypeOf((*MockPeerFinder)(nil).OnPeerConnect), pid) +} + +// OnPeerDisconnect mocks base method +func (m *MockPeerFinder) OnPeerDisconnect(peer p2pcommon.RemotePeer) { + m.ctrl.Call(m, "OnPeerDisconnect", peer) +} + +// OnPeerDisconnect indicates an expected call of OnPeerDisconnect +func (mr *MockPeerFinderMockRecorder) OnPeerDisconnect(peer interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnPeerDisconnect", reflect.TypeOf((*MockPeerFinder)(nil).OnPeerDisconnect), peer) +} + +// 
CheckAndFill mocks base method +func (m *MockPeerFinder) CheckAndFill() { + m.ctrl.Call(m, "CheckAndFill") +} + +// CheckAndFill indicates an expected call of CheckAndFill +func (mr *MockPeerFinderMockRecorder) CheckAndFill() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckAndFill", reflect.TypeOf((*MockPeerFinder)(nil).CheckAndFill)) +} + +// MockWaitingPeerManager is a mock of WaitingPeerManager interface +type MockWaitingPeerManager struct { + ctrl *gomock.Controller + recorder *MockWaitingPeerManagerMockRecorder +} + +// MockWaitingPeerManagerMockRecorder is the mock recorder for MockWaitingPeerManager +type MockWaitingPeerManagerMockRecorder struct { + mock *MockWaitingPeerManager +} + +// NewMockWaitingPeerManager creates a new mock instance +func NewMockWaitingPeerManager(ctrl *gomock.Controller) *MockWaitingPeerManager { + mock := &MockWaitingPeerManager{ctrl: ctrl} + mock.recorder = &MockWaitingPeerManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockWaitingPeerManager) EXPECT() *MockWaitingPeerManagerMockRecorder { + return m.recorder +} + +// OnPeerConnect mocks base method +func (m *MockWaitingPeerManager) OnPeerConnect(pid go_libp2p_peer.ID) { + m.ctrl.Call(m, "OnPeerConnect", pid) +} + +// OnPeerConnect indicates an expected call of OnPeerConnect +func (mr *MockWaitingPeerManagerMockRecorder) OnPeerConnect(pid interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnPeerConnect", reflect.TypeOf((*MockWaitingPeerManager)(nil).OnPeerConnect), pid) +} + +// OnPeerDisconnect mocks base method +func (m *MockWaitingPeerManager) OnPeerDisconnect(peer p2pcommon.RemotePeer) { + m.ctrl.Call(m, "OnPeerDisconnect", peer) +} + +// OnPeerDisconnect indicates an expected call of OnPeerDisconnect +func (mr *MockWaitingPeerManagerMockRecorder) OnPeerDisconnect(peer interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnPeerDisconnect", reflect.TypeOf((*MockWaitingPeerManager)(nil).OnPeerDisconnect), peer) +} + +// OnDiscoveredPeers mocks base method +func (m *MockWaitingPeerManager) OnDiscoveredPeers(metas []p2pcommon.PeerMeta) int { + ret := m.ctrl.Call(m, "OnDiscoveredPeers", metas) + ret0, _ := ret[0].(int) + return ret0 +} + +// OnDiscoveredPeers indicates an expected call of OnDiscoveredPeers +func (mr *MockWaitingPeerManagerMockRecorder) OnDiscoveredPeers(metas interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnDiscoveredPeers", reflect.TypeOf((*MockWaitingPeerManager)(nil).OnDiscoveredPeers), metas) +} + +// OnWorkDone mocks base method +func (m *MockWaitingPeerManager) OnWorkDone(result p2pcommon.ConnWorkResult) { + m.ctrl.Call(m, "OnWorkDone", result) +} + +// OnWorkDone indicates an expected call of OnWorkDone +func (mr *MockWaitingPeerManagerMockRecorder) OnWorkDone(result interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnWorkDone", reflect.TypeOf((*MockWaitingPeerManager)(nil).OnWorkDone), result) +} + +// CheckAndConnect mocks base method +func (m *MockWaitingPeerManager) CheckAndConnect() { + m.ctrl.Call(m, "CheckAndConnect") +} + +// CheckAndConnect indicates an expected call of CheckAndConnect +func (mr *MockWaitingPeerManagerMockRecorder) CheckAndConnect() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckAndConnect", reflect.TypeOf((*MockWaitingPeerManager)(nil).CheckAndConnect)) +} + +// OnInboundConn 
mocks base method +func (m *MockWaitingPeerManager) OnInboundConn(s go_libp2p_net.Stream) { + m.ctrl.Call(m, "OnInboundConn", s) +} + +// OnInboundConn indicates an expected call of OnInboundConn +func (mr *MockWaitingPeerManagerMockRecorder) OnInboundConn(s interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnInboundConn", reflect.TypeOf((*MockWaitingPeerManager)(nil).OnInboundConn), s) +} + +// OnInboundConnLegacy mocks base method +func (m *MockWaitingPeerManager) OnInboundConnLegacy(s go_libp2p_net.Stream) { + m.ctrl.Call(m, "OnInboundConnLegacy", s) +} + +// OnInboundConnLegacy indicates an expected call of OnInboundConnLegacy +func (mr *MockWaitingPeerManagerMockRecorder) OnInboundConnLegacy(s interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnInboundConnLegacy", reflect.TypeOf((*MockWaitingPeerManager)(nil).OnInboundConnLegacy), s) +} diff --git a/p2p/p2pmock/mock_remotepeer.go b/p2p/p2pmock/mock_remotepeer.go index bbab189be..f2e889987 100644 --- a/p2p/p2pmock/mock_remotepeer.go +++ b/p2p/p2pmock/mock_remotepeer.go @@ -1,17 +1,16 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/aergoio/aergo/p2p/p2pcommon (interfaces: RemotePeer) +// Source: p2p/p2pcommon/remotepeer.go -// Package mock_p2pcommon is a generated GoMock package. +// Package p2pmock is a generated GoMock package. package p2pmock import ( - reflect "reflect" - time "time" - p2pcommon "github.com/aergoio/aergo/p2p/p2pcommon" types "github.com/aergoio/aergo/types" gomock "github.com/golang/mock/gomock" go_libp2p_peer "github.com/libp2p/go-libp2p-peer" + reflect "reflect" + time "time" ) // MockRemotePeer is a mock of RemotePeer interface @@ -37,35 +36,8 @@ func (m *MockRemotePeer) EXPECT() *MockRemotePeerMockRecorder { return m.recorder } -// ConsumeRequest mocks base method -func (m *MockRemotePeer) ConsumeRequest(arg0 p2pcommon.MsgID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ConsumeRequest", arg0) -} - -// ConsumeRequest indicates an expected call of ConsumeRequest -func (mr *MockRemotePeerMockRecorder) ConsumeRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConsumeRequest", reflect.TypeOf((*MockRemotePeer)(nil).ConsumeRequest), arg0) -} - -// GetReceiver mocks base method -func (m *MockRemotePeer) GetReceiver(arg0 p2pcommon.MsgID) p2pcommon.ResponseReceiver { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetReceiver", arg0) - ret0, _ := ret[0].(p2pcommon.ResponseReceiver) - return ret0 -} - -// GetReceiver indicates an expected call of GetReceiver -func (mr *MockRemotePeerMockRecorder) GetReceiver(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReceiver", reflect.TypeOf((*MockRemotePeer)(nil).GetReceiver), arg0) -} - // ID mocks base method func (m *MockRemotePeer) ID() go_libp2p_peer.ID { - m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ID") ret0, _ := ret[0].(go_libp2p_peer.ID) return ret0 @@ -73,41 +45,23 @@ func (m *MockRemotePeer) ID() go_libp2p_peer.ID { // ID indicates an expected call of ID func (mr *MockRemotePeerMockRecorder) ID() *gomock.Call { - mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockRemotePeer)(nil).ID)) } -// LastNotice mocks base method -func (m *MockRemotePeer) LastNotice() *types.LastBlockStatus { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastNotice") - ret0, _ := ret[0].(*types.LastBlockStatus) - return ret0 
-} - -// LastNotice indicates an expected call of LastNotice -func (mr *MockRemotePeerMockRecorder) LastNotice() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastNotice", reflect.TypeOf((*MockRemotePeer)(nil).LastNotice)) -} - -// MF mocks base method -func (m *MockRemotePeer) MF() p2pcommon.MoFactory { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MF") - ret0, _ := ret[0].(p2pcommon.MoFactory) +// Meta mocks base method +func (m *MockRemotePeer) Meta() p2pcommon.PeerMeta { + ret := m.ctrl.Call(m, "Meta") + ret0, _ := ret[0].(p2pcommon.PeerMeta) return ret0 } -// MF indicates an expected call of MF -func (mr *MockRemotePeerMockRecorder) MF() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MF", reflect.TypeOf((*MockRemotePeer)(nil).MF)) +// Meta indicates an expected call of Meta +func (mr *MockRemotePeerMockRecorder) Meta() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Meta", reflect.TypeOf((*MockRemotePeer)(nil).Meta)) } // ManageNumber mocks base method func (m *MockRemotePeer) ManageNumber() uint32 { - m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ManageNumber") ret0, _ := ret[0].(uint32) return ret0 @@ -115,27 +69,11 @@ func (m *MockRemotePeer) ManageNumber() uint32 { // ManageNumber indicates an expected call of ManageNumber func (mr *MockRemotePeerMockRecorder) ManageNumber() *gomock.Call { - mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ManageNumber", reflect.TypeOf((*MockRemotePeer)(nil).ManageNumber)) } -// Meta mocks base method -func (m *MockRemotePeer) Meta() p2pcommon.PeerMeta { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Meta") - ret0, _ := ret[0].(p2pcommon.PeerMeta) - return ret0 -} - -// Meta indicates an expected call of Meta -func (mr *MockRemotePeerMockRecorder) Meta() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Meta", reflect.TypeOf((*MockRemotePeer)(nil).Meta)) -} - // Name mocks base method func (m *MockRemotePeer) Name() string { - m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Name") ret0, _ := ret[0].(string) return ret0 @@ -143,122 +81,171 @@ func (m *MockRemotePeer) Name() string { // Name indicates an expected call of Name func (mr *MockRemotePeerMockRecorder) Name() *gomock.Call { - mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockRemotePeer)(nil).Name)) } -// PushTxsNotice mocks base method -func (m *MockRemotePeer) PushTxsNotice(arg0 []types.TxID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "PushTxsNotice", arg0) +// Version mocks base method +func (m *MockRemotePeer) Version() string { + ret := m.ctrl.Call(m, "Version") + ret0, _ := ret[0].(string) + return ret0 } -// PushTxsNotice indicates an expected call of PushTxsNotice -func (mr *MockRemotePeerMockRecorder) PushTxsNotice(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushTxsNotice", reflect.TypeOf((*MockRemotePeer)(nil).PushTxsNotice), arg0) +// Version indicates an expected call of Version +func (mr *MockRemotePeerMockRecorder) Version() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRemotePeer)(nil).Version)) +} + +// AddMessageHandler mocks base method +func (m *MockRemotePeer) AddMessageHandler(subProtocol p2pcommon.SubProtocol, handler p2pcommon.MessageHandler) { + m.ctrl.Call(m, "AddMessageHandler", subProtocol, 
handler) +} + +// AddMessageHandler indicates an expected call of AddMessageHandler +func (mr *MockRemotePeerMockRecorder) AddMessageHandler(subProtocol, handler interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddMessageHandler", reflect.TypeOf((*MockRemotePeer)(nil).AddMessageHandler), subProtocol, handler) +} + +// State mocks base method +func (m *MockRemotePeer) State() types.PeerState { + ret := m.ctrl.Call(m, "State") + ret0, _ := ret[0].(types.PeerState) + return ret0 +} + +// State indicates an expected call of State +func (mr *MockRemotePeerMockRecorder) State() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "State", reflect.TypeOf((*MockRemotePeer)(nil).State)) +} + +// LastStatus mocks base method +func (m *MockRemotePeer) LastStatus() *types.LastBlockStatus { + ret := m.ctrl.Call(m, "LastStatus") + ret0, _ := ret[0].(*types.LastBlockStatus) + return ret0 +} + +// LastStatus indicates an expected call of LastStatus +func (mr *MockRemotePeerMockRecorder) LastStatus() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastStatus", reflect.TypeOf((*MockRemotePeer)(nil).LastStatus)) } // RunPeer mocks base method func (m *MockRemotePeer) RunPeer() { - m.ctrl.T.Helper() m.ctrl.Call(m, "RunPeer") } // RunPeer indicates an expected call of RunPeer func (mr *MockRemotePeerMockRecorder) RunPeer() *gomock.Call { - mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunPeer", reflect.TypeOf((*MockRemotePeer)(nil).RunPeer)) } +// Stop mocks base method +func (m *MockRemotePeer) Stop() { + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop +func (mr *MockRemotePeerMockRecorder) Stop() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockRemotePeer)(nil).Stop)) +} + +// SendMessage mocks base method +func (m *MockRemotePeer) SendMessage(msg p2pcommon.MsgOrder) { + m.ctrl.Call(m, "SendMessage", msg) +} + +// SendMessage indicates an expected call of SendMessage +func (mr *MockRemotePeerMockRecorder) SendMessage(msg interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMessage", reflect.TypeOf((*MockRemotePeer)(nil).SendMessage), msg) +} + // SendAndWaitMessage mocks base method -func (m *MockRemotePeer) SendAndWaitMessage(arg0 p2pcommon.MsgOrder, arg1 time.Duration) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAndWaitMessage", arg0, arg1) +func (m *MockRemotePeer) SendAndWaitMessage(msg p2pcommon.MsgOrder, ttl time.Duration) error { + ret := m.ctrl.Call(m, "SendAndWaitMessage", msg, ttl) ret0, _ := ret[0].(error) return ret0 } // SendAndWaitMessage indicates an expected call of SendAndWaitMessage -func (mr *MockRemotePeerMockRecorder) SendAndWaitMessage(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndWaitMessage", reflect.TypeOf((*MockRemotePeer)(nil).SendAndWaitMessage), arg0, arg1) +func (mr *MockRemotePeerMockRecorder) SendAndWaitMessage(msg, ttl interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAndWaitMessage", reflect.TypeOf((*MockRemotePeer)(nil).SendAndWaitMessage), msg, ttl) } -// SendMessage mocks base method -func (m *MockRemotePeer) SendMessage(arg0 p2pcommon.MsgOrder) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendMessage", arg0) +// PushTxsNotice mocks base method +func (m *MockRemotePeer) PushTxsNotice(txHashes []types.TxID) { + m.ctrl.Call(m, 
"PushTxsNotice", txHashes) } -// SendMessage indicates an expected call of SendMessage -func (mr *MockRemotePeerMockRecorder) SendMessage(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMessage", reflect.TypeOf((*MockRemotePeer)(nil).SendMessage), arg0) +// PushTxsNotice indicates an expected call of PushTxsNotice +func (mr *MockRemotePeerMockRecorder) PushTxsNotice(txHashes interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushTxsNotice", reflect.TypeOf((*MockRemotePeer)(nil).PushTxsNotice), txHashes) } -// State mocks base method -func (m *MockRemotePeer) State() types.PeerState { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "State") - ret0, _ := ret[0].(types.PeerState) - return ret0 +// ConsumeRequest mocks base method +func (m *MockRemotePeer) ConsumeRequest(msgID p2pcommon.MsgID) { + m.ctrl.Call(m, "ConsumeRequest", msgID) } -// State indicates an expected call of State -func (mr *MockRemotePeerMockRecorder) State() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "State", reflect.TypeOf((*MockRemotePeer)(nil).State)) +// ConsumeRequest indicates an expected call of ConsumeRequest +func (mr *MockRemotePeerMockRecorder) ConsumeRequest(msgID interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConsumeRequest", reflect.TypeOf((*MockRemotePeer)(nil).ConsumeRequest), msgID) } -// Stop mocks base method -func (m *MockRemotePeer) Stop() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Stop") +// GetReceiver mocks base method +func (m *MockRemotePeer) GetReceiver(id p2pcommon.MsgID) p2pcommon.ResponseReceiver { + ret := m.ctrl.Call(m, "GetReceiver", id) + ret0, _ := ret[0].(p2pcommon.ResponseReceiver) + return ret0 } -// Stop indicates an expected call of Stop -func (mr *MockRemotePeerMockRecorder) Stop() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockRemotePeer)(nil).Stop)) +// GetReceiver indicates an expected call of GetReceiver +func (mr *MockRemotePeerMockRecorder) GetReceiver(id interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReceiver", reflect.TypeOf((*MockRemotePeer)(nil).GetReceiver), id) } // UpdateBlkCache mocks base method -func (m *MockRemotePeer) UpdateBlkCache(arg0 []byte, arg1 uint64) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateBlkCache", arg0, arg1) +func (m *MockRemotePeer) UpdateBlkCache(blkHash []byte, blkNumber uint64) bool { + ret := m.ctrl.Call(m, "UpdateBlkCache", blkHash, blkNumber) ret0, _ := ret[0].(bool) return ret0 } // UpdateBlkCache indicates an expected call of UpdateBlkCache -func (mr *MockRemotePeerMockRecorder) UpdateBlkCache(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBlkCache", reflect.TypeOf((*MockRemotePeer)(nil).UpdateBlkCache), arg0, arg1) +func (mr *MockRemotePeerMockRecorder) UpdateBlkCache(blkHash, blkNumber interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBlkCache", reflect.TypeOf((*MockRemotePeer)(nil).UpdateBlkCache), blkHash, blkNumber) +} + +// UpdateTxCache mocks base method +func (m *MockRemotePeer) UpdateTxCache(hashes []types.TxID) []types.TxID { + ret := m.ctrl.Call(m, "UpdateTxCache", hashes) + ret0, _ := ret[0].([]types.TxID) + return ret0 +} + +// UpdateTxCache indicates an expected call of UpdateTxCache 
+func (mr *MockRemotePeerMockRecorder) UpdateTxCache(hashes interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTxCache", reflect.TypeOf((*MockRemotePeer)(nil).UpdateTxCache), hashes) } // UpdateLastNotice mocks base method -func (m *MockRemotePeer) UpdateLastNotice(arg0 []byte, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "UpdateLastNotice", arg0, arg1) +func (m *MockRemotePeer) UpdateLastNotice(blkHash []byte, blkNumber uint64) { + m.ctrl.Call(m, "UpdateLastNotice", blkHash, blkNumber) } // UpdateLastNotice indicates an expected call of UpdateLastNotice -func (mr *MockRemotePeerMockRecorder) UpdateLastNotice(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateLastNotice", reflect.TypeOf((*MockRemotePeer)(nil).UpdateLastNotice), arg0, arg1) +func (mr *MockRemotePeerMockRecorder) UpdateLastNotice(blkHash, blkNumber interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateLastNotice", reflect.TypeOf((*MockRemotePeer)(nil).UpdateLastNotice), blkHash, blkNumber) } -// UpdateTxCache mocks base method -func (m *MockRemotePeer) UpdateTxCache(arg0 []types.TxID) []types.TxID { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateTxCache", arg0) - ret0, _ := ret[0].([]types.TxID) +// MF mocks base method +func (m *MockRemotePeer) MF() p2pcommon.MoFactory { + ret := m.ctrl.Call(m, "MF") + ret0, _ := ret[0].(p2pcommon.MoFactory) return ret0 } -// UpdateTxCache indicates an expected call of UpdateTxCache -func (mr *MockRemotePeerMockRecorder) UpdateTxCache(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTxCache", reflect.TypeOf((*MockRemotePeer)(nil).UpdateTxCache), arg0) +// MF indicates an expected call of MF +func (mr *MockRemotePeerMockRecorder) MF() *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MF", reflect.TypeOf((*MockRemotePeer)(nil).MF)) } diff --git a/p2p/p2pmock/readme.txt b/p2p/p2pmock/readme.txt new file mode 100644 index 000000000..a624457e2 --- /dev/null +++ b/p2p/p2pmock/readme.txt @@ -0,0 +1,11 @@ +Examples to generate mock class + +1. with no flags : it must be followed by manual editing to correct the package of generated mock class +mockgen github.com/aergoio/aergo/p2p/p2pcommon HSHandlerFactory > p2p/p2pmock/mock_hsfactory.go + +2. with flags : generate mocks of all interface in single file +mockgen -source=p2p/p2pcommon/pool.go -package=p2pmock -destination=p2p/p2pmock/mock_peerfinder.go + +3. with flags others : can select the classes (exclude a class) in single file, setting class mapping is too tedious +mockgen -source=p2p/p2pcommon/pool.go -mock_names=WaitingPeerManager=MockWaitingPeerManager -package=p2pmock -destination=p2p/p2pmock/mock_peerfinder.go + diff --git a/p2p/p2putil/ioutil.go b/p2p/p2putil/ioutil.go new file mode 100644 index 000000000..7f8a2a50d --- /dev/null +++ b/p2p/p2putil/ioutil.go @@ -0,0 +1,23 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2putil + +import "io" + +// ReadToLen read bytes from io.Reader until bf is filled. 
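+// A minimal usage sketch (illustrative only, not part of this change), where rd
+// is any io.Reader:
+//
+//	buf := make([]byte, 8)
+//	n, err := ReadToLen(rd, buf)
+//
+// The call returns only after buf is completely filled, unless rd reports an
+// error or a zero-byte read first; n is the number of bytes copied either way.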
+func ReadToLen(rd io.Reader, bf []byte) (int, error) { + remain := len(bf) + offset := 0 + for remain > 0 { + read, err := rd.Read(bf[offset:]) + if err != nil || read == 0 { + return offset, err + } + remain -= read + offset += read + } + return offset, nil +} diff --git a/p2p/p2putil/ioutil_test.go b/p2p/p2putil/ioutil_test.go new file mode 100644 index 000000000..bd93a0e35 --- /dev/null +++ b/p2p/p2putil/ioutil_test.go @@ -0,0 +1,48 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2putil + +import ( + "bytes" + "testing" +) + +func TestReadToLen(t *testing.T) { + sample := []byte("0123456789ABCDEFGHIJabcdefghij") + + type args struct { + bfLen int + } + tests := []struct { + name string + + args args + repeat int + + want int + }{ + {"TExact",args{4},0, 4}, + {"TBigBuf",args{8},0, 8}, + {"TRepeat",args{4},4, 4}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rd := bytes.NewReader(sample) + bf := make([]byte, 100) + prev := make([]byte, 0, tt.args.bfLen) + for i := 0; i <= tt.repeat; i++ { + got, _ := ReadToLen(rd, bf[:tt.args.bfLen]) + if got != tt.want { + t.Errorf("baseWireHandshaker.readToLen() = %v, want %v", got, tt.want) + } + if bytes.Equal(prev, bf) { + t.Errorf("baseWireHandshaker.readToLen() wrong, same as prev %v", bf) + } + copy(prev, bf) + } + }) + } +} diff --git a/p2p/libp2putil.go b/p2p/p2putil/libp2putil.go similarity index 89% rename from p2p/libp2putil.go rename to p2p/p2putil/libp2putil.go index 46f30ad53..0ddf075f9 100644 --- a/p2p/libp2putil.go +++ b/p2p/p2putil/libp2putil.go @@ -3,14 +3,15 @@ * @copyright defined in aergo/LICENSE.txt */ -package p2p +package p2putil import ( + "bytes" "fmt" "github.com/aergoio/aergo/p2p/p2pcommon" - "github.com/aergoio/aergo/p2p/p2putil" "github.com/libp2p/go-libp2p-crypto" "github.com/libp2p/go-libp2p-peer" + protocol "github.com/libp2p/go-libp2p-protocol" "github.com/multiformats/go-multiaddr" "io/ioutil" "net" @@ -41,7 +42,7 @@ func ToMultiAddr(ipAddr net.IP, port uint32) (multiaddr.Multiaddr, error) { // PeerMetaToMultiAddr make libp2p compatible Multiaddr object from peermeta func PeerMetaToMultiAddr(m p2pcommon.PeerMeta) (multiaddr.Multiaddr, error) { - ipAddr, err := p2putil.GetSingleIPAddress(m.IPAddress) + ipAddr, err := GetSingleIPAddress(m.IPAddress) if err != nil { return nil, err } @@ -96,7 +97,7 @@ func ParseMultiaddrWithResolve(str string) (multiaddr.Multiaddr, error) { return nil, err } domainName := splitted[2] - ips, err := p2putil.ResolveHostDomain(domainName) + ips, err := ResolveHostDomain(domainName) if err != nil { return nil, fmt.Errorf("Could not get IPs: %v\n", err) } @@ -166,9 +167,9 @@ func GenerateKeyFile(dir, prefix string) (crypto.PrivKey, crypto.PubKey, error) func writeToKeyFiles(priv crypto.PrivKey, pub crypto.PubKey, dir, prefix string) error { - pkFile := filepath.Join(dir, prefix+DefaultPkKeyExt) + pkFile := filepath.Join(dir, prefix+p2pcommon.DefaultPkKeyExt) // pubFile := filepath.Join(dir, prefix+".pub") - idFile := filepath.Join(dir, prefix+DefaultPeerIDExt) + idFile := filepath.Join(dir, prefix+p2pcommon.DefaultPeerIDExt) // Write private key file pkf, err := os.Create(pkFile) @@ -192,4 +193,19 @@ func writeToKeyFiles(priv crypto.PrivKey, pub crypto.PubKey, dir, prefix string) idf.Write(idBytes) idf.Sync() return nil +} + +func ProtocolIDsToString(sli []protocol.ID) string { + sb := bytes.NewBuffer(nil) + sb.WriteByte('[') + if len(sli) > 0 { + stop := len(sli)-1 + for i:=0 ; i m.limit { - for i:=0;i m.limit { - for i:=0;i m.limit { - 
for i:=0;i= macConcurrentQueryCount { + break + } + } + } +} + +type queryStat struct { + pid peer.ID + lastCheck time.Time + nextTurn time.Time +} diff --git a/p2p/peerfinder_test.go b/p2p/peerfinder_test.go new file mode 100644 index 000000000..fd6b9e369 --- /dev/null +++ b/p2p/peerfinder_test.go @@ -0,0 +1,162 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2p + +import ( + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pmock" + "github.com/aergoio/aergo/p2p/p2putil" + "github.com/golang/mock/gomock" + crypto "github.com/libp2p/go-libp2p-crypto" + "github.com/libp2p/go-libp2p-peer" + "reflect" + "testing" +) + +const desigCnt = 10 + +var ( + desigIDs []peer.ID + desigPeers []p2pcommon.PeerMeta + desigPeerMap = make(map[peer.ID]p2pcommon.PeerMeta) + + unknowIDs []peer.ID + unknowPeers []p2pcommon.PeerMeta +) + +func init() { + desigIDs = make([]peer.ID, desigCnt) + desigPeers = make([]p2pcommon.PeerMeta, desigCnt) + for i := 0; i < desigCnt; i++ { + priv, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256) + pid, _ := peer.IDFromPrivateKey(priv) + desigIDs[i] = pid + desigPeers[i] = p2pcommon.PeerMeta{ID: pid, Designated: true} + desigPeerMap[desigIDs[i]] = desigPeers[i] + } + unknowIDs = make([]peer.ID, desigCnt) + unknowPeers = make([]p2pcommon.PeerMeta, desigCnt) + for i := 0; i < desigCnt; i++ { + priv, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256) + pid, _ := peer.IDFromPrivateKey(priv) + unknowIDs[i] = pid + unknowPeers[i] = p2pcommon.PeerMeta{ID: pid, Designated: false} + } +} +func createDummyPM() *peerManager { + dummyPM := &peerManager{designatedPeers: desigPeerMap, + remotePeers: make(map[peer.ID]p2pcommon.RemotePeer), + waitingPeers: make(map[peer.ID]*p2pcommon.WaitingPeer, 10), + } + return dummyPM +} + +func TestNewPeerFinder(t *testing.T) { + ctrl := gomock.NewController(t) + + type args struct { + useDiscover bool + usePolaris bool + } + tests := []struct { + name string + args args + want p2pcommon.PeerFinder + }{ + {"Tstatic", args{false, false}, &staticPeerFinder{}}, + {"TstaticWPolaris", args{false, true}, &staticPeerFinder{}}, + {"Tdyn", args{true, false}, &dynamicPeerFinder{}}, + {"TdynWPolaris", args{true, true}, &dynamicPeerFinder{}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dummyPM := createDummyPM() + mockActor := p2pmock.NewMockActorService(ctrl) + got := NewPeerFinder(logger, dummyPM, mockActor, 10, tt.args.useDiscover, tt.args.usePolaris) + if reflect.TypeOf(got) != reflect.TypeOf(tt.want) { + t.Errorf("NewPeerFinder() = %v, want %v", reflect.TypeOf(got), reflect.TypeOf(tt.want)) + } + }) + } +} + +func Test_dynamicPeerFinder_OnPeerDisconnect(t *testing.T) { + ctrl := gomock.NewController(t) + + type args struct { + preConnected []peer.ID + inMeta p2pcommon.PeerMeta + } + tests := []struct { + name string + args args + wantCount int + }{ + {"TDesgintedPeer", args{desigIDs, desigPeers[0]}, 1}, + {"TNonPeer", args{unknowIDs, unknowPeers[0]}, 0}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dummyPM := createDummyPM() + mockActor := p2pmock.NewMockActorService(ctrl) + mockPeer := p2pmock.NewMockRemotePeer(ctrl) + mockPeer.EXPECT().ID().Return(tt.args.inMeta.ID).AnyTimes() + mockPeer.EXPECT().Meta().Return(tt.args.inMeta).AnyTimes() + mockPeer.EXPECT().Name().Return(p2putil.ShortMetaForm(tt.args.inMeta)).AnyTimes() + + dp := NewPeerFinder(logger, dummyPM, mockActor, 10, true, false).(*dynamicPeerFinder) + for _, id := range 
tt.args.preConnected { + dummyPM.remotePeers[id] = &remotePeerImpl{} + dp.OnPeerConnect(id) + } + statCnt := len(dp.qStats) + dp.OnPeerDisconnect(mockPeer) + + if statCnt-1 != len(dp.qStats) { + t.Errorf("count of query peers was not decreaded %v, want %v", len(dp.qStats), statCnt) + } + }) + } +} + +func Test_dynamicPeerFinder_OnPeerConnect(t *testing.T) { + ctrl := gomock.NewController(t) + + type args struct { + preConnected []peer.ID + inMeta p2pcommon.PeerMeta + } + tests := []struct { + name string + args args + wantStatCount int + }{ + {"TDesigPeer", args{desigIDs, desigPeers[0]}, 1}, + {"TNonPeer", args{unknowIDs, unknowPeers[0]}, 1}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dummyPM := createDummyPM() + mockActor := p2pmock.NewMockActorService(ctrl) + mockPeer := p2pmock.NewMockRemotePeer(ctrl) + mockPeer.EXPECT().ID().Return(tt.args.inMeta.ID).AnyTimes() + mockPeer.EXPECT().Meta().Return(tt.args.inMeta).AnyTimes() + mockPeer.EXPECT().Name().Return(p2putil.ShortMetaForm(tt.args.inMeta)).AnyTimes() + + dp := NewPeerFinder(logger, dummyPM, mockActor, 10, true, false).(*dynamicPeerFinder) + + dp.OnPeerConnect(tt.args.inMeta.ID) + + if len(dp.qStats) != tt.wantStatCount { + t.Errorf("count of query peers was not decreaded %v, want %v", len(dp.qStats), tt.wantStatCount) + } else { + if _, exist := dp.qStats[tt.args.inMeta.ID] ; !exist { + t.Errorf("peer query for pid %v missing, want exists", p2putil.ShortForm(tt.args.inMeta.ID)) + } + } + }) + } +} diff --git a/p2p/peermanager.go b/p2p/peermanager.go index 49a39a7e8..43670d3b3 100644 --- a/p2p/peermanager.go +++ b/p2p/peermanager.go @@ -3,22 +3,19 @@ package p2p import ( - "fmt" + "github.com/aergoio/aergo/p2p/p2pkey" + protocol "github.com/libp2p/go-libp2p-protocol" "net" "strconv" "sync" "sync/atomic" "time" + "github.com/aergoio/aergo-lib/log" + "github.com/aergoio/aergo/message" "github.com/aergoio/aergo/p2p/metric" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2putil" - "github.com/aergoio/aergo/p2p/subproto" - - inet "github.com/libp2p/go-libp2p-net" - - "github.com/aergoio/aergo-lib/log" - "github.com/aergoio/aergo/message" "github.com/aergoio/aergo/types" cfg "github.com/aergoio/aergo/config" @@ -29,6 +26,7 @@ const ( initial = iota running = iota stopping = iota + stopped = iota ) /** @@ -38,38 +36,51 @@ const ( type peerManager struct { status int32 nt p2pcommon.NetworkTransport - hsFactory HSHandlerFactory - handlerFactory HandlerFactory + hsFactory p2pcommon.HSHandlerFactory + handlerFactory p2pcommon.HandlerFactory actorService p2pcommon.ActorService signer p2pcommon.MsgSigner mf p2pcommon.MoFactory mm metric.MetricsManager + skipHandshakeSync bool + peerFinder p2pcommon.PeerFinder + wpManager p2pcommon.WaitingPeerManager // designatedPeers and hiddenPeerSet is set in construction time once and will not be changed hiddenPeerSet map[peer.ID]bool mutex *sync.Mutex manageNumber uint32 - remotePeers map[peer.ID]*remotePeerImpl - peerPool map[peer.ID]p2pcommon.PeerMeta - conf *cfg.P2PConfig + remotePeers map[peer.ID]p2pcommon.RemotePeer + waitingPeers map[peer.ID]*p2pcommon.WaitingPeer + + conf *cfg.P2PConfig // peerCache is copy-on-write style peerCache []p2pcommon.RemotePeer - addPeerChannel chan p2pcommon.PeerMeta - fillPoolChannel chan []p2pcommon.PeerMeta - finishChannel chan struct{} - eventListeners []PeerEventListener + getPeerChannel chan getPeerChan + peerHandshaked chan p2pcommon.RemotePeer + removePeerChannel chan p2pcommon.RemotePeer + fillPoolChannel chan 
[]p2pcommon.PeerMeta + inboundConnChan chan inboundConnEvent + workDoneChannel chan p2pcommon.ConnWorkResult + + finishChannel chan struct{} + + eventListeners []PeerEventListener // designatedPeers map[peer.ID]p2pcommon.PeerMeta - awaitMutex sync.RWMutex - awaitPeers map[peer.ID]*reconnectJob - awaitDone chan struct{} logger *log.Logger } +// getPeerChan is struct to get peer for concurrent use +type getPeerChan struct { + id peer.ID + ret chan p2pcommon.RemotePeer +} + var _ p2pcommon.PeerManager = (*peerManager)(nil) // PeerEventListener listen peer manage event @@ -82,7 +93,7 @@ type PeerEventListener interface { } // NewPeerManager creates a peer manager object. -func NewPeerManager(handlerFactory HandlerFactory, hsFactory HSHandlerFactory, iServ p2pcommon.ActorService, cfg *cfg.Config, signer p2pcommon.MsgSigner, nt p2pcommon.NetworkTransport, mm metric.MetricsManager, logger *log.Logger, mf p2pcommon.MoFactory) p2pcommon.PeerManager { +func NewPeerManager(handlerFactory p2pcommon.HandlerFactory, hsFactory p2pcommon.HSHandlerFactory, iServ p2pcommon.ActorService, cfg *cfg.Config, signer p2pcommon.MsgSigner, nt p2pcommon.NetworkTransport, mm metric.MetricsManager, logger *log.Logger, mf p2pcommon.MoFactory, skipHandshakeSync bool) p2pcommon.PeerManager { p2pConf := cfg.P2P //logger.SetLevel("debug") pm := &peerManager{ @@ -96,22 +107,26 @@ func NewPeerManager(handlerFactory HandlerFactory, hsFactory HSHandlerFactory, i mm: mm, logger: logger, mutex: &sync.Mutex{}, + skipHandshakeSync: skipHandshakeSync, status: initial, designatedPeers: make(map[peer.ID]p2pcommon.PeerMeta, len(cfg.P2P.NPAddPeers)), hiddenPeerSet: make(map[peer.ID]bool, len(cfg.P2P.NPHiddenPeers)), - remotePeers: make(map[peer.ID]*remotePeerImpl, p2pConf.NPMaxPeers), + remotePeers: make(map[peer.ID]p2pcommon.RemotePeer, p2pConf.NPMaxPeers), - awaitPeers: make(map[peer.ID]*reconnectJob, p2pConf.NPPeerPool), - peerPool: make(map[peer.ID]p2pcommon.PeerMeta, p2pConf.NPPeerPool), - peerCache: make([]p2pcommon.RemotePeer, 0, p2pConf.NPMaxPeers), - awaitDone: make(chan struct{}), + waitingPeers: make(map[peer.ID]*p2pcommon.WaitingPeer, p2pConf.NPPeerPool), - addPeerChannel: make(chan p2pcommon.PeerMeta, 2), - fillPoolChannel: make(chan []p2pcommon.PeerMeta, 2), - eventListeners: make([]PeerEventListener, 0, 4), - finishChannel: make(chan struct{}), + peerCache: make([]p2pcommon.RemotePeer, 0, p2pConf.NPMaxPeers), + + getPeerChannel: make(chan getPeerChan), + peerHandshaked: make(chan p2pcommon.RemotePeer), + removePeerChannel: make(chan p2pcommon.RemotePeer), + fillPoolChannel: make(chan []p2pcommon.PeerMeta, 2), + inboundConnChan: make(chan inboundConnEvent), + workDoneChannel: make(chan p2pcommon.ConnWorkResult), + eventListeners: make([]PeerEventListener, 0, 4), + finishChannel: make(chan struct{}), } // additional initializations @@ -124,13 +139,7 @@ func (pm *peerManager) SelfMeta() p2pcommon.PeerMeta { return pm.nt.SelfMeta() } func (pm *peerManager) SelfNodeID() peer.ID { - return pm.nt.ID() -} - -func (pm *peerManager) RegisterEventListener(listener PeerEventListener) { - pm.mutex.Lock() - defer pm.mutex.Unlock() - pm.eventListeners = append(pm.eventListeners, listener) + return p2pkey.NodeID() } func (pm *peerManager) init() { @@ -144,36 +153,32 @@ func (pm *peerManager) init() { } pm.hiddenPeerSet[pid] = true } + + pm.peerFinder = NewPeerFinder(pm.logger, pm, pm.actorService, pm.conf.NPPeerPool, pm.conf.NPDiscoverPeers, pm.conf.NPUsePolaris) + pm.wpManager = NewWaitingPeerManager(pm.logger, pm, pm.actorService, 
pm.conf.NPPeerPool, pm.conf.NPDiscoverPeers, pm.conf.NPUsePolaris) + // add designated peers to waiting pool at initial time. + for _, meta := range pm.designatedPeers { + if _, foundInWait := pm.waitingPeers[meta.ID]; !foundInWait { + pm.waitingPeers[meta.ID] = &p2pcommon.WaitingPeer{Meta: meta, NextTrial: time.Now()} + } + } + } func (pm *peerManager) Start() error { - go pm.runManagePeers() - // need to start listen after chainservice is read to init - // FIXME: adhoc code - go func() { - //time.Sleep(time.Second * 3) - pm.nt.AddStreamHandler(p2pcommon.AergoP2PSub, pm.onConnect) - pm.logger.Info().Str("version", string(p2pcommon.AergoP2PSub)).Msg("Starting p2p listening") - - // addition should start after all modules are started - go func() { - time.Sleep(time.Second * 2) - for _, meta := range pm.designatedPeers { - pm.addPeerChannel <- meta - } - }() - }() - if !atomic.CompareAndSwapInt32(&pm.status, initial, running) { - panic("wrong internal status") - } return nil } func (pm *peerManager) Stop() error { - if !atomic.CompareAndSwapInt32(&pm.status, running, stopping) { + if atomic.CompareAndSwapInt32(&pm.status, running, stopping) { pm.finishChannel <- struct{}{} + } else { + // leave stopped if already stopped + if atomic.SwapInt32(&pm.status, stopping) == stopped { + atomic.StoreInt32(&pm.status, stopped) + } } return nil } @@ -181,7 +186,7 @@ func (pm *peerManager) Stop() error { func (pm *peerManager) initDesignatedPeerList() { // add remote node from config for _, target := range pm.conf.NPAddPeers { - peerMeta, err := ParseMultiAddrString(target) + peerMeta, err := p2putil.ParseMultiAddrString(target) if err != nil { pm.logger.Warn().Err(err).Str("str", target).Msg("invalid NPAddPeer address") continue @@ -194,31 +199,87 @@ func (pm *peerManager) initDesignatedPeerList() { } func (pm *peerManager) runManagePeers() { - initialAddrDelay := time.Second * 20 - initialTimer := time.NewTimer(initialAddrDelay) - addrTicker := time.NewTicker(DiscoveryQueryInterval) + + pm.logger.Info().Str("p2p_proto", p2putil.ProtocolIDsToString([]protocol.ID{p2pcommon.P2PSubAddr, p2pcommon.LegacyP2PSubAddr})).Msg("Starting p2p listening") + pm.nt.AddStreamHandler(p2pcommon.LegacyP2PSubAddr, pm.wpManager.OnInboundConnLegacy) + pm.nt.AddStreamHandler(p2pcommon.P2PSubAddr, pm.wpManager.OnInboundConn) + + if !atomic.CompareAndSwapInt32(&pm.status, initial, running) { + panic("wrong internal status") + } + instantStart := time.Millisecond << 4 + initialAddrDelay := time.Second * 2 + finderTimer := time.NewTimer(initialAddrDelay) + connManTimer := time.NewTimer(initialAddrDelay << 1) + MANLOOP: for { select { - case meta := <-pm.addPeerChannel: - if pm.addOutboundPeer(meta) { - pm.cancelAwait(meta.ID) + case req := <-pm.getPeerChannel: + peer, exist := pm.remotePeers[req.id] + if exist { + req.ret <- peer + } else { + req.ret <- nil + } + case peer := <-pm.peerHandshaked: + if pm.tryRegister(peer) { + pm.peerFinder.OnPeerConnect(peer.ID()) + pm.wpManager.OnPeerConnect(peer.ID()) + + pm.checkSync(peer) + + // query other peers + if !finderTimer.Stop() { + <-finderTimer.C + } + finderTimer.Reset(instantStart) + } + case peer := <-pm.removePeerChannel: + if pm.removePeer(peer) { + pm.peerFinder.OnPeerDisconnect(peer) + pm.wpManager.OnPeerDisconnect(peer) + } + if !connManTimer.Stop() { + <-connManTimer.C + } + connManTimer.Reset(instantStart) + case inInfo := <-pm.inboundConnChan: + id := inInfo.meta.ID + if _, found := pm.remotePeers[id]; found { + inInfo.foundC <- true + } else { + inInfo.foundC <- false 
} - case <-initialTimer.C: - initialTimer.Stop() - pm.checkAndCollectPeerListFromAll() - case <-addrTicker.C: - pm.checkAndCollectPeerListFromAll() + case workResult := <-pm.workDoneChannel: + pm.wpManager.OnWorkDone(workResult) + // Retry + if !connManTimer.Stop() { + <-connManTimer.C + } + connManTimer.Reset(instantStart) + case <-finderTimer.C: + pm.peerFinder.CheckAndFill() + finderTimer.Reset(DiscoveryQueryInterval) + case <-connManTimer.C: + pm.wpManager.CheckAndConnect() + // fire at next interval + connManTimer.Reset(p2pcommon.WaitingPeerManagerInterval) case peerMetas := <-pm.fillPoolChannel: - pm.tryFillPool(&peerMetas) + if pm.wpManager.OnDiscoveredPeers(peerMetas) > 0 { + if !connManTimer.Stop() { + <-connManTimer.C + } + connManTimer.Reset(instantStart) + } case <-pm.finishChannel: - addrTicker.Stop() + finderTimer.Stop() + connManTimer.Stop() break MANLOOP } } // guarrenty no new peer connection will be made - pm.cancelAllAwait() - pm.nt.RemoveStreamHandler(p2pcommon.AergoP2PSub) + pm.nt.RemoveStreamHandler(p2pcommon.LegacyP2PSubAddr) pm.logger.Info().Msg("Finishing peerManager") go func() { @@ -228,100 +289,38 @@ MANLOOP: } }() timer := time.NewTimer(time.Second * 30) - finishPoll := time.NewTicker(time.Second) + finishPoll := time.NewTicker(time.Millisecond << 6) CLEANUPLOOP: for { select { + case req := <-pm.getPeerChannel: + req.ret <- nil + case peer := <-pm.removePeerChannel: + pm.removePeer(peer) case <-finishPoll.C: - pm.mutex.Lock() if len(pm.remotePeers) == 0 { - pm.mutex.Unlock() pm.logger.Debug().Msg("All peers were finished peerManager") break CLEANUPLOOP } - pm.mutex.Unlock() case <-timer.C: pm.logger.Warn().Int("remained", len(pm.peerCache)).Msg("peermanager stop timeout. some peers were not finished.") break CLEANUPLOOP } } + atomic.StoreInt32(&pm.status, stopped) } -// addOutboundPeer try to connect and handshake to remote peer. it can be called after peermanager is inited. -// It return true if peer is added or return false if failed to add peer or more suitable connection already exists. -func (pm *peerManager) addOutboundPeer(meta p2pcommon.PeerMeta) bool { - s, err := pm.nt.GetOrCreateStream(meta, p2pcommon.AergoP2PSub) - if err != nil { - pm.logger.Info().Err(err).Str(p2putil.LogPeerID, p2putil.ShortForm(meta.ID)).Msg("Failed to get stream.") - return false - } - - completeMeta, added := pm.tryAddPeer(true, meta, s) - if !added { - s.Close() - return false - } else { - if meta.IPAddress != completeMeta.IPAddress { - pm.logger.Debug().Str(p2putil.LogPeerID, p2putil.ShortForm(completeMeta.ID)).Str("before", meta.IPAddress).Str("after", completeMeta.IPAddress).Msg("IP address of remote peer is changed to ") - } - } - return true -} - -// tryAddPeer will do check connecting peer and add. it will return peer meta information received from -// remote peer. stream s will be owned to remotePeer if succeed to add perr. 
-func (pm *peerManager) tryAddPeer(outbound bool, meta p2pcommon.PeerMeta, s inet.Stream) (p2pcommon.PeerMeta, bool) { - var peerID = meta.ID - rd := metric.NewReader(s) - wt := metric.NewWriter(s) - h := pm.hsFactory.CreateHSHandler(outbound, pm, pm.actorService, pm.logger, peerID) - rw, remoteStatus, err := h.Handle(rd, wt, defaultHandshakeTTL) - if err != nil { - pm.logger.Debug().Err(err).Str(p2putil.LogPeerID, p2putil.ShortForm(meta.ID)).Msg("Failed to handshake") - if rw != nil { - pm.sendGoAway(rw, err.Error()) - } - return meta, false - } - // update peer meta info using sent information from remote peer - receivedMeta := p2pcommon.NewMetaFromStatus(remoteStatus, outbound) - if receivedMeta.ID != peerID { - pm.logger.Debug().Str("received_peer_id", receivedMeta.ID.Pretty()).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("Inconsistent peerID") - pm.sendGoAway(rw, "Inconsistent peerID") - return meta, false - } - _, receivedMeta.Designated = pm.designatedPeers[peerID] - - // adding peer to peer list - newPeer, err := pm.registerPeer(peerID, receivedMeta, remoteStatus, s, rw) - if err != nil { - pm.sendGoAway(rw, err.Error()) - return meta, false - } - newPeer.metric = pm.mm.Add(peerID, rd, wt) - - if pm.logger.IsDebugEnabled() { - addrStrs := pm.nt.GetAddressesOfPeer(peerID) - pm.logger.Debug().Strs("addrs", addrStrs).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("addresses of peer") - } - - pm.doPostHandshake(peerID, remoteStatus) - // notice to p2pmanager that handshaking is finished - pm.NotifyPeerHandshake(peerID) - - return receivedMeta, true -} - -func (pm *peerManager) registerPeer(peerID peer.ID, receivedMeta p2pcommon.PeerMeta, status *types.Status, s inet.Stream, rw p2pcommon.MsgReadWriter) (*remotePeerImpl, error) { - pm.mutex.Lock() - defer pm.mutex.Unlock() +// tryRegister register peer to peer manager, if peer with same peer +func (pm *peerManager) tryRegister(peer p2pcommon.RemotePeer) bool { + peerID := peer.ID() + receivedMeta := peer.Meta() preExistPeer, ok := pm.remotePeers[peerID] if ok { pm.logger.Info().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("Peer add collision. 
Outbound connection of higher hash will survive.") - iAmLower := p2putil.ComparePeerID(pm.SelfNodeID(), receivedMeta.ID) <= 0 - if iAmLower == receivedMeta.Outbound { + iAmLowerOrEqual := p2putil.ComparePeerID(pm.SelfNodeID(), receivedMeta.ID) <= 0 + if iAmLowerOrEqual == receivedMeta.Outbound { pm.logger.Info().Str("local_peer_id", p2putil.ShortForm(pm.SelfNodeID())).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Bool("outbound", receivedMeta.Outbound).Msg("Close connection and keep earlier handshake connection.") - return nil, fmt.Errorf("Already handshake peer %s ", p2putil.ShortForm(peerID)) + return false } else { pm.logger.Info().Str("local_peer_id", p2putil.ShortForm(pm.SelfNodeID())).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Bool("outbound", receivedMeta.Outbound).Msg("Keep connection and close earlier handshake connection.") // stopping lower valued connection @@ -329,54 +328,27 @@ func (pm *peerManager) registerPeer(peerID peer.ID, receivedMeta p2pcommon.PeerM } } - outboundPeer := newRemotePeer(receivedMeta, pm.GetNextManageNum(), pm, pm.actorService, pm.logger, pm.mf, pm.signer, s, rw) - outboundPeer.UpdateBlkCache(status.GetBestBlockHash(), status.GetBestHeight()) + go peer.RunPeer() + // FIXME type casting is worse + pm.insertPeer(peerID, peer) + pm.logger.Info().Bool("outbound", receivedMeta.Outbound).Str(p2putil.LogPeerName, peer.Name()).Str("addr", net.ParseIP(receivedMeta.IPAddress).String()+":"+strconv.Itoa(int(receivedMeta.Port))).Msg("peer is added to peerService") - // insert Handlers - pm.handlerFactory.insertHandlers(outboundPeer) + // TODO add triggering sync. - go outboundPeer.RunPeer() - pm.insertPeer(peerID, outboundPeer) - pm.logger.Info().Bool("outbound", receivedMeta.Outbound).Str(p2putil.LogPeerName, outboundPeer.Name()).Str("addr", net.ParseIP(receivedMeta.IPAddress).String()+":"+strconv.Itoa(int(receivedMeta.Port))).Msg("peer is added to peerService") - - return outboundPeer, nil -} - -// doPostHandshake is additional work after peer is added. -func (pm *peerManager) doPostHandshake(peerID peer.ID, remoteStatus *types.Status) { - - pm.logger.Debug().Uint64("target", remoteStatus.BestHeight).Msg("request new syncer") - pm.actorService.SendRequest(message.SyncerSvc, &message.SyncStart{PeerID: peerID, TargetNo: remoteStatus.BestHeight}) - - // sync mempool tx infos - // TODO add tx handling + return true } func (pm *peerManager) GetNextManageNum() uint32 { return atomic.AddUint32(&pm.manageNumber, 1) } -func (pm *peerManager) sendGoAway(rw p2pcommon.MsgReadWriter, msg string) { - goMsg := &types.GoAwayNotice{Message: msg} - // TODO code smell. non safe casting. - mo := pm.mf.NewMsgRequestOrder(false, subproto.GoAway, goMsg).(*pbRequestOrder) - container := mo.message - rw.WriteMsg(container) -} - -func (pm *peerManager) AddNewPeer(peer p2pcommon.PeerMeta) { - pm.addPeerChannel <- peer +func (pm *peerManager) AddNewPeer(meta p2pcommon.PeerMeta) { + sli := []p2pcommon.PeerMeta{meta} + pm.fillPoolChannel <- sli } func (pm *peerManager) RemovePeer(peer p2pcommon.RemotePeer) { - pm.removePeer(peer) -} - -func (pm *peerManager) NotifyPeerHandshake(peerID peer.ID) { - // TODO code smell. 
- if pm.conf.NPDiscoverPeers { - pm.checkAndCollectPeerList(peerID) - } + pm.removePeerChannel <- peer } func (pm *peerManager) NotifyPeerAddressReceived(metas []p2pcommon.PeerMeta) { @@ -385,17 +357,15 @@ func (pm *peerManager) NotifyPeerAddressReceived(metas []p2pcommon.PeerMeta) { // removePeer unregister managed remote peer connection // It return true if peer is exist and managed by peermanager +// it must called in peermanager goroutine func (pm *peerManager) removePeer(peer p2pcommon.RemotePeer) bool { peerID := peer.ID() - pm.mutex.Lock() target, ok := pm.remotePeers[peerID] if !ok { - pm.mutex.Unlock() return false } - if target.manageNum != peer.ManageNumber() { + if target.ManageNumber() != peer.ManageNumber() { pm.logger.Debug().Uint32("remove_num", peer.ManageNumber()).Uint32("exist_num", target.ManageNumber()).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("remove peer is requested but already removed and other instance is on") - pm.mutex.Unlock() return false } if target.State() == types.RUNNING { @@ -403,125 +373,23 @@ func (pm *peerManager) removePeer(peer p2pcommon.RemotePeer) bool { } pm.deletePeer(peerID) pm.logger.Info().Uint32("manage_num", peer.ManageNumber()).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("removed peer in peermanager") - pm.mutex.Unlock() for _, listener := range pm.eventListeners { listener.OnRemovePeer(peerID) } - if meta, found := pm.designatedPeers[peer.ID()]; found { - pm.addAwait(meta) - } return true } -func (pm *peerManager) onConnect(s inet.Stream) { - peerID := s.Conn().RemotePeer() - tempMeta := p2pcommon.PeerMeta{ID: peerID} - addr := s.Conn().RemoteMultiaddr() - - pm.logger.Debug().Str(p2putil.LogFullID, peerID.Pretty()).Str("multiaddr", addr.String()).Msg("new inbound peer arrived") - completeMeta, added := pm.tryAddPeer(false, tempMeta, s) - if !added { - s.Close() - } else { - if tempMeta.IPAddress != completeMeta.IPAddress { - pm.logger.Debug().Str("after", completeMeta.IPAddress).Msg("Update IP address of inbound remote peer") - } - } -} - -func (pm *peerManager) checkAndCollectPeerListFromAll() { - if pm.hasEnoughPeers() { - return - } - if pm.conf.NPUsePolaris { - pm.logger.Debug().Msg("Sending map query to polaris") - pm.actorService.SendRequest(message.P2PSvc, &message.MapQueryMsg{Count: MaxAddrListSizePolaris}) - } - - // if server is not discover new peer, such as of BP or backup node, it does not send addresses reqeust to other peer. - // These types are only connect to designated peers. - if pm.conf.NPDiscoverPeers { - // not strictly need to check peers. 
so use cache instead - for _, remotePeer := range pm.peerCache { - pm.actorService.SendRequest(message.P2PSvc, &message.GetAddressesMsg{ToWhom: remotePeer.ID(), Size: MaxAddrListSizePeer, Offset: 0}) - } - } -} - -func (pm *peerManager) checkAndCollectPeerList(ID peer.ID) { - if pm.hasEnoughPeers() { - return - } - rPeer, ok := pm.GetPeer(ID) - if !ok { - pm.logger.Warn().Str(p2putil.LogFullID, ID.Pretty()).Msg("invalid peer id") - return - } - pm.actorService.SendRequest(message.P2PSvc, &message.GetAddressesMsg{ToWhom: rPeer.ID(), Size: 20, Offset: 0}) -} - -func (pm *peerManager) hasEnoughPeers() bool { - return len(pm.peerPool) >= pm.conf.NPPeerPool -} - -// tryConnectPeers should be called in runManagePeers() only -func (pm *peerManager) tryFillPool(metas *[]p2pcommon.PeerMeta) { - added := make([]p2pcommon.PeerMeta, 0, len(*metas)) - invalid := make([]string, 0) - for _, meta := range *metas { - if string(meta.ID) == "" { - invalid = append(invalid, p2putil.ShortMetaForm(meta)) - continue - } - _, found := pm.peerPool[meta.ID] - if !found { - // change some properties - meta.Outbound = true - meta.Designated = false - pm.peerPool[meta.ID] = meta - added = append(added, meta) - } - } - if len(invalid) > 0 { - pm.logger.Warn().Strs("metas", invalid).Msg("invalid meta list was come") - } - pm.logger.Debug().Int("added_cnt", len(added)).Msg("Filled unknown peer addresses to peerpool") - pm.tryConnectPeers() -} - -// tryConnectPeers should be called in runManagePeers() only -func (pm *peerManager) tryConnectPeers() { - remained := pm.conf.NPMaxPeers - len(pm.remotePeers) - for ID, meta := range pm.peerPool { - if _, found := pm.GetPeer(ID); found { - delete(pm.peerPool, ID) - continue - } - if meta.IPAddress == "" || meta.Port == 0 { - pm.logger.Warn().Str(p2putil.LogPeerID, p2putil.ShortForm(meta.ID)).Str("addr", meta.IPAddress). - Uint32("port", meta.Port).Msg("Invalid peer meta informations") - continue - } - // in same go rountine. 
- pm.addOutboundPeer(meta) - remained-- - if remained <= 0 { - break - } - } -} - func (pm *peerManager) GetPeer(ID peer.ID) (p2pcommon.RemotePeer, bool) { - pm.mutex.Lock() - defer pm.mutex.Unlock() + gc := getPeerChan{id: ID, ret: make(chan p2pcommon.RemotePeer)} // vs code's lint does not allow direct return of map operation - ptr, ok := pm.remotePeers[ID] - if !ok { + pm.getPeerChannel <- gc + ptr := <-gc.ret + if ptr == nil { return nil, false } - return ptr, ok + return ptr, true } func (pm *peerManager) GetPeers() []p2pcommon.RemotePeer { @@ -550,7 +418,7 @@ func (pm *peerManager) GetPeerAddresses(noHidden bool, showSelf bool) []*message return nil } selfpi := &message.PeerInfo{ - &addr, meta.Hidden, time.Now(), bestBlk.BlockHash(), bestBlk.Header.BlockNo, types.RUNNING, true} + &addr, meta.Version, meta.Hidden, time.Now(), bestBlk.BlockHash(), bestBlk.Header.BlockNo, types.RUNNING, true} peers = append(peers, selfpi) } for _, aPeer := range pm.peerCache { @@ -559,19 +427,16 @@ func (pm *peerManager) GetPeerAddresses(noHidden bool, showSelf bool) []*message continue } addr := meta.ToPeerAddress() - lastNoti := aPeer.LastNotice() + lastNoti := aPeer.LastStatus() pi := &message.PeerInfo{ - &addr, meta.Hidden, lastNoti.CheckTime, lastNoti.BlockHash, lastNoti.BlockNumber, aPeer.State(), false} + &addr, meta.Version, meta.Hidden, lastNoti.CheckTime, lastNoti.BlockHash, lastNoti.BlockNumber, aPeer.State(), false} peers = append(peers, pi) } return peers } // this method should be called inside pm.mutex -func (pm *peerManager) insertPeer(ID peer.ID, peer *remotePeerImpl) { - if _, exist := pm.hiddenPeerSet[ID]; exist { - peer.meta.Hidden = true - } +func (pm *peerManager) insertPeer(ID peer.ID, peer p2pcommon.RemotePeer) { pm.remotePeers[ID] = peer pm.updatePeerCache() } @@ -588,43 +453,16 @@ func (pm *peerManager) updatePeerCache() { for _, rPeer := range pm.remotePeers { newSlice = append(newSlice, rPeer) } + pm.mutex.Lock() + defer pm.mutex.Unlock() pm.peerCache = newSlice } -func (pm *peerManager) addAwait(meta p2pcommon.PeerMeta) { - pm.awaitMutex.Lock() - defer pm.awaitMutex.Unlock() - if _, exist := pm.awaitPeers[meta.ID]; exist { - return - } - if atomic.LoadInt32(&pm.status) != running { - return - } - job := newReconnectRunner(meta, pm, pm.logger) - pm.awaitPeers[meta.ID] = job - go job.runJob() -} - -func (pm *peerManager) cancelAwait(id peer.ID) { - pm.awaitMutex.Lock() - defer pm.awaitMutex.Unlock() - defer func() { - if atomic.LoadInt32(&pm.status) == stopping && - len(pm.awaitPeers) == 0 { - pm.awaitDone <- struct{}{} - } - }() - job, exist := pm.awaitPeers[id] - if !exist { +func (pm *peerManager) checkSync(peer p2pcommon.RemotePeer) { + if pm.skipHandshakeSync { return } - delete(pm.awaitPeers, id) - job.cancel <- struct{}{} -} -func (pm *peerManager) cancelAllAwait() { - for id, _ := range pm.awaitPeers { - go pm.cancelAwait(id) - } - <-pm.awaitDone -} + pm.logger.Debug().Uint64("target", peer.LastStatus().BlockNumber).Msg("request new syncer") + pm.actorService.SendRequest(message.SyncerSvc, &message.SyncStart{PeerID: peer.ID(), TargetNo: peer.LastStatus().BlockNumber}) +} \ No newline at end of file diff --git a/p2p/peermanager_test.go b/p2p/peermanager_test.go index 6ba819b66..6adead503 100644 --- a/p2p/peermanager_test.go +++ b/p2p/peermanager_test.go @@ -1,26 +1,25 @@ -/* - * @file - * @copyright defined in aergo/LICENSE.txt - */ package p2p import ( "fmt" + "github.com/aergoio/aergo/p2p/p2pkey" + crypto "github.com/libp2p/go-libp2p-crypto" "strconv" "sync" + 
"sync/atomic" "testing" + "time" + "github.com/aergoio/aergo-lib/log" + cfg "github.com/aergoio/aergo/config" + "github.com/aergoio/aergo/message" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2pmock" "github.com/aergoio/aergo/p2p/p2putil" + "github.com/aergoio/aergo/types" "github.com/golang/mock/gomock" peer "github.com/libp2p/go-libp2p-peer" "github.com/stretchr/testify/assert" - - "github.com/aergoio/aergo-lib/log" - cfg "github.com/aergoio/aergo/config" - "github.com/aergoio/aergo/message" - "github.com/aergoio/aergo/types" ) func FailTestGetPeers(t *testing.T) { @@ -35,7 +34,7 @@ func FailTestGetPeers(t *testing.T) { target := NewPeerManager(nil, nil, mockActorServ, cfg.NewServerContext("", "").GetDefaultConfig().(*cfg.Config), nil, nil, nil, - log.NewLogger("test.p2p"), mockMF).(*peerManager) + log.NewLogger("test.p2p"), mockMF, false).(*peerManager) iterSize := 500 wg := sync.WaitGroup{} @@ -72,11 +71,11 @@ func TestPeerManager_GetPeers(t *testing.T) { tLogger := log.NewLogger("test.p2p") tConfig := cfg.NewServerContext("", "").GetDefaultConfig().(*cfg.Config) - InitNodeInfo(&tConfig.BaseConfig, tConfig.P2P, tLogger) + p2pkey.InitNodeInfo(&tConfig.BaseConfig, tConfig.P2P, "1.0.0-test", tLogger) target := NewPeerManager(nil, nil, mockActorServ, tConfig, nil, nil, nil, - tLogger, mockMF).(*peerManager) + tLogger, mockMF, false).(*peerManager) iterSize := 500 wg := &sync.WaitGroup{} @@ -102,7 +101,7 @@ func TestPeerManager_GetPeers(t *testing.T) { for _ = range target.GetPeers() { cnt++ } - assert.True(t, cnt > (iterSize>>2)) + assert.True(t, cnt > (iterSize >> 2)) waitChan <- 0 }() @@ -113,22 +112,38 @@ func TestPeerManager_GetPeers(t *testing.T) { } func TestPeerManager_GetPeerAddresses(t *testing.T) { - peersLen := 3 + peersLen := 6 + hiddenCnt := 3 samplePeers := make([]*remotePeerImpl, peersLen) - samplePeers[0] = &remotePeerImpl{meta: p2pcommon.PeerMeta{ID: dummyPeerID}, lastNotice: &types.LastBlockStatus{}} - samplePeers[1] = &remotePeerImpl{meta: p2pcommon.PeerMeta{ID: dummyPeerID2}, lastNotice: &types.LastBlockStatus{}} - samplePeers[2] = &remotePeerImpl{meta: p2pcommon.PeerMeta{ID: dummyPeerID3}, lastNotice: &types.LastBlockStatus{}} + for i := 0; i < peersLen; i++ { + pkey, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256) + pid, _ := peer.IDFromPrivateKey(pkey) + samplePeers[i] = &remotePeerImpl{meta: p2pcommon.PeerMeta{ID: pid, Hidden: i < hiddenCnt}, lastStatus: &types.LastBlockStatus{}} + } + tests := []struct { name string + + hidden bool + showself bool + + wantCnt int }{ - // TODO: test cases + {"TDefault", false, false, peersLen}, + {"TWSelf", false, true, peersLen + 1}, + {"TWOHidden", true, false, peersLen - hiddenCnt}, + {"TWOHiddenWSelf", false, true, peersLen - hiddenCnt + 1}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - pm := &peerManager{remotePeers: make(map[peer.ID]*remotePeerImpl)} + pm := &peerManager{ + remotePeers: make(map[peer.ID]p2pcommon.RemotePeer), + mutex: &sync.Mutex{}, + } for _, peer := range samplePeers { pm.remotePeers[peer.ID()] = peer } + pm.updatePeerCache() actPeers := pm.GetPeerAddresses(false, false) assert.Equal(t, peersLen, len(actPeers)) @@ -139,7 +154,7 @@ func TestPeerManager_GetPeerAddresses(t *testing.T) { func TestPeerManager_init(t *testing.T) { tConfig := cfg.NewServerContext("", "").GetDefaultConfig().(*cfg.Config) defaultCfg := tConfig.P2P - InitNodeInfo(&tConfig.BaseConfig, defaultCfg, logger) + p2pkey.InitNodeInfo(&tConfig.BaseConfig, defaultCfg, "1.0.0-test", 
logger) localIP, _ := p2putil.ExternalIP() tests := []struct { @@ -181,3 +196,222 @@ func TestPeerManager_init(t *testing.T) { }) } } + +func Test_peerManager_runManagePeers_MultiConnWorks(t *testing.T) { + // Test if it works well when concurrent connections is handshaked. + ctrl := gomock.NewController(t) + logger := log.NewLogger("p2p.test") + type desc struct { + pid peer.ID + outbound bool + hsTime time.Duration + } + ds := make([]desc, 10) + for i := 0; i < 10; i++ { + pkey, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256) + pid, _ := peer.IDFromPrivateKey(pkey) + ds[i] = desc{hsTime: time.Millisecond * 10, outbound: true, pid: pid} + } + tests := []struct { + name string + + conns []desc + }{ + {"T10", ds}, + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockPeerFinder := p2pmock.NewMockPeerFinder(ctrl) + mockWPManager := p2pmock.NewMockWaitingPeerManager(ctrl) + mockWPManager.EXPECT().CheckAndConnect().AnyTimes() + mockNT := p2pmock.NewMockNetworkTransport(ctrl) + mockNT.EXPECT().AddStreamHandler(gomock.Any(), gomock.Any()).AnyTimes() + mockNT.EXPECT().RemoveStreamHandler(gomock.Any()).AnyTimes() + + dummyCfg := &cfg.P2PConfig{} + pm := &peerManager{ + peerFinder: mockPeerFinder, + wpManager: mockWPManager, + remotePeers: make(map[peer.ID]p2pcommon.RemotePeer, 10), + waitingPeers: make(map[peer.ID]*p2pcommon.WaitingPeer, 10), + conf: dummyCfg, + nt: mockNT, + + getPeerChannel: make(chan getPeerChan), + peerHandshaked: make(chan p2pcommon.RemotePeer), + removePeerChannel: make(chan p2pcommon.RemotePeer), + fillPoolChannel: make(chan []p2pcommon.PeerMeta, 2), + inboundConnChan: make(chan inboundConnEvent), + workDoneChannel: make(chan p2pcommon.ConnWorkResult), + eventListeners: make([]PeerEventListener, 0, 4), + finishChannel: make(chan struct{}), + + logger: logger, + } + + go pm.runManagePeers() + + workWG := sync.WaitGroup{} + workWG.Add(len(tt.conns)) + latch := sync.WaitGroup{} + latch.Add(len(tt.conns)) + finCnt := uint32(0) + for i, conn := range tt.conns { + meta := p2pcommon.PeerMeta{ID: conn.pid, Outbound: conn.outbound} + wr := p2pcommon.ConnWorkResult{Meta: meta, Result: nil, Inbound: !conn.outbound, Seq: uint32(i)} + go func(conn desc, result p2pcommon.ConnWorkResult) { + latch.Done() + latch.Wait() + //fmt.Printf("work start %s #%d",p2putil.ShortForm(meta.ID),i) + //time.Sleep(conn.hsTime) + fmt.Printf("work done %s #%d\n", p2putil.ShortForm(meta.ID), wr.Seq) + pm.workDoneChannel <- result + }(conn, wr) + } + mockWPManager.EXPECT().OnWorkDone(gomock.AssignableToTypeOf(p2pcommon.ConnWorkResult{})).Do( + func(wr p2pcommon.ConnWorkResult) { + atomic.AddUint32(&finCnt,1) + workWG.Done() + }).AnyTimes() + + workWG.Wait() + pm.Stop() + + if atomic.LoadUint32(&finCnt) != uint32(len(tt.conns)) { + t.Errorf("finished count %v want %v",finCnt, len(tt.conns)) + } + }) + } +} + +func Test_peerManager_Stop(t *testing.T) { + // check if Stop is working. + tests := []struct { + name string + + prevStatus int32 + + wantStatus int32 + wantSentChannel bool + }{ + // never send to finish channel twice. 
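+		// Only a call made while running should signal finishChannel; initial and
+		// stopping both settle on stopping without a signal, and stopped stays stopped.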
+ {"TInitial", initial, stopping, false}, + {"TRunning", running, stopping, true}, + {"TStopping", stopping, stopping, false}, + {"TStopped", stopped, stopped, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pm := &peerManager{ + logger: logger, + finishChannel: make(chan struct{}, 1), + } + + atomic.StoreInt32(&pm.status, tt.prevStatus) + pm.Stop() + + if atomic.LoadInt32(&pm.status) != tt.wantStatus { + t.Errorf("mansger status %v, want %v ", toMStatusName(atomic.LoadInt32(&pm.status)), + toMStatusName(tt.wantStatus)) + } + var sent bool + timeout := time.NewTimer(time.Millisecond << 6) + select { + case <-pm.finishChannel: + sent = true + case <-timeout.C: + sent = false + } + if sent != tt.wantSentChannel { + t.Errorf("signal sent %v, want %v ", sent, tt.wantSentChannel) + } + }) + } +} + +// It tests idempotent of Stop method +func Test_peerManager_StopInRun(t *testing.T) { + ctrl := gomock.NewController(t) + + // check if Stop is working. + tests := []struct { + name string + + callCnt int + wantStatus int32 + }{ + {"TStopOnce", 1, stopped}, + {"TStopTwice", 2, stopped}, + {"TInStopping", 3, stopped}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockNT := p2pmock.NewMockNetworkTransport(ctrl) + mockNT.EXPECT().AddStreamHandler(gomock.Any(), gomock.Any()).AnyTimes() + mockNT.EXPECT().RemoveStreamHandler(gomock.Any()).AnyTimes() + + mockPeerFinder := p2pmock.NewMockPeerFinder(ctrl) + mockWPManager := p2pmock.NewMockWaitingPeerManager(ctrl) + + pm := &peerManager{ + logger: logger, + nt: mockNT, + peerFinder: mockPeerFinder, + wpManager: mockWPManager, + + mutex: &sync.Mutex{}, + finishChannel: make(chan struct{}), + } + go pm.runManagePeers() + // wait status of pm is changed to running + for atomic.LoadInt32(&pm.status) != running { + time.Sleep(time.Millisecond) + } + // stopping will be done within one second if normal status + checkTimer := time.NewTimer(time.Second >> 3) + for i := 0; i < tt.callCnt; i++ { + pm.Stop() + time.Sleep(time.Millisecond << 6) + } + succ := false + failedTimeout := time.NewTimer(time.Second * 5) + + // check if status changed + VERIFYLOOP: + for { + select { + case <-checkTimer.C: + if atomic.LoadInt32(&pm.status) == tt.wantStatus { + succ = true + break VERIFYLOOP + } else { + checkTimer.Stop() + checkTimer.Reset(time.Second) + } + case <-failedTimeout.C: + break VERIFYLOOP + } + } + if !succ { + t.Errorf("mansger status %v, want %v within %v", toMStatusName(atomic.LoadInt32(&pm.status)), + toMStatusName(tt.wantStatus), time.Second*5) + } + }) + } +} + +func toMStatusName(status int32) string { + switch status { + case initial: + return "initial" + case running: + return "running" + case stopping: + return "stopping" + case stopped: + return "stopped" + default: + return "(invalid)" + strconv.Itoa(int(status)) + } +} diff --git a/p2p/pmap/polarisconnect_test.go b/p2p/pmap/polarisconnect_test.go deleted file mode 100644 index ecd649a1f..000000000 --- a/p2p/pmap/polarisconnect_test.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * @file - * @copyright defined in aergo/LICENSE.txt - */ - -package pmap - -import ( - "reflect" - "sync" - "testing" - - "github.com/aergoio/aergo/config" - "github.com/aergoio/aergo/p2p/p2pcommon" - "github.com/aergoio/aergo/p2p/p2pmock" - "github.com/aergoio/aergo/pkg/component" - "github.com/aergoio/aergo/types" - "github.com/golang/mock/gomock" - net "github.com/libp2p/go-libp2p-net" - peer "github.com/libp2p/go-libp2p-peer" -) - -// initSvc select Polarises to connect, or 
disable polaris -func TestPolarisConnectSvc_initSvc(t *testing.T) { - polarisIDMain, _ := peer.IDB58Decode("16Uiu2HAkuxyDkMTQTGFpmnex2SdfTVzYfPztTyK339rqUdsv3ZUa") - polarisIDTest, _ := peer.IDB58Decode("16Uiu2HAkvJTHFuJXxr15rFEHsJWnyn1QvGatW2E9ED9Mvy4HWjVF") - dummyPeerID2, _ := peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") - polar2 := "/ip4/172.21.1.2/tcp/8915/p2p/16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm" - dummyPeerID3, _ := peer.IDB58Decode("16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD") - polar3 := "/ip4/172.22.2.3/tcp/8915/p2p/16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD" - - customChainID := types.ChainID{Magic: "unittest.blocko.io"} - type args struct { - use bool - polarises []string - - chainID *types.ChainID - } - tests := []struct { - name string - args args - - wantCnt int - peerIDs []peer.ID - }{ - // - {"TAergoNoPolaris", args{false, nil, &ONEMainNet}, 0, []peer.ID{}}, - {"TAergoMainDefault", args{true, nil, &ONEMainNet}, 1, []peer.ID{polarisIDMain}}, - {"TAergoMainPlusCfg", args{true, []string{polar2, polar3}, &ONEMainNet}, 3, []peer.ID{polarisIDMain, dummyPeerID2, dummyPeerID3}}, - {"TAergoTestDefault", args{true, nil, &ONETestNet}, 1, []peer.ID{polarisIDTest}}, - {"TAergoTestPlusCfg", args{true, []string{polar2, polar3}, &ONETestNet}, 3, []peer.ID{polarisIDTest, dummyPeerID2, dummyPeerID3}}, - {"TCustom", args{true, nil, &customChainID}, 0, []peer.ID{}}, - {"TCustomPlusCfg", args{true, []string{polar2, polar3}, &customChainID}, 2, []peer.ID{dummyPeerID2, dummyPeerID3}}, - {"TWrongPolarisAddr", args{true, []string{"/ip4/256.256.1.1/tcp/8915/p2p/16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD"}, &customChainID}, 0, []peer.ID{}}, - {"TWrongPolarisAddr2", args{true, []string{"/egwgew5/p2p/16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD"}, &customChainID}, 0, []peer.ID{}}, - {"TWrongPolarisAddr3", args{true, []string{"/dns/nowhere1234.aergo.io/tcp/8915/p2p/16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD"}, &customChainID}, 0, []peer.ID{}}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - mockNT := p2pmock.NewMockNetworkTransport(ctrl) - pmapDummyNTC.nt = mockNT - pmapDummyNTC.chainID = tt.args.chainID - - cfg := config.NewServerContext("", "").GetDefaultP2PConfig() - cfg.NPUsePolaris = tt.args.use - cfg.NPAddPolarises = tt.args.polarises - - pcs := NewPolarisConnectSvc(cfg, pmapDummyNTC) - - if len(pcs.mapServers) != tt.wantCnt { - t.Errorf("NewPolarisConnectSvc() = %v, want %v", len(pcs.mapServers), tt.wantCnt) - } - for _, wantPeerID := range tt.peerIDs { - found := false - for _, polarisMeta := range pcs.mapServers { - if wantPeerID == polarisMeta.ID { - found = true - break - } - } - if !found { - t.Errorf("initSvc() want exist %v but not ", wantPeerID) - } - } - }) - } -} - -func TestPolarisConnectSvc_BeforeStop(t *testing.T) { - - type fields struct { - BaseComponent *component.BaseComponent - } - tests := []struct { - name string - fields fields - - calledStreamHandler bool - }{ - {"TNot", fields{}, false}, - // TODO: Add test cases. 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctrl := gomock.NewController(t) - - mockNT := p2pmock.NewMockNetworkTransport(ctrl) - pmapDummyNTC.nt = mockNT - pms := NewPolarisConnectSvc(pmapDummyCfg.P2P, pmapDummyNTC) - - mockNT.EXPECT().AddStreamHandler(PolarisPingSub, gomock.Any()).Times(1) - mockNT.EXPECT().RemoveStreamHandler(PolarisPingSub).Times(1) - - pms.AfterStart() - - pms.BeforeStop() - - ctrl.Finish() - }) - } -} - -func TestPolarisConnectSvc_onPing(t *testing.T) { - type fields struct { - BaseComponent *component.BaseComponent - ChainID []byte - PrivateNet bool - mapServers []p2pcommon.PeerMeta - ntc p2pcommon.NTContainer - listen bool - nt p2pcommon.NetworkTransport - hc HealthCheckManager - rwmutex *sync.RWMutex - peerRegistry map[peer.ID]*peerState - } - type args struct { - s net.Stream - } - tests := []struct { - name string - fields fields - args args - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - pms := &PolarisConnectSvc{ - BaseComponent: tt.fields.BaseComponent, - PrivateChain: tt.fields.PrivateNet, - mapServers: tt.fields.mapServers, - ntc: tt.fields.ntc, - nt: tt.fields.nt, - rwmutex: tt.fields.rwmutex, - } - pms.onPing(tt.args.s) - }) - } -} - -func TestPeerMapService_connectAndQuery(t *testing.T) { - type fields struct { - BaseComponent *component.BaseComponent - ChainID []byte - PrivateNet bool - mapServers []p2pcommon.PeerMeta - ntc p2pcommon.NTContainer - listen bool - nt p2pcommon.NetworkTransport - hc HealthCheckManager - rwmutex *sync.RWMutex - peerRegistry map[peer.ID]*peerState - } - type args struct { - mapServerMeta p2pcommon.PeerMeta - bestHash []byte - bestHeight uint64 - } - tests := []struct { - name string - fields fields - args args - want []*types.PeerAddress - wantErr bool - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - pms := &PolarisConnectSvc{ - BaseComponent: tt.fields.BaseComponent, - PrivateChain: tt.fields.PrivateNet, - mapServers: tt.fields.mapServers, - ntc: tt.fields.ntc, - nt: tt.fields.nt, - rwmutex: tt.fields.rwmutex, - } - got, err := pms.connectAndQuery(tt.args.mapServerMeta, tt.args.bestHash, tt.args.bestHeight) - if (err != nil) != tt.wantErr { - t.Errorf("PolarisConnectSvc.connectAndQuery() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("PolarisConnectSvc.connectAndQuery() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestPolarisConnectSvc_sendRequest(t *testing.T) { - type fields struct { - BaseComponent *component.BaseComponent - ChainID []byte - PrivateNet bool - mapServers []p2pcommon.PeerMeta - ntc p2pcommon.NTContainer - listen bool - nt p2pcommon.NetworkTransport - hc HealthCheckManager - rwmutex *sync.RWMutex - peerRegistry map[peer.ID]*peerState - } - type args struct { - status *types.Status - mapServerMeta p2pcommon.PeerMeta - register bool - size int - wt p2pcommon.MsgWriter - } - tests := []struct { - name string - fields fields - args args - wantErr bool - }{ - // TODO: Add test cases. 
- } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - pms := &PolarisConnectSvc{ - BaseComponent: tt.fields.BaseComponent, - PrivateChain: tt.fields.PrivateNet, - mapServers: tt.fields.mapServers, - ntc: tt.fields.ntc, - nt: tt.fields.nt, - rwmutex: tt.fields.rwmutex, - } - if err := pms.sendRequest(tt.args.status, tt.args.mapServerMeta, tt.args.register, tt.args.size, tt.args.wt); (err != nil) != tt.wantErr { - t.Errorf("PolarisConnectSvc.sendRequest() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestPolarisConnectSvc_readResponse(t *testing.T) { - type fields struct { - BaseComponent *component.BaseComponent - ChainID []byte - PrivateNet bool - mapServers []p2pcommon.PeerMeta - ntc p2pcommon.NTContainer - listen bool - nt p2pcommon.NetworkTransport - hc HealthCheckManager - rwmutex *sync.RWMutex - peerRegistry map[peer.ID]*peerState - } - type args struct { - mapServerMeta p2pcommon.PeerMeta - rd p2pcommon.MsgReader - } - tests := []struct { - name string - fields fields - args args - want p2pcommon.Message - want1 *types.MapResponse - wantErr bool - }{ - // TODO: Add test cases. - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - pms := &PolarisConnectSvc{ - BaseComponent: tt.fields.BaseComponent, - PrivateChain: tt.fields.PrivateNet, - mapServers: tt.fields.mapServers, - ntc: tt.fields.ntc, - nt: tt.fields.nt, - rwmutex: tt.fields.rwmutex, - } - got, got1, err := pms.readResponse(tt.args.mapServerMeta, tt.args.rd) - if (err != nil) != tt.wantErr { - t.Errorf("PolarisConnectSvc.readResponse() error = %v, wantErr %v", err, tt.wantErr) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("PolarisConnectSvc.readResponse() got = %v, want %v", got, tt.want) - } - if !reflect.DeepEqual(got1, tt.want1) { - t.Errorf("PolarisConnectSvc.readResponse() got1 = %v, want %v", got1, tt.want1) - } - }) - } -} diff --git a/p2p/protobufHelper.go b/p2p/protobufHelper.go index 724886478..a3d7ec063 100644 --- a/p2p/protobufHelper.go +++ b/p2p/protobufHelper.go @@ -9,8 +9,8 @@ import ( "time" "github.com/aergoio/aergo/internal/enc" - "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/types" ) @@ -105,11 +105,34 @@ func (pr *pbResponseOrder) SendTo(pi p2pcommon.RemotePeer) error { type pbBlkNoticeOrder struct { pbMessageOrder blkHash []byte + blkNo uint64 } func (pr *pbBlkNoticeOrder) SendTo(pi p2pcommon.RemotePeer) error { p := pi.(*remotePeerImpl) var blkhash = types.ToBlockID(pr.blkHash) + passedTime := time.Now().Sub(p.lastBlkNoticeTime) + skipNotice := false + if p.LastStatus().BlockNumber >= pr.blkNo { + heightDiff := p.LastStatus().BlockNumber - pr.blkNo + switch { + case heightDiff >= GapToSkipAll: + skipNotice = true + case heightDiff >= GapToSkipHourly: + skipNotice = p.skipCnt < GapToSkipHourly + default: + skipNotice = p.skipCnt < GapToSkip5Min + } + } + if skipNotice || passedTime < MinNewBlkNotiInterval { + p.skipCnt++ + if p.skipCnt&0x03ff == 0 { + p.logger.Debug().Str(p2putil.LogPeerName, p.Name()).Str(p2putil.LogProtoID, pr.GetProtocolID().String()).Int32("skip_cnt", p.skipCnt).Msg("Skipped NewBlockNotice ") + + } + return nil + } + if ok, _ := p.blkHashCache.ContainsOrAdd(blkhash, cachePlaceHolder); ok { // the remote peer already know this block hash. 
skip it // too many not-insteresting log, @@ -122,6 +145,11 @@ func (pr *pbBlkNoticeOrder) SendTo(pi p2pcommon.RemotePeer) error { p.logger.Warn().Str(p2putil.LogPeerName, p.Name()).Str(p2putil.LogProtoID, pr.GetProtocolID().String()).Str(p2putil.LogMsgID, pr.GetMsgID().String()).Err(err).Msg("fail to SendTo") return err } + p.lastBlkNoticeTime = time.Now() + if p.skipCnt > 100 { + p.logger.Debug().Str(p2putil.LogPeerName, p.Name()).Str(p2putil.LogProtoID, pr.GetProtocolID().String()).Int32("skip_cnt", p.skipCnt).Msg("Send NewBlockNotice after long skip") + } + p.skipCnt = 0 return nil } diff --git a/p2p/protobufHelper_test.go b/p2p/protobufHelper_test.go index b8600294f..fd6ce3101 100644 --- a/p2p/protobufHelper_test.go +++ b/p2p/protobufHelper_test.go @@ -7,7 +7,9 @@ package p2p import ( "fmt" + "math/rand" "testing" + "time" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2pmock" @@ -23,7 +25,7 @@ func Test_pbRequestOrder_SendTo(t *testing.T) { defer ctrl.Finish() sampleMeta := p2pcommon.PeerMeta{ID: samplePeerID, IPAddress: "192.168.1.2", Port: 7845} - factory := &v030MOFactory{} + factory := &baseMOFactory{} tests := []struct { name string @@ -67,7 +69,7 @@ func Test_pbMessageOrder_SendTo(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() sampleMeta := p2pcommon.PeerMeta{ID: samplePeerID, IPAddress: "192.168.1.2", Port: 7845} - factory := &v030MOFactory{} + factory := &baseMOFactory{} tests := []struct { name string @@ -108,7 +110,7 @@ func Test_pbBlkNoticeOrder_SendTo(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() sampleMeta := p2pcommon.PeerMeta{ID: samplePeerID, IPAddress: "192.168.1.2", Port: 7845} - factory := &v030MOFactory{} + factory := &baseMOFactory{} tests := []struct { name string @@ -135,8 +137,9 @@ func Test_pbBlkNoticeOrder_SendTo(t *testing.T) { mockRW.EXPECT().WriteMsg(gomock.Any()).Return(tt.writeErr).Times(1) } peer := newRemotePeer(sampleMeta, 0, mockPeerManager, mockActorServ, logger, factory, &dummySigner{}, nil, mockRW) + peer.lastStatus = &types.LastBlockStatus{} - target := factory.NewMsgBlkBroadcastOrder(&types.NewBlockNotice{BlockHash: dummyBlockHash}) + target := factory.NewMsgBlkBroadcastOrder(&types.NewBlockNotice{BlockHash: dummyBlockHash, BlockNo:1}) msgID := sampleMsgID // notice broadcast is affected by cache // put dummy request information in cache @@ -157,11 +160,157 @@ func Test_pbBlkNoticeOrder_SendTo(t *testing.T) { } } +func Test_pbBlkNoticeOrder_SendTo_SkipByHeight(t *testing.T) { + allSendCnt := 3 + hashes := make([][]byte,allSendCnt) + for i:=0 ; i>1, 3,4}, + //// skip same or higher peer + //// the first notice is same and skip but seconds will be sent + {"TSamePeer", 0, 3, time.Second>>2, 2,3}, + {"TPartialHigh", 900, 3, time.Second>>2, 0,1}, + {"THighPeer", 10000, 3, time.Second>>2, 0,1}, + {"TVeryHighPeer", 100000, 3, time.Second>>2, 0,1}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockActorServ := p2pmock.NewMockActorService(ctrl) + mockPeerManager := p2pmock.NewMockPeerManager(ctrl) + mockRW := p2pmock.NewMockMsgReadWriter(ctrl) + + writeCnt := 0 + mockRW.EXPECT().WriteMsg(gomock.Any()).Do(func(arg interface{}) { + writeCnt++ + }).MinTimes(tt.wantSentLow) + + notiNo := uint64(99999) + peerBlkNo := uint64(int64(notiNo)+int64(tt.noDiff)) + peer := newRemotePeer(sampleMeta, 0, mockPeerManager, mockActorServ, logger, factory, &dummySigner{}, nil, mockRW) + peer.lastStatus = &types.LastBlockStatus{BlockNumber:peerBlkNo} + + skipMax := int32(0) + for 
i:=0; i0 { + // sleep tree times + time.Sleep(time.Second >> 2 ) + } + } + fmt.Printf("%v : Max skipCnt %v \n",tt.name, skipMax) + + }) + } +} + func Test_pbTxNoticeOrder_SendTo(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() sampleMeta := p2pcommon.PeerMeta{ID: samplePeerID, IPAddress: "192.168.1.2", Port: 7845} - factory := &v030MOFactory{} + factory := &baseMOFactory{} sampleHashes := make([][]byte, 10) for i := 0; i < 10; i++ { diff --git a/p2p/raftsupport/clusterreceiver.go b/p2p/raftsupport/clusterreceiver.go new file mode 100644 index 000000000..abee8d88a --- /dev/null +++ b/p2p/raftsupport/clusterreceiver.go @@ -0,0 +1,158 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package raftsupport + +import ( + "github.com/pkg/errors" + "sync" + "time" + + "github.com/aergoio/aergo/message" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/subproto" + "github.com/aergoio/aergo/types" + "github.com/golang/protobuf/proto" +) + +// ClusterInfoReceiver is send p2p getClusterInfo to connected peers and receive p2p responses one of peers return successful response +// The first version will be simplified version. it send and receive one by one. +type ClusterInfoReceiver struct { + mf p2pcommon.MoFactory + + peers []p2pcommon.RemotePeer + mutex sync.Mutex + sents map[p2pcommon.MsgID]p2pcommon.RemotePeer + offset int + + req *message.GetCluster + actor p2pcommon.ActorService + + ttl time.Duration + timeout time.Time + finished bool + status receiverStatus + + got []*types.Block + senderFinished chan interface{} +} + +type receiverStatus int32 + +const ( + receiverStatusWaiting receiverStatus = iota + receiverStatusCanceled + receiverStatusFinished +) + +func NewClusterInfoReceiver(actor p2pcommon.ActorService, mf p2pcommon.MoFactory, peers []p2pcommon.RemotePeer, ttl time.Duration, req *message.GetCluster) *ClusterInfoReceiver { + return &ClusterInfoReceiver{actor: actor, mf: mf, peers: peers, ttl: ttl, req: req, sents: make(map[p2pcommon.MsgID]p2pcommon.RemotePeer)} +} + +func (br *ClusterInfoReceiver) StartGet() { + br.timeout = time.Now().Add(br.ttl) + // create message data + // send message to first peer + go func() { + br.mutex.Lock() + defer br.mutex.Unlock() + if !br.trySendNextPeer() { + br.cancelReceiving(errors.New("no live peers"), false) + } + }() +} + +func (br *ClusterInfoReceiver) trySendNextPeer() bool { + for ; br.offset < len(br.peers); br.offset++ { + peer := br.peers[br.offset] + if peer.State() == types.RUNNING { + br.offset++ + mo := br.mf.NewMsgBlockRequestOrder(br.ReceiveResp, subproto.GetClusterRequest, &types.GetClusterInfoRequest{}) + peer.SendMessage(mo) + br.sents[mo.GetMsgID()] = peer + return true + } + } + return false +} + +// ReceiveResp must be called just in read go routine +func (br *ClusterInfoReceiver) ReceiveResp(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) (ret bool) { + // cases in waiting + // normal not status => wait + // normal status (last response) => finish + // abnormal resp (no following resp expected): hasNext is true => cancel + // abnormal resp (following resp expected): hasNext is false, or invalid resp data type (maybe remote peer is totally broken) => cancel finish + // case in status or status + ret = true + br.mutex.Lock() + defer br.mutex.Unlock() + // consuming request id at first + peer, exist := br.sents[msg.OriginalID()] + if exist { + delete(br.sents, msg.OriginalID()) + peer.ConsumeRequest(msg.OriginalID()) + } + + status := br.status + switch status { + 
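+ // only a receiver still in the waiting state handles the body; canceled or finished receivers just drain and ignore late responses.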
case receiverStatusWaiting: + br.handleInWaiting(msg, msgBody) + case receiverStatusCanceled: + fallthrough + case receiverStatusFinished: + fallthrough + default: + br.ignoreMsg(msg, msgBody) + return + } + return +} + +func (br *ClusterInfoReceiver) handleInWaiting(msg p2pcommon.Message, msgBody proto.Message) { + // timeout + if br.timeout.Before(time.Now()) { + // silently ignore already finished job + br.finishReceiver() + return + } + + // the remote peer returned malformed data or an error; fall back to the next peer + body, ok := msgBody.(*types.GetClusterInfoResponse) + if !ok || len(body.MbrAttrs) == 0 || body.Error != "" { + // TODO log fail reason? + if !br.trySendNextPeer() { + br.cancelReceiving(errors.New("no live peers"), false) + } + return + } + + // return the result + br.finishReceiver() + result := &message.GetClusterRsp{ChainID: body.GetChainID(), Members: body.GetMbrAttrs(), Err: nil} + br.req.ReplyC <- result + close(br.req.ReplyC) + return +} + +// cancelReceiving stops waiting for responses and returns a failure result. +// It may still receive remaining (and now useless) responses; cancellations are assumed to be infrequent. +func (br *ClusterInfoReceiver) cancelReceiving(err error, hasNext bool) { + br.status = receiverStatusCanceled + result := &message.GetClusterRsp{Err: err} + br.req.ReplyC <- result + close(br.req.ReplyC) + br.finishReceiver() +} + +// finishReceiver marks the receiver as finished so that any later responses are ignored. +func (br *ClusterInfoReceiver) finishReceiver() { + br.status = receiverStatusFinished +} + +// ignoreMsg silently drops trailing responses that are no longer useful. +func (br *ClusterInfoReceiver) ignoreMsg(msg p2pcommon.Message, msgBody proto.Message) { + // nothing to do for now +} diff --git a/p2p/raftsupport/clusterreceiver_test.go b/p2p/raftsupport/clusterreceiver_test.go new file mode 100644 index 000000000..09738fd78 --- /dev/null +++ b/p2p/raftsupport/clusterreceiver_test.go @@ -0,0 +1,252 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package raftsupport + +import ( + "bytes" + "sync/atomic" + "testing" + "time" + + "github.com/aergoio/aergo/message" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pmock" + "github.com/aergoio/aergo/p2p/subproto" + "github.com/aergoio/aergo/types" + "github.com/golang/mock/gomock" + peer "github.com/libp2p/go-libp2p-peer" +) + +func TestStartGet(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + type args struct { + peerCnt int + timeout time.Duration + } + tests := []struct { + name string + args args + + wantSentCnt int // count of sent to remote peers + wantTimeout bool // whether receiver returns result or not (=timeout) + wantErrResp bool // result with error or not + }{ + {"TTimeout", args{peerCnt: 1}, 1, true, false}, + {"TNoPeers", args{peerCnt: 0}, 0, false, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockActor := p2pmock.NewMockActorService(ctrl) + mockMF := p2pmock.NewMockMoFactory(ctrl) + mockMo := createDummyMo(ctrl) + mockMF.EXPECT().NewMsgBlockRequestOrder(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockMo).Times(tt.wantSentCnt) + peers := make([]p2pcommon.RemotePeer, 0, tt.args.peerCnt) + for i := 0; i < tt.args.peerCnt; i++ { + dummyPeerID, _ := peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") + peers = append(peers, createDummyPeer(ctrl, dummyPeerID, types.RUNNING)) + } + replyChan := make(chan *message.GetClusterRsp) + dummyReq := &message.GetCluster{ReplyC: replyChan} +
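+ // the mock peers never reply, so a request that is actually sent can only end in timeout, while an empty peer list makes the receiver answer immediately with an error.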
target := NewClusterInfoReceiver(mockActor, mockMF, peers, time.Millisecond, dummyReq) + target.StartGet() + + if !tt.wantTimeout { + timer := time.NewTimer(time.Second * 2) + select { + case resp := <-replyChan: + if (resp.Err != nil) != tt.wantErrResp { + t.Errorf("resp error %v, wantErr %v ", resp.Err, tt.wantErrResp) + } + case <-timer.C: + t.Errorf("timeout occurred, want no time") + } + } else { + timer := time.NewTimer(time.Millisecond * 100) + select { + case resp := <-replyChan: + t.Errorf("unexpected response (%d mems, err:%v), want timeout", len(resp.Members), resp.Err) + case <-timer.C: + // expected timeout + } + } + }) + } +} + +func createDummyPeer(ctrl *gomock.Controller, pid peer.ID, state types.PeerState) *p2pmock.MockRemotePeer { + mockPeer := p2pmock.NewMockRemotePeer(ctrl) + mockPeer.EXPECT().State().Return(state).AnyTimes() + mockPeer.EXPECT().ID().Return(pid).AnyTimes() + mockPeer.EXPECT().ConsumeRequest(gomock.Any()).AnyTimes() + mockPeer.EXPECT().SendMessage(gomock.Any()).AnyTimes() + return mockPeer +} + +func createDummyMo(ctrl *gomock.Controller) *p2pmock.MockMsgOrder { + dummyMo := p2pmock.NewMockMsgOrder(ctrl) + dummyMo.EXPECT().IsNeedSign().Return(true).AnyTimes() + dummyMo.EXPECT().IsRequest().Return(true).AnyTimes() + dummyMo.EXPECT().GetProtocolID().Return(subproto.NewTxNotice).AnyTimes() + dummyMo.EXPECT().GetMsgID().Return(p2pcommon.NewMsgID()).AnyTimes() + return dummyMo +} + +func TestClusterInfoReceiver_trySendNextPeer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + type args struct { + stats []int + } + tests := []struct { + name string + args args + + wantSentCnt int + }{ + {"TAllRunning", args{[]int{1, 1, 1, 1, 1}}, 5}, + {"TNoPeers", args{[]int{}}, 0}, + {"TNoRunning", args{[]int{0, 0, 0, 0, 0}}, 0}, + {"TMixed", args{[]int{0, 0, 1, 1, 1}}, 3}, + {"TMixed2", args{[]int{1, 1, 0, 0, 0}}, 2}, + {"TMixed3", args{[]int{1, 0, 1, 0, 0, 1}}, 3}, + {"TMixed4", args{[]int{0, 1, 0, 1, 0, 1, 0}}, 3}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockActor := p2pmock.NewMockActorService(ctrl) + mockMF := p2pmock.NewMockMoFactory(ctrl) + mockMo := createDummyMo(ctrl) + mockMF.EXPECT().NewMsgBlockRequestOrder(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockMo).Times(tt.wantSentCnt) + peers := make([]p2pcommon.RemotePeer, 0, len(tt.args.stats)) + for _, run := range tt.args.stats { + dummyPeerID, _ := peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") + stat := types.RUNNING + if run == 0 { + stat = types.STOPPING + } + peers = append(peers, createDummyPeer(ctrl, dummyPeerID, stat)) + } + + sentCnt := 0 + replyChan := make(chan *message.GetClusterRsp) + dummyReq := &message.GetCluster{ReplyC: replyChan} + target := NewClusterInfoReceiver(mockActor, mockMF, peers, time.Millisecond, dummyReq) + for target.trySendNextPeer() { + sentCnt++ + } + + if sentCnt != tt.wantSentCnt { + t.Errorf("resp error %v, wantErr %v ", sentCnt, tt.wantSentCnt) + } + }) + } +} + +func TestClusterInfoReceiver_ReceiveResp(t *testing.T) { + sampleChainID := []byte("testChain") + members := make([]*types.MemberAttr,4) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + type args struct { + stats []int + } + tests := []struct { + name string + args args + + wantSentCnt int // count of sent to remote peers + wantTimeout bool // whether reciever returns result or not (=timeout) + wantErrResp bool // result with error or not + }{ + {"TAllRet", args{[]int{1, 1, 1, 1, 1}}, 1, false,false}, + {"TErrRet", 
args{[]int{0, 0, 0, 0, 0}}, 5, false, true}, + {"TMixed", args{[]int{0, 0, 1, 1, 1}}, 3, false, false}, + {"TTimeout", args{[]int{0, 0}}, 3, true, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + peers := make([]p2pcommon.RemotePeer, 0, len(tt.args.stats)) + + mockActor := p2pmock.NewMockActorService(ctrl) + mockMF := p2pmock.NewMockMoFactory(ctrl) + mockMo := createDummyMo(ctrl) + mockMF.EXPECT().NewMsgBlockRequestOrder(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockMo).Times(tt.wantSentCnt) + + replyChan := make(chan *message.GetClusterRsp) + dummyReq := &message.GetCluster{ReplyC: replyChan} + target := NewClusterInfoReceiver(mockActor, mockMF, peers, time.Second, dummyReq) + + seq := int32(0) + for i:=0; i<5; i++ { + dummyPeerID, _ := peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") + stat := types.RUNNING + mockPeer := p2pmock.NewMockRemotePeer(ctrl) + mockPeer.EXPECT().State().Return(stat).AnyTimes() + mockPeer.EXPECT().ID().Return(dummyPeerID).AnyTimes() + mockPeer.EXPECT().ConsumeRequest(gomock.Any()).AnyTimes() + mockPeer.EXPECT().SendMessage(gomock.Any()).Do(func(mo p2pcommon.MsgOrder) { + time.Sleep(time.Millisecond*5) + callSeq := atomic.LoadInt32(&seq) + msg := p2pmock.NewMockMessage(ctrl) + msg.EXPECT().ID().Return(p2pcommon.NewMsgID()).AnyTimes() + msg.EXPECT().OriginalID().Return(p2pcommon.NewMsgID()).AnyTimes() + msg.EXPECT().Timestamp().Return(time.Now().UnixNano()).AnyTimes() + msg.EXPECT().Subprotocol().Return(subproto.GetClusterResponse).AnyTimes() + if callSeq < int32(len(tt.args.stats)) { + err := "" + if tt.args.stats[callSeq] == 0 { + err = "getcluster fail" + } + body := &types.GetClusterInfoResponse{ChainID:sampleChainID, MbrAttrs:members, Error:err} + atomic.AddInt32(&seq, 1) + go target.ReceiveResp(msg, body) + } else { + atomic.AddInt32(&seq, 1) + } + }).MaxTimes(1) + peers = append(peers, mockPeer) + } + // forcely inject peers + target.peers = peers + + target.StartGet() + + if !tt.wantTimeout { + timer := time.NewTimer(time.Second * 2) + select { + case resp := <-replyChan: + if (resp.Err != nil) != tt.wantErrResp { + t.Errorf("resp error %v, wantErr %v ", resp.Err, tt.wantErrResp) + } + // receiver return valid result + if !tt.wantErrResp { + if !bytes.Equal(resp.ChainID, sampleChainID) { + t.Errorf("resp chainid %v, want %v ",resp.ChainID, sampleChainID) + } + if len(resp.Members) != len(members) { + t.Errorf("resp members %v, want %v ",resp.Members, len(members)) + } + } + case <-timer.C: + t.Errorf("timeout occurred, want no time") + } + } else { + timer := time.NewTimer(time.Millisecond * 100) + select { + case resp := <-replyChan: + t.Errorf("unexpected response (%d mems, err:%v), want timeout", len(resp.Members), resp.Err) + case <-timer.C: + // expected timeout + } + } + + }) + } +} diff --git a/p2p/reconnect.go b/p2p/reconnect.go index 7e8f1a5da..25253117f 100644 --- a/p2p/reconnect.go +++ b/p2p/reconnect.go @@ -7,11 +7,6 @@ package p2p import ( "math" "time" - - "github.com/aergoio/aergo/p2p/p2pcommon" - "github.com/aergoio/aergo/p2p/p2putil" - - "github.com/aergoio/aergo-lib/log" ) var ( @@ -24,40 +19,6 @@ func init() { durations = generateExpDuration(20, 0.6, maxTrial) } -type reconnectJob struct { - meta p2pcommon.PeerMeta - trial int - pm p2pcommon.PeerManager - logger *log.Logger - - cancel chan struct{} -} - -func newReconnectRunner(meta p2pcommon.PeerMeta, pm p2pcommon.PeerManager, logger *log.Logger) *reconnectJob { - return &reconnectJob{meta: meta, trial: 0, pm: pm, cancel: 
make(chan struct{}, 1), logger: logger} -} -func (rj *reconnectJob) runJob() { - timer := time.NewTimer(getNextInterval(rj.trial)) -RETRYLOOP: - for { - // wait for duration - select { - case <-timer.C: - _, found := rj.pm.GetPeer(rj.meta.ID) - if found { - break RETRYLOOP - } - rj.logger.Debug().Str("peer_meta", p2putil.ShortMetaForm(rj.meta)).Int("trial", rj.trial).Msg("Trying to connect") - rj.pm.AddNewPeer(rj.meta) - rj.trial++ - timer.Reset(getNextInterval(rj.trial)) - case <-rj.cancel: - break RETRYLOOP - } - } - //rj.rm.jobFinished(rj.meta.ID) -} - func getNextInterval(trial int) time.Duration { if trial < maxTrial { return durations[trial] diff --git a/p2p/remotepeer.go b/p2p/remotepeer.go index 3f1ad88a2..b2731e066 100644 --- a/p2p/remotepeer.go +++ b/p2p/remotepeer.go @@ -7,6 +7,8 @@ package p2p import ( "fmt" + "github.com/pkg/errors" + "runtime/debug" "sync" "time" @@ -14,7 +16,6 @@ import ( "github.com/aergoio/aergo/p2p/subproto" "github.com/aergoio/aergo/p2p/metric" - "github.com/golang/protobuf/proto" net "github.com/libp2p/go-libp2p-net" lru "github.com/hashicorp/golang-lru" @@ -25,11 +26,8 @@ import ( peer "github.com/libp2p/go-libp2p-peer" ) -var TimeoutError error - -func init() { - TimeoutError = fmt.Errorf("timeout") -} +var TimeoutError = errors.New("timeout") +var CancelError = errors.New("canceled") type requestInfo struct { cTime time.Time @@ -37,13 +35,6 @@ type requestInfo struct { receiver p2pcommon.ResponseReceiver } -// ResponseReceiver returns true when receiver handled it, or false if this receiver is not the expected handler. -// NOTE: the return value is temporal works for old implementation and will be remove later. - -func dummyResponseReceiver(msg p2pcommon.Message, msgBody proto.Message) bool { - return false -} - // remotePeerImpl represent remote peer to which is connected type remotePeerImpl struct { logger *log.Logger @@ -72,10 +63,12 @@ type remotePeerImpl struct { handlers map[p2pcommon.SubProtocol]p2pcommon.MessageHandler // TODO make automatic disconnect if remote peer cause too many wrong message - blkHashCache *lru.Cache txHashCache *lru.Cache - lastNotice *types.LastBlockStatus + lastStatus *types.LastBlockStatus + // lastBlkNoticeTime is time that local peer sent NewBlockNotice to this remote peer + lastBlkNoticeTime time.Time + skipCnt int32 txQueueLock *sync.Mutex txNoticeQueue *p2putil.PressableQueue @@ -96,7 +89,7 @@ func newRemotePeer(meta p2pcommon.PeerMeta, manageNum uint32, pm p2pcommon.PeerM pingDuration: defaultPingInterval, state: types.STARTING, - lastNotice: &types.LastBlockStatus{}, + lastStatus: &types.LastBlockStatus{}, stopChan: make(chan struct{}, 1), closeWrite: make(chan struct{}), @@ -142,6 +135,14 @@ func (p *remotePeerImpl) Name() string { return p.name } +func (p *remotePeerImpl) Version() string { + return p.meta.Version +} + +func (p *remotePeerImpl) AddMessageHandler(subProtocol p2pcommon.SubProtocol, handler p2pcommon.MessageHandler) { + p.handlers[subProtocol] = handler +} + func (p *remotePeerImpl) MF() p2pcommon.MoFactory { return p.mf } @@ -151,8 +152,8 @@ func (p *remotePeerImpl) State() types.PeerState { return p.state.Get() } -func (p *remotePeerImpl) LastNotice() *types.LastBlockStatus { - return p.lastNotice +func (p *remotePeerImpl) LastStatus() *types.LastBlockStatus { + return p.lastStatus } // runPeer should be called by go routine @@ -195,7 +196,7 @@ func (p *remotePeerImpl) runWrite() { cleanupTicker := time.NewTicker(cleanRequestInterval) defer func() { if r := recover(); r != nil { - 
p.logger.Panic().Str(p2putil.LogPeerName, p.Name()).Str("recover", fmt.Sprint(r)).Msg("There were panic in runWrite ") + p.logger.Panic().Str("callstack", string(debug.Stack())).Str(p2putil.LogPeerName, p.Name()).Str("recover", fmt.Sprint(r)).Msg("There were panic in runWrite ") } }() @@ -258,7 +259,7 @@ func (p *remotePeerImpl) handleMsg(msg p2pcommon.Message) error { subProto := msg.Subprotocol() defer func() { if r := recover(); r != nil { - p.logger.Error().Interface("panic", r).Msg("There were panic in handler.") + p.logger.Error().Str(p2putil.LogProtoID, subProto.String()).Str("callstack", string(debug.Stack())).Interface("panic", r).Msg("There were panic in handler.") err = fmt.Errorf("internal error") } }() @@ -362,12 +363,12 @@ func (p *remotePeerImpl) ConsumeRequest(originalID p2pcommon.MsgID) { } // requestIDNotFoundReceiver is to handle response msg which the original message is not identified -func (p *remotePeerImpl) requestIDNotFoundReceiver(msg p2pcommon.Message, msgBody proto.Message) bool { +func (p *remotePeerImpl) requestIDNotFoundReceiver(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) bool { return true } // passThroughReceiver is bypass message to legacy handler. -func (p *remotePeerImpl) passThroughReceiver(msg p2pcommon.Message, msgBody proto.Message) bool { +func (p *remotePeerImpl) passThroughReceiver(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) bool { return false } @@ -494,7 +495,7 @@ func (p *remotePeerImpl) UpdateTxCache(hashes []types.TxID) []types.TxID { } func (p *remotePeerImpl) UpdateLastNotice(blkHash []byte, blkNumber uint64) { - p.lastNotice = &types.LastBlockStatus{time.Now(), blkHash, blkNumber} + p.lastStatus = &types.LastBlockStatus{time.Now(), blkHash, blkNumber} } func (p *remotePeerImpl) sendGoAway(msg string) { diff --git a/p2p/remotepeer_test.go b/p2p/remotepeer_test.go index caf5281be..d1fab4b9a 100644 --- a/p2p/remotepeer_test.go +++ b/p2p/remotepeer_test.go @@ -16,7 +16,6 @@ import ( "github.com/aergoio/aergo/p2p/subproto" "github.com/gofrs/uuid" "github.com/golang/mock/gomock" - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/aergoio/aergo/p2p/p2pmock" @@ -121,9 +120,9 @@ func TestRemotePeer_pruneRequests(t *testing.T) { p := newRemotePeer(sampleMeta, 0, mockPeerManager, mockActorServ, logger, nil, nil, mockStream, nil) t.Run(tt.name, func(t *testing.T) { mid1, mid2, midn := p2pcommon.NewMsgID(), p2pcommon.NewMsgID(), p2pcommon.NewMsgID() - p.requests[mid1] = &requestInfo{cTime: time.Now().Add(time.Minute * -61), reqMO: &pbRequestOrder{pbMessageOrder{message: &V030Message{id: mid1}}, nil}} - p.requests[mid2] = &requestInfo{cTime: time.Now().Add(time.Minute * -60).Add(time.Second * -1), reqMO: &pbRequestOrder{pbMessageOrder{message: &V030Message{id: mid2}}, nil}} - p.requests[midn] = &requestInfo{cTime: time.Now().Add(time.Minute * -59), reqMO: &pbRequestOrder{pbMessageOrder{message: &V030Message{id: midn}}, nil}} + p.requests[mid1] = &requestInfo{cTime: time.Now().Add(time.Minute * -61), reqMO: &pbRequestOrder{pbMessageOrder{message: &MessageValue{id: mid1}}, nil}} + p.requests[mid2] = &requestInfo{cTime: time.Now().Add(time.Minute * -60).Add(time.Second * -1), reqMO: &pbRequestOrder{pbMessageOrder{message: &MessageValue{id: mid2}}, nil}} + p.requests[midn] = &requestInfo{cTime: time.Now().Add(time.Minute * -59), reqMO: &pbRequestOrder{pbMessageOrder{message: &MessageValue{id: midn}}, nil}} p.pruneRequests() assert.Equal(t, 1, len(p.requests)) @@ -341,10 +340,10 @@ func 
TestRemotePeerImpl_UpdateBlkCache(t *testing.T) { for _, hash := range test.inCache { target.blkHashCache.Add(hash, true) } - target.lastNotice = &types.LastBlockStatus{BlockHash: test.prevLastBlk[:], BlockNumber: 0, CheckTime: time.Now()} + target.lastStatus = &types.LastBlockStatus{BlockHash: test.prevLastBlk[:], BlockNumber: 0, CheckTime: time.Now()} actual := target.UpdateBlkCache(test.hash[:], 0) assert.Equal(t, test.expected, actual) - assert.True(t, bytes.Equal(test.hash[:], target.LastNotice().BlockHash)) + assert.True(t, bytes.Equal(test.hash[:], target.LastStatus().BlockHash)) }) } } @@ -387,7 +386,7 @@ func TestRemotePeerImpl_GetReceiver(t *testing.T) { for i := 0; i < idSize; i++ { idList[i] = p2pcommon.NewMsgID() if i < 5 { - recvList[idList[i]] = func(msg p2pcommon.Message, msgBody proto.Message) bool { + recvList[idList[i]] = func(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) bool { logger.Debug().Int("seq", i).Msg("receiver called") return true } @@ -422,7 +421,7 @@ func TestRemotePeerImpl_GetReceiver(t *testing.T) { } actual := p.GetReceiver(test.inID) assert.NotNil(t, actual) - dummyMsg := &V030Message{id: p2pcommon.NewMsgID(), originalID: test.inID} + dummyMsg := p2pcommon.NewSimpleRespMsgVal(subproto.AddressesResponse, p2pcommon.NewMsgID(), test.inID) assert.Equal(t, test.receiverReturn, actual(dummyMsg, nil)) // after consuming request, GetReceiver always return requestIDNotFoundReceiver, which always return true diff --git a/p2p/subproto/addrs.go b/p2p/subproto/addrs.go index ca5ece1f1..0d71b3c68 100644 --- a/p2p/subproto/addrs.go +++ b/p2p/subproto/addrs.go @@ -10,7 +10,6 @@ import ( "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/types" - "github.com/golang/protobuf/proto" peer "github.com/libp2p/go-libp2p-peer" ) @@ -33,11 +32,11 @@ func NewAddressesReqHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, return ph } -func (ph *addressesRequestHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (ph *addressesRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.AddressesRequest{}) } -func (ph *addressesRequestHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (ph *addressesRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { peerID := ph.peer.ID() remotePeer := ph.peer data := msgBody.(*types.AddressesRequest) @@ -98,11 +97,11 @@ func NewAddressesRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer return ph } -func (ph *addressesResponseHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (ph *addressesResponseHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.AddressesResponse{}) } -func (ph *addressesResponseHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (ph *addressesResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := ph.peer data := msgBody.(*types.AddressesResponse) p2putil.DebugLogReceiveResponseMsg(ph.logger, ph.protocol, msg.ID().String(), msg.OriginalID().String(), remotePeer, len(data.GetPeers())) diff --git a/p2p/subproto/base.go b/p2p/subproto/base.go index 09d8ae214..664cff863 100644 --- a/p2p/subproto/base.go +++ b/p2p/subproto/base.go @@ -5,8 +5,6 @@ import ( "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/p2p/p2pcommon" - - "github.com/golang/protobuf/proto" ) // 
func(msg *types.P2PMessage) @@ -22,10 +20,9 @@ type BaseMsgHandler struct { logger *log.Logger timestamp time.Time - prototype proto.Message } -func (bh *BaseMsgHandler) CheckAuth(msg p2pcommon.Message, msgBody proto.Message) error { +func (bh *BaseMsgHandler) CheckAuth(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) error { // check permissions // or etc... @@ -36,7 +33,7 @@ func (bh *BaseMsgHandler) PreHandle() { bh.timestamp = time.Now() } -func (bh *BaseMsgHandler) PostHandle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *BaseMsgHandler) PostHandle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { bh.logger.Debug(). Str("elapsed", time.Since(bh.timestamp).String()). Str("protocol", msg.Subprotocol().String()). diff --git a/p2p/subproto/block.go b/p2p/subproto/block.go index 3ba88a8df..aa301f33b 100644 --- a/p2p/subproto/block.go +++ b/p2p/subproto/block.go @@ -11,10 +11,9 @@ import ( "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/internal/enc" "github.com/aergoio/aergo/message" - "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/types" - "github.com/golang/protobuf/proto" ) type listBlockHeadersRequestHandler struct { @@ -53,11 +52,11 @@ func NewListBlockHeadersReqHandler(pm p2pcommon.PeerManager, peer p2pcommon.Remo return bh } -func (bh *listBlockHeadersRequestHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *listBlockHeadersRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetBlockHeadersRequest{}) } -func (bh *listBlockHeadersRequestHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *listBlockHeadersRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.GetBlockHeadersRequest) p2putil.DebugLogReceiveMsg(bh.logger, bh.protocol, msg.ID().String(), remotePeer, data) @@ -116,11 +115,11 @@ func NewListBlockRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer return bh } -func (bh *listBlockHeadersResponseHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *listBlockHeadersResponseHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetBlockHeadersResponse{}) } -func (bh *listBlockHeadersResponseHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *listBlockHeadersResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.GetBlockHeadersResponse) p2putil.DebugLogReceiveResponseMsg(bh.logger, bh.protocol, msg.ID().String(), msg.OriginalID().String(), bh.peer, len(data.Hashes)) @@ -137,11 +136,11 @@ func NewNewBlockNoticeHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePee return bh } -func (bh *newBlockNoticeHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *newBlockNoticeHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.NewBlockNotice{}) } -func (bh *newBlockNoticeHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *newBlockNoticeHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.NewBlockNotice) // remove to verbose log @@ -177,11 +176,11 @@ func NewGetAncestorReqHandler(pm 
p2pcommon.PeerManager, peer p2pcommon.RemotePee return bh } -func (bh *getAncestorRequestHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *getAncestorRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetAncestorRequest{}) } -func (bh *getAncestorRequestHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *getAncestorRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.GetAncestorRequest) status := types.ResultStatus_OK @@ -225,11 +224,11 @@ func NewGetAncestorRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePe return bh } -func (bh *getAncestorResponseHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *getAncestorResponseHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetAncestorResponse{}) } -func (bh *getAncestorResponseHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *getAncestorResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { data := msgBody.(*types.GetAncestorResponse) p2putil.DebugLogReceiveResponseMsg(bh.logger, bh.protocol, msg.ID().String(), msg.OriginalID().String(), bh.peer, fmt.Sprintf("status=%d, ancestor hash=%s,no=%d", data.Status, enc.ToString(data.AncestorHash), data.AncestorNo)) diff --git a/p2p/subproto/blockhash.go b/p2p/subproto/blockhash.go index 1833c937e..f9dfc4cd6 100644 --- a/p2p/subproto/blockhash.go +++ b/p2p/subproto/blockhash.go @@ -11,10 +11,9 @@ import ( "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/internal/enc" - "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/types" - "github.com/golang/protobuf/proto" ) type getHashRequestHandler struct { @@ -32,11 +31,11 @@ func NewGetHashesReqHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, return bh } -func (bh *getHashRequestHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *getHashRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetHashesRequest{}) } -func (bh *getHashRequestHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *getHashRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.GetHashesRequest) p2putil.DebugLogReceiveMsg(bh.logger, bh.protocol, msg.ID().String(), remotePeer, data) @@ -124,11 +123,11 @@ func NewGetHashesRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer return bh } -func (bh *getHashResponseHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *getHashResponseHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetHashesResponse{}) } -func (bh *getHashResponseHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *getHashResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.GetHashesResponse) p2putil.DebugLogReceiveResponseMsg(bh.logger, bh.protocol, msg.ID().String(), msg.OriginalID().String(), bh.peer, fmt.Sprintf("blk_cnt=%d,hasNext=%t", len(data.Hashes), data.HasNext)) @@ -152,11 +151,11 @@ func NewGetHashByNoReqHandler(pm 
p2pcommon.PeerManager, peer p2pcommon.RemotePee return bh } -func (bh *getHashByNoRequestHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *getHashByNoRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetHashByNo{}) } -func (bh *getHashByNoRequestHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *getHashByNoRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.GetHashByNo) p2putil.DebugLogReceiveMsg(bh.logger, bh.protocol, msg.ID().String(), remotePeer, data) @@ -186,11 +185,11 @@ func NewGetHashByNoRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePe return bh } -func (bh *getHashByNoResponseHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *getHashByNoResponseHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetHashByNoResponse{}) } -func (bh *getHashByNoResponseHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *getHashByNoResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { data := msgBody.(*types.GetHashByNoResponse) p2putil.DebugLogReceiveResponseMsg(bh.logger, bh.protocol, msg.ID().String(), msg.OriginalID().String(), bh.peer, fmt.Sprintf("%s=%s", p2putil.LogBlkHash, enc.ToString(data.BlockHash))) diff --git a/p2p/subproto/blockhash_test.go b/p2p/subproto/blockhash_test.go index db4517acf..4085116be 100644 --- a/p2p/subproto/blockhash_test.go +++ b/p2p/subproto/blockhash_test.go @@ -268,15 +268,15 @@ type testDoubleHashesRespFactory struct { lastStatus types.ResultStatus } -func (f *testDoubleHashesRespFactory) NewMsgRequestOrder(expecteResponse bool, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.MsgOrder { +func (f *testDoubleHashesRespFactory) NewMsgRequestOrder(expecteResponse bool, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.MsgOrder { panic("implement me") } -func (f *testDoubleHashesRespFactory) NewMsgBlockRequestOrder(respReceiver p2pcommon.ResponseReceiver, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.MsgOrder { +func (f *testDoubleHashesRespFactory) NewMsgBlockRequestOrder(respReceiver p2pcommon.ResponseReceiver, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.MsgOrder { panic("implement me") } -func (f *testDoubleHashesRespFactory) NewMsgResponseOrder(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.MsgOrder { +func (f *testDoubleHashesRespFactory) NewMsgResponseOrder(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.MsgOrder { f.lastResp = message.(*types.GetHashesResponse) f.lastStatus = f.lastResp.Status return &testMo{message:&testMessage{id:reqID, subProtocol:protocolID}} @@ -297,7 +297,7 @@ func (f *testDoubleHashesRespFactory) NewMsgBPBroadcastOrder(noticeMsg *types.Bl // testDoubleMOFactory keep last created message and last result status of response message type testDoubleMOFactory struct { - lastResp p2pcommon.PbMessage + lastResp p2pcommon.MessageBody lastStatus types.ResultStatus } @@ -313,15 +313,15 @@ func (f *testDoubleMOFactory) NewMsgBPBroadcastOrder(noticeMsg *types.BlockProdu panic("implement me") } -func (f *testDoubleMOFactory) NewMsgRequestOrder(expecteResponse bool, protocolID 
p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.MsgOrder { +func (f *testDoubleMOFactory) NewMsgRequestOrder(expecteResponse bool, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.MsgOrder { panic("implement me") } -func (f *testDoubleMOFactory) NewMsgBlockRequestOrder(respReceiver p2pcommon.ResponseReceiver, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.MsgOrder { +func (f *testDoubleMOFactory) NewMsgBlockRequestOrder(respReceiver p2pcommon.ResponseReceiver, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.MsgOrder { panic("implement me") } -func (f *testDoubleMOFactory) NewMsgResponseOrder(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.MsgOrder { +func (f *testDoubleMOFactory) NewMsgResponseOrder(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) p2pcommon.MsgOrder { f.lastResp = message f.lastStatus = f.lastResp.(types.ResponseMessage).GetStatus() return &testMo{message:&testMessage{id:reqID, subProtocol:protocolID}} diff --git a/p2p/subproto/bp.go b/p2p/subproto/bp.go index 2c62a1180..fde3526cc 100644 --- a/p2p/subproto/bp.go +++ b/p2p/subproto/bp.go @@ -10,10 +10,9 @@ import ( "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/internal/enc" - "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/types" - "github.com/golang/protobuf/proto" ) type blockProducedNoticeHandler struct { @@ -28,11 +27,11 @@ func NewBlockProducedNoticeHandler(pm p2pcommon.PeerManager, peer p2pcommon.Remo return bh } -func (bh *blockProducedNoticeHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *blockProducedNoticeHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.BlockProducedNotice{}) } -func (bh *blockProducedNoticeHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *blockProducedNoticeHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.BlockProducedNotice) if data.Block == nil || len(data.Block.Hash) == 0 { diff --git a/p2p/subproto/getblock.go b/p2p/subproto/getblock.go index e59561632..3b801de2f 100644 --- a/p2p/subproto/getblock.go +++ b/p2p/subproto/getblock.go @@ -36,7 +36,7 @@ func NewBlockReqHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, log return bh } -func (bh *blockRequestHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *blockRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetBlockRequest{}) } @@ -44,7 +44,7 @@ const ( EmptyGetBlockResponseSize = 12 // roughly estimated maximum size if element is full ) -func (bh *blockRequestHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *blockRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.GetBlockRequest) p2putil.DebugLogReceiveMsg(bh.logger, bh.protocol, msg.ID().String(), remotePeer, len(data.Hashes)) @@ -130,11 +130,11 @@ func NewBlockRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, lo return bh } -func (bh *blockResponseHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (bh *blockResponseHandler) ParsePayload(rawbytes []byte) 
(p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetBlockResponse{}) } -func (bh *blockResponseHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (bh *blockResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := bh.peer data := msgBody.(*types.GetBlockResponse) if bh.logger.IsDebugEnabled() { diff --git a/p2p/subproto/getblock_test.go b/p2p/subproto/getblock_test.go index c80f5bdda..60508f151 100644 --- a/p2p/subproto/getblock_test.go +++ b/p2p/subproto/getblock_test.go @@ -13,7 +13,6 @@ import ( "github.com/aergoio/aergo/p2p/p2pmock" "github.com/aergoio/aergo/types" "github.com/golang/mock/gomock" - "github.com/golang/protobuf/proto" "github.com/libp2p/go-libp2p-peer" "github.com/stretchr/testify/assert" "testing" @@ -128,11 +127,11 @@ func TestBlockResponseHandler_handle(t *testing.T) { // 1. not exist receiver and consumed message //{"Tnothing",nil, true}, // 2. exist receiver and consume successfully - {"TexistAndConsume", func(msg p2pcommon.Message, body proto.Message) bool { + {"TexistAndConsume", func(msg p2pcommon.Message, body p2pcommon.MessageBody) bool { return true }, 0, 0}, // 2. exist receiver but not consumed - {"TExistWrong", func (msg p2pcommon.Message, msgBody proto.Message) bool { + {"TExistWrong", func (msg p2pcommon.Message, msgBody p2pcommon.MessageBody) bool { return false }, 1, 1}, // TODO: test cases diff --git a/p2p/subproto/getcluster.go b/p2p/subproto/getcluster.go new file mode 100644 index 000000000..1526ac12d --- /dev/null +++ b/p2p/subproto/getcluster.go @@ -0,0 +1,93 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package subproto + +import ( + "errors" + "github.com/aergoio/aergo-lib/log" + "github.com/aergoio/aergo/consensus" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" + "github.com/aergoio/aergo/types" +) + +var ( + ErrConsensusAccessorNotReady = errors.New("consensus accessor is not ready") +) + +type getClusterRequestHandler struct { + BaseMsgHandler + + consAcc consensus.ConsensusAccessor +} + +var _ p2pcommon.MessageHandler = (*getClusterRequestHandler)(nil) + +type getClusterResponseHandler struct { + BaseMsgHandler +} + +var _ p2pcommon.MessageHandler = (*getClusterResponseHandler)(nil) + +// NewGetClusterReqHandler creates a handler for GetClusterInfoRequest messages +func NewGetClusterReqHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, logger *log.Logger, actor p2pcommon.ActorService, consAcc consensus.ConsensusAccessor) *getClusterRequestHandler { + ph := &getClusterRequestHandler{ + BaseMsgHandler: BaseMsgHandler{protocol: GetClusterRequest, pm: pm, peer: peer, actor: actor, logger: logger}, + consAcc: consAcc, + } + return ph +} + +func (ph *getClusterRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { + return p2putil.UnmarshalAndReturn(rawbytes, &types.GetClusterInfoRequest{}) +} + +func (ph *getClusterRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { + //peerID := ph.peer.ID() + remotePeer := ph.peer + data := msgBody.(*types.GetClusterInfoRequest) + p2putil.DebugLogReceiveMsg(ph.logger, ph.protocol, msg.ID().String(), remotePeer, data.String()) + + resp := &types.GetClusterInfoResponse{} + + // GetClusterInfo from consensus + if ph.consAcc == nil { + resp.Error = ErrConsensusAccessorNotReady.Error() + } else { + mbrs, chainID, err := ph.consAcc.ClusterInfo() + if err != nil { + resp.Error = err.Error() + } else { + resp.MbrAttrs = mbrs +
resp.ChainID = chainID + } + } + + remotePeer.SendMessage(remotePeer.MF().NewMsgResponseOrder(msg.ID(), GetClusterResponse, resp)) +} + +// NewGetClusterRespHandler creates a handler for GetClusterInfoResponse +func NewGetClusterRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, logger *log.Logger, actor p2pcommon.ActorService) *getClusterResponseHandler { + ph := &getClusterResponseHandler{BaseMsgHandler{protocol: GetClusterResponse, pm: pm, peer: peer, actor: actor, logger: logger}} + return ph +} + +func (ph *getClusterResponseHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { + return p2putil.UnmarshalAndReturn(rawbytes, &types.GetClusterInfoResponse{}) +} + +func (ph *getClusterResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { + remotePeer := ph.peer + data := msgBody.(*types.GetClusterInfoResponse) + p2putil.DebugLogReceiveResponseMsg(ph.logger, ph.protocol, msg.ID().String(), msg.OriginalID().String(), remotePeer, data.String()) + + if !remotePeer.GetReceiver(msg.OriginalID())(msg, data) { + // ignore dangling response + // TODO add penalty if needed + remotePeer.ConsumeRequest(msg.OriginalID()) + } + +} diff --git a/p2p/subproto/ping.go b/p2p/subproto/ping.go index ea20a30d6..187481018 100644 --- a/p2p/subproto/ping.go +++ b/p2p/subproto/ping.go @@ -10,10 +10,9 @@ import ( "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/internal/enc" - "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/types" - "github.com/golang/protobuf/proto" ) type pingRequestHandler struct { @@ -40,11 +39,11 @@ func NewPingReqHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, logg return ph } -func (ph *pingRequestHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (ph *pingRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.Ping{}) } -func (ph *pingRequestHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (ph *pingRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := ph.peer pingData := msgBody.(*types.Ping) p2putil.DebugLogReceiveMsg(ph.logger, ph.protocol, msg.ID().String(), remotePeer, fmt.Sprintf("blockHash=%s blockNo=%d", enc.ToString(pingData.BestBlockHash), pingData.BestHeight)) @@ -66,11 +65,11 @@ func NewPingRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, log return ph } -func (ph *pingResponseHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (ph *pingResponseHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.Pong{}) } -func (ph *pingResponseHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (ph *pingResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := ph.peer //data := msgBody.(*types.Pong) p2putil.DebugLogReceiveMsg(ph.logger, ph.protocol, msg.ID().String(), remotePeer, nil) @@ -83,11 +82,11 @@ func NewGoAwayHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, logge return ph } -func (ph *goAwayHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (ph *goAwayHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GoAwayNotice{}) } -func (ph *goAwayHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (ph 
*goAwayHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { data := msgBody.(*types.GoAwayNotice) p2putil.DebugLogReceiveMsg(ph.logger, ph.protocol, msg.ID().String(), ph.peer, data.Message) diff --git a/p2p/subproto/subprotocols.go b/p2p/subproto/subprotocols.go index ecae33018..0db6119b1 100644 --- a/p2p/subproto/subprotocols.go +++ b/p2p/subproto/subprotocols.go @@ -39,4 +39,10 @@ const ( BlockProducedNotice p2pcommon.SubProtocol = 0x030 + iota ) +const ( + _ p2pcommon.SubProtocol = 0x3100 + iota + GetClusterRequest + GetClusterResponse +) + //go:generate stringer -type=SubProtocol diff --git a/p2p/subproto/tx.go b/p2p/subproto/tx.go index 592f524eb..760cf696e 100644 --- a/p2p/subproto/tx.go +++ b/p2p/subproto/tx.go @@ -9,8 +9,8 @@ import ( "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/internal/enc" "github.com/aergoio/aergo/message" - "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/types" "github.com/golang/protobuf/proto" ) @@ -42,11 +42,11 @@ func NewTxReqHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, logger return th } -func (th *txRequestHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (th *txRequestHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetTransactionsRequest{}) } -func (th *txRequestHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (th *txRequestHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := th.peer reqHashes := msgBody.(*types.GetTransactionsRequest).Hashes @@ -123,7 +123,7 @@ func (th *txRequestHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) status = types.ResultStatus_NOT_FOUND } th.logger.Debug().Int(p2putil.LogTxCount, len(hashes)). 
- Str(p2putil.LogOrgReqID, msg.ID().String()).Str(p2putil.LogRespStatus,status.String()).Msg("Sending last part response") + Str(p2putil.LogOrgReqID, msg.ID().String()).Str(p2putil.LogRespStatus, status.String()).Msg("Sending last part response") // generate response message resp := &types.GetTransactionsResponse{ @@ -139,11 +139,11 @@ func NewTxRespHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, logge return th } -func (th *txResponseHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (th *txResponseHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.GetTransactionsResponse{}) } -func (th *txResponseHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (th *txResponseHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { data := msgBody.(*types.GetTransactionsResponse) p2putil.DebugLogReceiveResponseMsg(th.logger, th.protocol, msg.ID().String(), msg.OriginalID().String(), th.peer, len(data.Txs)) @@ -164,11 +164,11 @@ func NewNewTxNoticeHandler(pm p2pcommon.PeerManager, peer p2pcommon.RemotePeer, return th } -func (th *newTxNoticeHandler) ParsePayload(rawbytes []byte) (proto.Message, error) { +func (th *newTxNoticeHandler) ParsePayload(rawbytes []byte) (p2pcommon.MessageBody, error) { return p2putil.UnmarshalAndReturn(rawbytes, &types.NewTransactionsNotice{}) } -func (th *newTxNoticeHandler) Handle(msg p2pcommon.Message, msgBody proto.Message) { +func (th *newTxNoticeHandler) Handle(msg p2pcommon.Message, msgBody p2pcommon.MessageBody) { remotePeer := th.peer data := msgBody.(*types.NewTransactionsNotice) // remove to verbose log diff --git a/p2p/subproto/tx_test.go b/p2p/subproto/tx_test.go index 50a9999eb..7d69cf285 100644 --- a/p2p/subproto/tx_test.go +++ b/p2p/subproto/tx_test.go @@ -73,7 +73,7 @@ func TestTxRequestHandler_handle(t *testing.T) { actor.EXPECT().CallRequestDefaultTimeout(message.MemPoolSvc, gomock.AssignableToTypeOf(&message.MemPoolExistEx{})).Return(&message.MemPoolExistExRsp{Txs: dummyTxs}, nil).Times(1) msgHelper.EXPECT().ExtractTxsFromResponseAndError(gomock.AssignableToTypeOf(&message.MemPoolExistExRsp{}), nil).Return(dummyTxs, nil).Times(1) hashes := sampleTxs[:1] - mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) { + mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) { resp := message.(*types.GetTransactionsResponse) assert.Equal(tt, types.ResultStatus_OK, resp.Status) assert.Equal(tt, 1, len(resp.Hashes)) @@ -92,7 +92,7 @@ func TestTxRequestHandler_handle(t *testing.T) { actor.EXPECT().CallRequestDefaultTimeout(message.MemPoolSvc, gomock.AssignableToTypeOf(&message.MemPoolExistEx{})).Return(&message.MemPoolExistExRsp{Txs: dummyTxs}, nil).Times(1) msgHelper.EXPECT().ExtractTxsFromResponseAndError(gomock.AssignableToTypeOf(&message.MemPoolExistExRsp{}), nil).Return(dummyTxs, nil).Times(1) hashes := sampleTxs - mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) { + mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, 
gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) { resp := message.(*types.GetTransactionsResponse) assert.Equal(tt, types.ResultStatus_OK, resp.Status) assert.Equal(tt, len(sampleTxs), len(resp.Hashes)) @@ -112,7 +112,7 @@ func TestTxRequestHandler_handle(t *testing.T) { } actor.EXPECT().CallRequestDefaultTimeout(message.MemPoolSvc, gomock.AssignableToTypeOf(&message.MemPoolExistEx{})).Return(&message.MemPoolExistExRsp{Txs: dummyTxs}, nil).Times(1) msgHelper.EXPECT().ExtractTxsFromResponseAndError(gomock.AssignableToTypeOf(&message.MemPoolExistExRsp{}), nil).Return(dummyTxs, nil).Times(1) - mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) { + mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) { resp := message.(*types.GetTransactionsResponse) assert.Equal(tt, types.ResultStatus_OK, resp.Status) assert.Equal(tt, len(dummyTxs), len(resp.Hashes)) @@ -133,7 +133,7 @@ func TestTxRequestHandler_handle(t *testing.T) { //}), nil).Return(dummyTx, nil) msgHelper.EXPECT().ExtractTxsFromResponseAndError(&MempoolRspTxCountMatcher{0}, nil).Return(nil, nil).Times(1) hashes := sampleTxs - mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) { + mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) { resp := message.(*types.GetTransactionsResponse) assert.Equal(tt, types.ResultStatus_NOT_FOUND, resp.Status) assert.Equal(tt, 0, len(resp.Hashes)) @@ -148,7 +148,7 @@ func TestTxRequestHandler_handle(t *testing.T) { //msgHelper.EXPECT().ExtractTxsFromResponseAndError", nil, gomock.AssignableToTypeOf("error")).Return(nil, fmt.Errorf("error")) msgHelper.EXPECT().ExtractTxsFromResponseAndError(nil, &WantErrMatcher{true}).Return(nil, fmt.Errorf("error")).Times(0) hashes := sampleTxs - mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) { + mockMF.EXPECT().NewMsgResponseOrder(sampleMsgID, GetTXsResponse, gomock.AssignableToTypeOf(&types.GetTransactionsResponse{})).Do(func(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.MessageBody) { resp := message.(*types.GetTransactionsResponse) // TODO check if the changed behavior is fair or not. 
assert.Equal(tt, types.ResultStatus_NOT_FOUND, resp.Status) diff --git a/p2p/syncmanager_test.go b/p2p/syncmanager_test.go index 1b70a61d3..59b5d7e86 100644 --- a/p2p/syncmanager_test.go +++ b/p2p/syncmanager_test.go @@ -7,6 +7,7 @@ package p2p import ( "bytes" + "github.com/aergoio/aergo/p2p/subproto" "testing" "github.com/aergoio/aergo-lib/log" @@ -246,7 +247,7 @@ func TestSyncManager_HandleGetBlockResponse(t *testing.T) { dummyMsgID := p2pcommon.NewMsgID() target := newSyncManager(mockActor, mockPM, logger).(*syncManager) - msg := &V030Message{originalID: dummyMsgID} + msg := p2pcommon.NewSimpleRespMsgVal(subproto.PingResponse, p2pcommon.NewMsgID(), dummyMsgID) resp := &types.GetBlockResponse{Blocks: test.respBlocks} target.HandleGetBlockResponse(mockPeer, msg, resp) diff --git a/p2p/networktransport.go b/p2p/transport/networktransport.go similarity index 85% rename from p2p/networktransport.go rename to p2p/transport/networktransport.go index b33130f2b..3f938bac6 100644 --- a/p2p/networktransport.go +++ b/p2p/transport/networktransport.go @@ -3,7 +3,7 @@ * @copyright defined in aergo/LICENSE.txt */ -package p2p +package transport import ( "context" @@ -14,6 +14,7 @@ import ( "time" "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pkey" "github.com/aergoio/aergo/p2p/p2putil" host "github.com/libp2p/go-libp2p-host" inet "github.com/libp2p/go-libp2p-net" @@ -52,18 +53,9 @@ type networkTransport struct { var _ p2pcommon.NetworkTransport = (*networkTransport)(nil) -func (sl *networkTransport) PrivateKey() crypto.PrivKey { - return sl.privateKey -} -func (sl *networkTransport) PublicKey() crypto.PubKey { - return sl.publicKey -} func (sl *networkTransport) SelfMeta() p2pcommon.PeerMeta { return sl.selfMeta } -func (sl *networkTransport) SelfNodeID() peer.ID { - return sl.selfMeta.ID -} func NewNetworkTransport(conf *cfg.P2PConfig, logger *log.Logger) *networkTransport { nt := &networkTransport{ @@ -79,9 +71,9 @@ func NewNetworkTransport(conf *cfg.P2PConfig, logger *log.Logger) *networkTransp func (sl *networkTransport) initNT() { // check Key and address - priv := NodePrivKey() - pub := NodePubKey() - peerID := NodeID() + priv := p2pkey.NodePrivKey() + pub := p2pkey.NodePubKey() + peerID := p2pkey.NodeID() sl.privateKey = priv sl.publicKey = pub @@ -125,6 +117,7 @@ func (sl *networkTransport) initSelfMeta(peerID peer.ID, noExpose bool) { sl.selfMeta.Port = uint32(protocolPort) sl.selfMeta.ID = peerID sl.selfMeta.Hidden = noExpose + sl.selfMeta.Version = p2pkey.NodeVersion() // bind address and port will be overriden if configuration is specified sl.bindAddress = ipAddress @@ -162,8 +155,8 @@ func (sl *networkTransport) AddStreamHandler(pid protocol.ID, handler inet.Strea // GetOrCreateStream try to connect and handshake to remote peer. it can be called after peermanager is inited. // It return true if peer is added or return false if failed to add peer or more suitable connection already exists. 
-func (sl *networkTransport) GetOrCreateStreamWithTTL(meta p2pcommon.PeerMeta, protocolID protocol.ID, ttl time.Duration) (inet.Stream, error) { - var peerAddr, err = PeerMetaToMultiAddr(meta) +func (sl *networkTransport) GetOrCreateStreamWithTTL(meta p2pcommon.PeerMeta, ttl time.Duration, protocolIDs ...protocol.ID) (inet.Stream, error) { + var peerAddr, err = p2putil.PeerMetaToMultiAddr(meta) if err != nil { sl.logger.Warn().Err(err).Str("addr", meta.IPAddress).Msg("invalid NPAddPeer address") return nil, fmt.Errorf("invalid IP address %s:%d", meta.IPAddress, meta.Port) @@ -171,16 +164,16 @@ func (sl *networkTransport) GetOrCreateStreamWithTTL(meta p2pcommon.PeerMeta, pr var peerID = meta.ID sl.Peerstore().AddAddr(peerID, peerAddr, ttl) ctx := context.Background() - s, err := sl.NewStream(ctx, meta.ID, protocolID) + s, err := sl.NewStream(ctx, meta.ID, protocolIDs...) if err != nil { - sl.logger.Info().Err(err).Str("addr", meta.IPAddress).Str(p2putil.LogPeerID, p2putil.ShortForm(meta.ID)).Str(p2putil.LogProtoID, string(protocolID)).Msg("Error while get stream") + sl.logger.Info().Err(err).Str("addr", meta.IPAddress).Str(p2putil.LogPeerID, p2putil.ShortForm(meta.ID)).Str("p2p_proto", p2putil.ProtocolIDsToString(protocolIDs)).Msg("Error while get stream") return nil, err } return s, nil } -func (sl *networkTransport) GetOrCreateStream(meta p2pcommon.PeerMeta, protocolID protocol.ID) (inet.Stream, error) { - return sl.GetOrCreateStreamWithTTL(meta, protocolID, getTTL(meta)) +func (sl *networkTransport) GetOrCreateStream(meta p2pcommon.PeerMeta, protocolIDs ...protocol.ID) (inet.Stream, error) { + return sl.GetOrCreateStreamWithTTL(meta, getTTL(meta), protocolIDs...) } func (sl *networkTransport) FindPeer(peerID peer.ID) bool { @@ -215,7 +208,7 @@ func (sl *networkTransport) ClosePeerConnection(peerID peer.ID) bool { func (sl *networkTransport) startListener() { var err error listens := make([]ma.Multiaddr, 0, 2) - listen, err := ToMultiAddr(sl.bindAddress, sl.bindPort) + listen, err := p2putil.ToMultiAddr(sl.bindAddress, sl.bindPort) if err != nil { panic("Can't estabilish listening address: " + err.Error()) } @@ -228,10 +221,8 @@ func (sl *networkTransport) startListener() { sl.logger.Fatal().Err(err).Str("addr", listen.String()).Msg("Couldn't listen from") panic(err.Error()) } - - sl.logger.Info().Str(p2putil.LogFullID, sl.SelfNodeID().Pretty()).Str(p2putil.LogPeerID, p2putil.ShortForm(sl.SelfNodeID())).Str("addr[0]", listens[0].String()). - Msg("Set self node's pid, and listening for connections") sl.Host = newHost + sl.logger.Info().Str(p2putil.LogFullID, sl.ID().Pretty()).Str(p2putil.LogPeerID, p2putil.ShortForm(sl.ID())).Str("addr[0]", listens[0].String()). 
Msg("Set self node's pid, and listening for connections") } func (sl *networkTransport) Stop() error { @@ -250,7 +241,7 @@ func (sl *networkTransport) GetAddressesOfPeer(peerID peer.ID) []string { // TTL return node's ttl func getTTL(m p2pcommon.PeerMeta) time.Duration { if m.Designated { - return DesignatedNodeTTL + return p2pcommon.DesignatedNodeTTL } - return DefaultNodeTTL + return p2pcommon.DefaultNodeTTL } diff --git a/p2p/transport/networktransport_test.go b/p2p/transport/networktransport_test.go new file mode 100644 index 000000000..2359b9908 --- /dev/null +++ b/p2p/transport/networktransport_test.go @@ -0,0 +1,243 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package transport + +import ( + "encoding/hex" + "fmt" + "net" + "testing" + "time" + + "github.com/aergoio/aergo-lib/log" + "github.com/aergoio/aergo/config" + cfg "github.com/aergoio/aergo/config" + "github.com/aergoio/aergo/message" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pkey" + "github.com/aergoio/aergo/p2p/p2pmock" + "github.com/aergoio/aergo/types" + "github.com/golang/mock/gomock" + "github.com/libp2p/go-libp2p-peer" + "github.com/stretchr/testify/assert" +) + +const ( + sampleKeyFile = "../../test/sample.key" +) + +func init() { + //sampleID := "16Uiu2HAmP2iRDpPumUbKhNnEngoxAUQWBmCyn7FaYUrkaDAMXJPJ" + baseCfg := &config.BaseConfig{AuthDir: "test"} + p2pCfg := &config.P2PConfig{NPKey: sampleKeyFile} + p2pkey.InitNodeInfo(baseCfg, p2pCfg, "0.0.1-test", log.NewLogger("test.transport")) +} + +// TODO split this test into two... one is to attempt make connection and the other is test peermanager if same peerid is given +// Ignoring test for now, for lack of abstraction on AergoPeer struct +func IgrenoreTestP2PServiceRunAddPeer(t *testing.T) { + var sampleBlockHash, _ = hex.DecodeString("4f461d85e869ade8a0544f8313987c33a9c06534e50c4ad941498299579bd7ac") + var sampleBlockHeight uint64 = 100215 + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockActor := p2pmock.NewMockActorService(ctrl) + dummyBlock := types.Block{Hash: sampleBlockHash, Header: &types.BlockHeader{BlockNo: sampleBlockHeight}} + mockActor.EXPECT().CallRequest(gomock.Any(), gomock.Any(), gomock.Any()).Return(message.GetBlockRsp{Block: &dummyBlock}, nil) + //mockMF := new(MockMoFactory) + target := &networkTransport{conf: config.NewServerContext("", "").GetDefaultConfig().(*config.Config).P2P, + logger: log.NewLogger("test.p2p")} + + //target.Host = &mockHost{peerstore.NewPeerstore(pstoremem.NewKeyBook(), pstoremem.NewAddrBook(), pstoremem.NewPeerMetadata())} + target.Host = p2pmock.NewMockHost(ctrl) + target.selfMeta.ID = peer.ID("gwegw") + + sampleAddr1 := p2pcommon.PeerMeta{ID: "ddd", IPAddress: "192.168.0.1", Port: 33888, Outbound: true} + sampleAddr2 := p2pcommon.PeerMeta{ID: "fff", IPAddress: "192.168.0.2", Port: 33888, Outbound: true} + target.GetOrCreateStream(sampleAddr1, p2pcommon.LegacyP2PSubAddr) + target.GetOrCreateStream(sampleAddr1, p2pcommon.LegacyP2PSubAddr) + time.Sleep(time.Second) + if len(target.Peerstore().Peers()) != 1 { + t.Errorf("Peer count : Expected %d, Actually %d", 1, len(target.Peerstore().Peers())) + } + target.GetOrCreateStream(sampleAddr2, p2pcommon.LegacyP2PSubAddr) + time.Sleep(time.Second * 1) + if len(target.Peerstore().Peers()) != 2 { + t.Errorf("Peer count : Expected %d, Actually %d", 2, len(target.Peerstore().Peers())) + } +} + +func Test_networkTransport_initSelfMeta(t *testing.T) { + logger := log.NewLogger("test.transport") + samplePeerID, _ := 
peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") + + type args struct { + peerID peer.ID + noExpose bool + } + tests := []struct { + name string + conf *cfg.P2PConfig + + args args + + wantSameAddr bool + wantPort uint32 + wantID peer.ID + wantHidden bool + }{ + {"TIP6", &cfg.P2PConfig{NetProtocolAddr: "fe80::dcbf:beff:fe87:e30a", NetProtocolPort: 7845}, args{samplePeerID, false}, true, 7845, samplePeerID, false}, + {"TIP4", &cfg.P2PConfig{NetProtocolAddr: "211.1.1.2", NetProtocolPort: 7845}, args{samplePeerID, false}, true, 7845, samplePeerID, false}, + {"TDN", &cfg.P2PConfig{NetProtocolAddr: "www.aergo.io", NetProtocolPort: 7845}, args{samplePeerID, false}, true, 7845, samplePeerID, false}, + {"TDefault", &cfg.P2PConfig{NetProtocolAddr: "", NetProtocolPort: 7845}, args{samplePeerID, false}, false, 7845, samplePeerID, false}, + {"THidden", &cfg.P2PConfig{NetProtocolAddr: "211.1.1.2", NetProtocolPort: 7845}, args{samplePeerID, true}, true, 7845, samplePeerID, true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sl := &networkTransport{ + conf: tt.conf, + logger: logger, + } + + sl.initSelfMeta(tt.args.peerID, tt.args.noExpose) + + if tt.wantSameAddr { + assert.Equal(t, tt.conf.NetProtocolAddr, sl.selfMeta.IPAddress) + } else { + assert.NotEqual(t, tt.conf.NetProtocolAddr, sl.selfMeta.IPAddress) + } + assert.Equal(t, tt.wantPort, sl.selfMeta.Port) + assert.Equal(t, tt.wantID, sl.selfMeta.ID) + assert.Equal(t, tt.wantHidden, sl.selfMeta.Hidden) + + assert.NotNil(t, sl.bindAddress) + fmt.Println("ProtocolAddress: ", sl.selfMeta.IPAddress) + fmt.Println("bindAddress: ", sl.bindAddress.String()) + }) + } +} + +func TestNewNetworkTransport(t *testing.T) { + logger := log.NewLogger("test.transport") + svrctx := config.NewServerContext("", "") + sampleAddr := "211.1.2.3" + + tests := []struct { + name string + + protocolAddr string + bindAddr string + protocolPort int + bindPort int + wantAddress net.IP + wantPort uint32 + }{ + {"TDefault", "", "", -1, -1, nil, 7846}, + {"TAddrProto", sampleAddr, "", -1, -1, net.ParseIP(sampleAddr), 7846}, + {"TAddrBind", "", sampleAddr,-1, -1, net.ParseIP(sampleAddr), 7846}, + {"TAddrDiffer", "123.45.67.89", sampleAddr,-1, -1, net.ParseIP(sampleAddr), 7846}, + + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + conf := svrctx.GetDefaultP2PConfig() + if len(tt.protocolAddr) > 0 { + conf.NetProtocolAddr = tt.protocolAddr + } + if len(tt.bindAddr) > 0 { + conf.NPBindAddr = tt.bindAddr + } + if tt.protocolPort > 0 { + conf.NetProtocolPort = tt.protocolPort + } + if tt.bindPort > 0 { + conf.NPBindPort = tt.bindPort + } + got := NewNetworkTransport(conf, logger) + + if got.privateKey == nil { + t.Errorf("NewNetworkTransport() privkey is nil, want not") + } + if got.publicKey == nil { + t.Errorf("NewNetworkTransport() pubkey is nil, want %v", p2pkey.NodePubKey()) + } + if got.selfMeta.Version == "" { + t.Errorf("NewNetworkTransport() = %v, want %v", got.selfMeta.Version, p2pkey.NodeVersion()) + } + addr := got.bindAddress + port := got .bindPort + if tt.wantAddress == nil { + if addr.IsLoopback() || addr.IsUnspecified() { + t.Errorf("initServiceBindAddress() addr = %v, want valid addr", addr) + } + } else { + if !addr.Equal(tt.wantAddress) { + t.Errorf("initServiceBindAddress() addr = %v, want %v", addr, tt.wantAddress) + } + } + if port != tt.wantPort { + t.Errorf("initServiceBindAddress() port = %v, want %v", port, tt.wantPort) + } + + }) + } +} + +func 
Test_networkTransport_initServiceBindAddress(t *testing.T) { + logger := log.NewLogger("test.transport") + svrctx := config.NewServerContext("", "") + initialPort := 7846 + + tests := []struct { + name string + conf *cfg.P2PConfig + + wantAddress net.IP + wantPort int + }{ + {"TEmpty", svrctx.GetDefaultP2PConfig(), nil, initialPort}, + {"TAnywhere", withAddr(svrctx.GetDefaultP2PConfig(), "0.0.0.0") , net.ParseIP("0.0.0.0"), initialPort}, + {"TCustAddr", withAddr(svrctx.GetDefaultP2PConfig(), "211.1.2.3") , net.ParseIP("211.1.2.3"), initialPort}, + {"TCustAddrPort", withPort(withAddr(svrctx.GetDefaultP2PConfig(), "211.1.2.3"),7777) , net.ParseIP("211.1.2.3"), 7777}, + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sl := &networkTransport{conf: tt.conf, + logger: logger, + bindPort: uint32(initialPort), + } + sl.initServiceBindAddress() + + addr := sl.bindAddress + port := sl.bindPort + // init result must always bind a valid address + if tt.wantAddress == nil { + if addr.IsLoopback() || addr.IsUnspecified() { + t.Errorf("initServiceBindAddress() addr = %v, want valid addr", addr) + } + } else { + if !addr.Equal(tt.wantAddress) { + t.Errorf("initServiceBindAddress() addr = %v, want %v", addr, tt.wantAddress) + } + } + if int(port) != tt.wantPort { + t.Errorf("initServiceBindAddress() port = %v, want %v", port, tt.wantPort) + } + }) + } +} + +func withAddr(conf *cfg.P2PConfig, addr string) *cfg.P2PConfig { + conf.NPBindAddr = addr + return conf +} + +func withPort(conf *cfg.P2PConfig, port int) *cfg.P2PConfig { + conf.NPBindPort = port + return conf +} diff --git a/p2p/v030/doc.go b/p2p/v030/doc.go new file mode 100644 index 000000000..249b6505e --- /dev/null +++ b/p2p/v030/doc.go @@ -0,0 +1,8 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +// Package v030 is a collection of classes for p2p version 0.3.0 +// It contains IO, handshake, handlers, etc.
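+//
+// The v0.3 wire format (see v030io.go) frames each message with a fixed 48-byte,
+// big-endian header: subprotocol (4 bytes), payload length (4 bytes), timestamp (8 bytes),
+// message id (16 bytes) and original message id (16 bytes), followed by the
+// protobuf-encoded message body. The handshake (see v030handshake.go) exchanges
+// types.Status messages over the StatusRequest subprotocol before normal traffic starts.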
+package v030 diff --git a/p2p/v030handshake.go b/p2p/v030/v030handshake.go similarity index 67% rename from p2p/v030handshake.go rename to p2p/v030/v030handshake.go index 2718cf776..8165e16e5 100644 --- a/p2p/v030handshake.go +++ b/p2p/v030/v030handshake.go @@ -3,12 +3,15 @@ * @copyright defined in aergo/LICENSE.txt */ -package p2p +package v030 import ( "bufio" + "context" "fmt" + "github.com/aergoio/aergo/p2p/p2pkey" "io" + "time" "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/p2p/p2pcommon" @@ -31,26 +34,20 @@ type V030Handshaker struct { msgRW p2pcommon.MsgReadWriter } -type V030HSMessage struct { - HSHeader - Sigature [p2pcommon.SigLength]byte - PubKeyB []byte - Timestamp uint64 - Nonce uint16 -} +var _ p2pcommon.VersionedHandshaker = (*V030Handshaker)(nil) func (h *V030Handshaker) GetMsgRW() p2pcommon.MsgReadWriter { return h.msgRW } -func newV030StateHS(pm p2pcommon.PeerManager, actorServ p2pcommon.ActorService, log *log.Logger, chainID *types.ChainID, peerID peer.ID, rd io.Reader, wr io.Writer) *V030Handshaker { +func NewV030StateHS(pm p2pcommon.PeerManager, actorServ p2pcommon.ActorService, log *log.Logger, chainID *types.ChainID, peerID peer.ID, rd io.Reader, wr io.Writer) *V030Handshaker { h := &V030Handshaker{pm: pm, actorServ: actorServ, logger: log, chainID: chainID, peerID: peerID, rd: bufio.NewReader(rd), wr: bufio.NewWriter(wr)} h.msgRW = NewV030ReadWriter(h.rd, h.wr) return h } // handshakeOutboundPeer start handshake with outbound peer -func (h *V030Handshaker) doForOutbound() (*types.Status, error) { +func (h *V030Handshaker) DoForOutbound(ctx context.Context) (*types.Status, error) { rw := h.msgRW peerID := h.peerID @@ -58,12 +55,12 @@ func (h *V030Handshaker) doForOutbound() (*types.Status, error) { h.logger.Debug().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("Starting Handshake for outbound peer connection") // send status - statusMsg, err := createStatusMsg(h.pm, h.actorServ, h.chainID) + hostStatus, err := createStatus(h.pm, h.actorServ, h.chainID) if err != nil { return nil, err } - moFactory := &v030MOFactory{} - container := moFactory.newHandshakeMessage(subproto.StatusRequest, statusMsg) + + container := createMessage(subproto.StatusRequest, p2pcommon.NewMsgID(), hostStatus) if container == nil { // h.logger.Warn().Str(LogPeerID, ShortForm(peerID)).Err(err).Msg("failed to create p2p message") return nil, fmt.Errorf("failed to craete container message") @@ -71,6 +68,12 @@ func (h *V030Handshaker) doForOutbound() (*types.Status, error) { if err = rw.WriteMsg(container); err != nil { return nil, err } + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + // go on + } // and wait to response status data, err := rw.ReadMsg() @@ -78,6 +81,12 @@ func (h *V030Handshaker) doForOutbound() (*types.Status, error) { // h.logger.Info().Err(err).Msg("fail to decode") return nil, err } + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + // go on + } if data.Subprotocol() != subproto.StatusRequest { if data.Subprotocol() == subproto.GoAway { @@ -87,7 +96,7 @@ func (h *V030Handshaker) doForOutbound() (*types.Status, error) { } } remotePeerStatus := &types.Status{} - err = p2putil.UnmarshalMessage(data.Payload(), remotePeerStatus) + err = p2putil.UnmarshalMessageBody(data.Payload(), remotePeerStatus) if err != nil { return nil, err } @@ -112,7 +121,7 @@ func (h *V030Handshaker) doForOutbound() (*types.Status, error) { } // onConnect is handle handshake from inbound peer -func (h *V030Handshaker) doForInbound() 
(*types.Status, error) { +func (h *V030Handshaker) DoForInbound(ctx context.Context) (*types.Status, error) { rw := h.msgRW peerID := h.peerID @@ -125,6 +134,12 @@ func (h *V030Handshaker) doForInbound() (*types.Status, error) { h.logger.Warn().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Err(err).Msg("failed to create p2p message") return nil, err } + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + // go on + } if data.Subprotocol() != subproto.StatusRequest { if data.Subprotocol() == subproto.GoAway { @@ -135,15 +150,15 @@ func (h *V030Handshaker) doForInbound() (*types.Status, error) { } } - statusMsg := &types.Status{} - if err := p2putil.UnmarshalMessage(data.Payload(), statusMsg); err != nil { + remotePeerStatus := &types.Status{} + if err := p2putil.UnmarshalMessageBody(data.Payload(), remotePeerStatus); err != nil { h.logger.Warn().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Err(err).Msg("Failed to decode status message.") return nil, err } // check if chainID is same or not remoteChainID := types.NewChainID() - err = remoteChainID.Read(statusMsg.ChainID) + err = remoteChainID.Read(remotePeerStatus.ChainID) if err != nil { return nil, err } @@ -151,19 +166,18 @@ func (h *V030Handshaker) doForInbound() (*types.Status, error) { return nil, fmt.Errorf("different chainID : %s", remoteChainID.ToJSON()) } - peerAddress := statusMsg.Sender + peerAddress := remotePeerStatus.Sender if peerAddress == nil || p2putil.CheckAdddressType(peerAddress.Address) == p2putil.AddressTypeError { return nil, fmt.Errorf("invalid peer address : %s", peerAddress) } // send my status message as response - statusResp, err := createStatusMsg(h.pm, h.actorServ, h.chainID) + hostStatus, err := createStatus(h.pm, h.actorServ, h.chainID) if err != nil { h.logger.Warn().Err(err).Msg("Failed to create status message.") return nil, err } - moFactory := &v030MOFactory{} - container := moFactory.newHandshakeMessage(subproto.StatusRequest, statusResp) + container := createMessage(subproto.StatusRequest, p2pcommon.NewMsgID(), hostStatus) if container == nil { h.logger.Warn().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("failed to create p2p message") return nil, fmt.Errorf("failed to create p2p message") @@ -172,15 +186,54 @@ func (h *V030Handshaker) doForInbound() (*types.Status, error) { h.logger.Warn().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Err(err).Msg("failed to send response status ") return nil, err } - return statusMsg, nil - + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + // go on + } + return remotePeerStatus, nil } func (h *V030Handshaker) handleGoAway(peerID peer.ID, data p2pcommon.Message) (*types.Status, error) { goAway := &types.GoAwayNotice{} - if err := p2putil.UnmarshalMessage(data.Payload(), goAway); err != nil { + if err := p2putil.UnmarshalMessageBody(data.Payload(), goAway); err != nil { h.logger.Warn().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Err(err).Msg("Remore peer sent goAway but failed to decode internal message") return nil, err } return nil, fmt.Errorf("remote peer refuse handshake: %s", goAway.GetMessage()) } + +func createStatus(pm p2pcommon.PeerManager, actorServ p2pcommon.ActorService, chainID *types.ChainID) (*types.Status, error) { + // find my best block + bestBlock, err := actorServ.GetChainAccessor().GetBestBlock() + if err != nil { + return nil, err + } + selfAddr := pm.SelfMeta().ToPeerAddress() + chainIDbytes, err := chainID.Bytes() + if err != nil { + return nil, err + } + // create message data + 
statusMsg := &types.Status{ + Sender: &selfAddr, + ChainID: chainIDbytes, + BestBlockHash: bestBlock.BlockHash(), + BestHeight: bestBlock.GetHeader().GetBlockNo(), + NoExpose: pm.SelfMeta().Hidden, + Version: p2pkey.NodeVersion(), + } + + return statusMsg, nil +} + +func createMessage(protocolID p2pcommon.SubProtocol, msgID p2pcommon.MsgID, msgBody p2pcommon.MessageBody) p2pcommon.Message { + bytes, err := p2putil.MarshalMessageBody(msgBody) + if err != nil { + return nil + } + + msg := p2pcommon.NewMessageValue(protocolID, msgID, p2pcommon.EmptyID, time.Now().UnixNano(), bytes) + return msg +} diff --git a/p2p/v030handshake_test.go b/p2p/v030/v030handshake_test.go similarity index 72% rename from p2p/v030handshake_test.go rename to p2p/v030/v030handshake_test.go index 8199c2e9f..59130186a 100644 --- a/p2p/v030handshake_test.go +++ b/p2p/v030/v030handshake_test.go @@ -3,23 +3,51 @@ * @copyright defined in aergo/LICENSE.txt */ -package p2p +package v030 import ( + "context" "encoding/hex" "fmt" + "github.com/aergoio/aergo/config" + "github.com/aergoio/aergo/p2p/p2pkey" + peer "github.com/libp2p/go-libp2p-peer" "reflect" "testing" "github.com/aergoio/aergo-lib/log" - "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2pmock" + "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/subproto" "github.com/aergoio/aergo/types" "github.com/golang/mock/gomock" ) +var ( + myChainID, theirChainID *types.ChainID + myChainBytes, theirChainBytes []byte + samplePeerID, _ = peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") + dummyBlockHash, _ = hex.DecodeString("4f461d85e869ade8a0544f8313987c33a9c06534e50c4ad941498299579bd7ac") + dummyBlockHeight uint64 = 100215 +) + +func init() { + myChainID = types.NewChainID() + myChainID.Magic = "itSmain1" + myChainBytes, _ = myChainID.Bytes() + + theirChainID = types.NewChainID() + theirChainID.Read(myChainBytes) + theirChainID.Magic = "itsdiff2" + theirChainBytes, _ = theirChainID.Bytes() + + sampleKeyFile := "../../test/sample.key" + baseCfg := &config.BaseConfig{AuthDir: "test"} + p2pCfg := &config.P2PConfig{NPKey: sampleKeyFile} + p2pkey.InitNodeInfo(baseCfg, p2pCfg, "0.0.1-test", log.NewLogger("v030.test")) +} + func TestDeepEqual(t *testing.T) { b1, _ := myChainID.Bytes() b2 := make([]byte, len(b1), len(b1)<<1) @@ -38,12 +66,12 @@ func TestV030StatusHS_doForOutbound(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - logger = log.NewLogger("test") + logger := log.NewLogger("test") mockActor := p2pmock.NewMockActorService(ctrl) mockCA := p2pmock.NewMockChainAccessor(ctrl) mockPM := p2pmock.NewMockPeerManager(ctrl) - dummyMeta := p2pcommon.PeerMeta{ID: dummyPeerID, IPAddress: "dummy.aergo.io"} + dummyMeta := p2pcommon.PeerMeta{ID: samplePeerID, IPAddress: "dummy.aergo.io"} dummyAddr := dummyMeta.ToPeerAddress() mockPM.EXPECT().SelfMeta().Return(dummyMeta).AnyTimes() dummyBlock := &types.Block{Hash: dummyBlockHash, Header: &types.BlockHeader{BlockNo: dummyBlockHeight}} @@ -74,20 +102,20 @@ func TestV030StatusHS_doForOutbound(t *testing.T) { dummyWriter := p2pmock.NewMockWriter(ctrl) mockRW := p2pmock.NewMockMsgReadWriter(ctrl) - containerMsg := &V030Message{} + var containerMsg *p2pcommon.MessageValue if tt.readReturn != nil { - containerMsg.subProtocol = subproto.StatusRequest - statusBytes, _ := p2putil.MarshalMessage(tt.readReturn) - containerMsg.payload = statusBytes + containerMsg = p2pcommon.NewSimpleMsgVal(subproto.StatusRequest, 
p2pcommon.NewMsgID()) + statusBytes, _ := p2putil.MarshalMessageBody(tt.readReturn) + containerMsg.SetPayload(statusBytes) } else { - containerMsg.subProtocol = subproto.AddressesRequest + containerMsg = p2pcommon.NewSimpleMsgVal(subproto.AddressesRequest, p2pcommon.NewMsgID()) } mockRW.EXPECT().ReadMsg().Return(containerMsg, tt.readError).AnyTimes() mockRW.EXPECT().WriteMsg(gomock.Any()).Return(tt.writeError).AnyTimes() - h := newV030StateHS(mockPM, mockActor, logger, myChainID, samplePeerID, dummyReader, dummyWriter) + h := NewV030StateHS(mockPM, mockActor, logger, myChainID, samplePeerID, dummyReader, dummyWriter) h.msgRW = mockRW - got, err := h.doForOutbound() + got, err := h.DoForOutbound(context.Background()) if (err != nil) != tt.wantErr { t.Errorf("PeerHandshaker.handshakeOutboundPeer() error = %v, wantErr %v", err, tt.wantErr) return @@ -110,12 +138,12 @@ func TestV030StatusHS_handshakeInboundPeer(t *testing.T) { defer ctrl.Finish() // t.SkipNow() - logger = log.NewLogger("test") + logger := log.NewLogger("test") mockActor := p2pmock.NewMockActorService(ctrl) mockCA := p2pmock.NewMockChainAccessor(ctrl) mockPM := p2pmock.NewMockPeerManager(ctrl) - dummyMeta := p2pcommon.PeerMeta{ID: dummyPeerID, IPAddress: "dummy.aergo.io"} + dummyMeta := p2pcommon.PeerMeta{ID: samplePeerID, IPAddress: "dummy.aergo.io"} dummyAddr := dummyMeta.ToPeerAddress() mockPM.EXPECT().SelfMeta().Return(dummyMeta).AnyTimes() dummyBlock := &types.Block{Hash: dummyBlockHash, Header: &types.BlockHeader{BlockNo: dummyBlockHeight}} @@ -147,21 +175,21 @@ func TestV030StatusHS_handshakeInboundPeer(t *testing.T) { dummyWriter := p2pmock.NewMockWriter(ctrl) mockRW := p2pmock.NewMockMsgReadWriter(ctrl) - containerMsg := &V030Message{} + containerMsg := &p2pcommon.MessageValue{} if tt.readReturn != nil { - containerMsg.subProtocol = subproto.StatusRequest - statusBytes, _ := p2putil.MarshalMessage(tt.readReturn) - containerMsg.payload = statusBytes + containerMsg = p2pcommon.NewSimpleMsgVal(subproto.StatusRequest, p2pcommon.NewMsgID()) + statusBytes, _ := p2putil.MarshalMessageBody(tt.readReturn) + containerMsg.SetPayload(statusBytes) } else { - containerMsg.subProtocol = subproto.AddressesRequest + containerMsg = p2pcommon.NewSimpleMsgVal(subproto.AddressesRequest, p2pcommon.NewMsgID()) } mockRW.EXPECT().ReadMsg().Return(containerMsg, tt.readError).AnyTimes() mockRW.EXPECT().WriteMsg(gomock.Any()).Return(tt.writeError).AnyTimes() - h := newV030StateHS(mockPM, mockActor, logger, myChainID, samplePeerID, dummyReader, dummyWriter) + h := NewV030StateHS(mockPM, mockActor, logger, myChainID, samplePeerID, dummyReader, dummyWriter) h.msgRW = mockRW - got, err := h.doForInbound() + got, err := h.DoForInbound(context.Background()) if (err != nil) != tt.wantErr { t.Errorf("PeerHandshaker.handshakeInboundPeer() error = %v, wantErr %v", err, tt.wantErr) return diff --git a/p2p/v030io.go b/p2p/v030/v030io.go similarity index 77% rename from p2p/v030io.go rename to p2p/v030/v030io.go index ea303f1f6..0ce6b75a3 100644 --- a/p2p/v030io.go +++ b/p2p/v030/v030io.go @@ -3,7 +3,7 @@ * @copyright defined in aergo/LICENSE.txt */ -package p2p +package v030 import ( "bufio" @@ -59,20 +59,20 @@ func (r *V030Reader) ReadMsg() (p2pcommon.Message, error) { return nil, fmt.Errorf("invalid msgHeader") } - msg := parseHeader(r.headBuf) - if msg.length > p2pcommon.MaxPayloadLength { + msg, bodyLen := parseHeader(r.headBuf) + if bodyLen > p2pcommon.MaxPayloadLength { return nil, fmt.Errorf("too big payload") } - payload := make([]byte, msg.length) - 
read, err = r.readToLen(payload, int(msg.length)) + payload := make([]byte, bodyLen) + read, err = r.readToLen(payload, int(bodyLen)) if err != nil { - return nil, fmt.Errorf("failed to read paylod of msg %s %s : %s", msg.subProtocol.String(), msg.id, err.Error()) + return nil, fmt.Errorf("failed to read paylod of msg %s %s : %s", msg.Subprotocol().String(), msg.ID(), err.Error()) } - if read != int(msg.length) { - return nil, fmt.Errorf("failed to read paylod of msg %s %s : payload length mismatch", msg.subProtocol.String(), msg.id) + if read != int(bodyLen) { + return nil, fmt.Errorf("failed to read paylod of msg %s %s : payload length mismatch", msg.Subprotocol().String(), msg.ID()) } - msg.payload = payload + msg.SetPayload(payload) return msg, nil } @@ -123,14 +123,13 @@ func (w *V030Writer) WriteMsg(msg p2pcommon.Message) error { return nil } -func parseHeader(buf [msgHeaderLength]byte) *V030Message { - m := &V030Message{} - m.subProtocol = p2pcommon.SubProtocol(binary.BigEndian.Uint32(buf[0:4])) - m.length = binary.BigEndian.Uint32(buf[4:8]) - m.timestamp = int64(binary.BigEndian.Uint64(buf[8:16])) - copy(m.id[:], buf[16:32]) - copy(m.originalID[:], buf[32:48]) - return m +func parseHeader(buf [msgHeaderLength]byte) (*p2pcommon.MessageValue, uint32) { + subProtocol := p2pcommon.SubProtocol(binary.BigEndian.Uint32(buf[0:4])) + length := binary.BigEndian.Uint32(buf[4:8]) + timestamp := int64(binary.BigEndian.Uint64(buf[8:16])) + msgID := p2pcommon.MustParseBytes(buf[16:32]) + orgID := p2pcommon.MustParseBytes(buf[32:48]) + return p2pcommon.NewLiteMessageValue(subProtocol, msgID, orgID, timestamp), length } func (w *V030Writer) marshalHeader(m p2pcommon.Message) { diff --git a/p2p/v030io_test.go b/p2p/v030/v030io_test.go similarity index 67% rename from p2p/v030io_test.go rename to p2p/v030/v030io_test.go index 91a2bb198..2cdc32c1f 100644 --- a/p2p/v030io_test.go +++ b/p2p/v030/v030io_test.go @@ -3,12 +3,13 @@ * @copyright defined in aergo/LICENSE.txt */ -package p2p +package v030 import ( "bufio" "bytes" "fmt" + "github.com/aergoio/aergo/internal/enc" "io/ioutil" "testing" "time" @@ -21,6 +22,48 @@ import ( "github.com/stretchr/testify/assert" ) + +var sampleTxsB58 = []string{ + "4H4zAkAyRV253K5SNBJtBxqUgHEbZcXbWFFc6cmQHY45", + "6xfk39kuyDST7NwCu8tx3wqwFZ5dwKPDjxUS14tU7NZb8", + "E8dbBGe9Hnuhk35cJoekPjL3VoL4xAxtnRuP47UoxzHd", + "HB7Hg5GUbHuxwe8Lp5PcYUoAaQ7EZjRNG6RuvS6DnDRf", + "BxKmDg9VbWHxrWnStEeTzJ2Ze7RF7YK4rpyjcsWSsnxs", + "DwmGqFU4WgADpYN36FXKsYxMjeppvh9Najg4KxJ8gtX3", +} + +var sampleTxs [][]byte +var sampleTxHashes []types.TxID + +var sampleBlksB58 = []string{ + "v6zbuQ4aVSdbTwQhaiZGp5pcL5uL55X3kt2wfxor5W6", + "2VEPg4MqJUoaS3EhZ6WWSAUuFSuD4oSJ645kSQsGV7H9", + "AtzTZ2CZS45F1276RpTdLfYu2DLgRcd9HL3aLqDT1qte", + "2n9QWNDoUvML756X7xdHWCFLZrM4CQEtnVH2RzG5FYAw", + "6cy7U7XKYtDTMnF3jNkcJvJN5Rn85771NSKjc5Tfo2DM", + "3bmB8D37XZr4DNPs64NiGRa2Vw3i8VEgEy6Xc2XBmRXC", +} +var sampleBlks [][]byte +var sampleBlksHashes []types.BlockID + +func init() { + sampleTxs = make([][]byte, len(sampleTxsB58)) + sampleTxHashes = make([]types.TxID, len(sampleTxsB58)) + for i, hashb58 := range sampleTxsB58 { + hash, _ := enc.ToBytes(hashb58) + sampleTxs[i] = hash + copy(sampleTxHashes[i][:], hash) + } + + sampleBlks = make([][]byte, len(sampleBlksB58)) + sampleBlksHashes = make([]types.BlockID, len(sampleBlksB58)) + for i, hashb58 := range sampleTxsB58 { + hash, _ := enc.ToBytes(hashb58) + sampleBlks[i] = hash + copy(sampleBlksHashes[i][:], hash) + } +} + func Test_ReadWrite(t *testing.T) { var sampleID 
p2pcommon.MsgID sampleUUID, _ := uuid.NewV4() @@ -45,7 +88,7 @@ func Test_ReadWrite(t *testing.T) { t.Run(test.name, func(t *testing.T) { samplePData := &types.NewTransactionsNotice{TxHashes: test.ids} payload, _ := proto.Marshal(samplePData) - sample := &V030Message{subProtocol: subproto.NewTxNotice, id: sampleID, timestamp: time.Now().UnixNano(), length: uint32(len(payload)), payload: payload} + sample := p2pcommon.NewMessageValue(subproto.NewTxNotice, sampleID, p2pcommon.EmptyID, time.Now().UnixNano(),payload) buf := bytes.NewBuffer(nil) target := NewV030Writer(bufio.NewWriter(buf)) @@ -58,7 +101,7 @@ func Test_ReadWrite(t *testing.T) { readMsg, err := rd.ReadMsg() assert.Nil(t, err) assert.Equal(t, sample, readMsg) - assert.True(t, bytes.Equal(sample.payload, readMsg.Payload())) + assert.True(t, bytes.Equal(sample.Payload(), readMsg.Payload())) // read error test buf2 := bytes.NewBuffer(actual) @@ -76,7 +119,7 @@ func TestV030Writer_WriteError(t *testing.T) { //copy(sampleID[:], sampleUUID[:]) //samplePData := &types.NewTransactionsNotice{TxHashes:sampleTxs} //payload, _ := proto.Marshal(samplePData) - //sample := &V030Message{subProtocol: subproto.NewTxNotice, id: sampleID, timestamp: time.Now().UnixNano(), length: uint32(len(payload)), payload: payload} + //sample := &MessageValue{subProtocol: subproto.NewTxNotice, id: sampleID, timestamp: time.Now().UnixNano(), length: uint32(len(payload)), payload: payload} //mockWriter := make(MockWriter) //mockWriter.On("Write", mock.Anything).Return(fmt.Errorf("writer error")) //target := NewV030Writer(bufio.NewWriter(mockWriter)) @@ -92,19 +135,18 @@ func BenchmarkV030Writer_WriteMsg(b *testing.B) { smallPData := &types.NewTransactionsNotice{} payload, _ := proto.Marshal(smallPData) - smallMsg := &V030Message{id: sampleID, originalID: sampleID, timestamp: timestamp, subProtocol: subproto.NewTxNotice, payload: payload, length: uint32(len(payload))} - + smallMsg := p2pcommon.NewMessageValue(subproto.NewTxNotice, sampleID, p2pcommon.EmptyID, timestamp,payload) bigHashes := make([][]byte, 0, len(sampleTxs)*10000) for i := 0; i < 10000; i++ { bigHashes = append(bigHashes, sampleTxs...) 
} bigPData := &types.NewTransactionsNotice{TxHashes: bigHashes} payload, _ = proto.Marshal(bigPData) - bigMsg := &V030Message{id: sampleID, originalID: sampleID, timestamp: timestamp, subProtocol: subproto.NewTxNotice, payload: payload, length: uint32(len(payload))} + bigMsg := p2pcommon.NewMessageValue(subproto.NewTxNotice, sampleID, p2pcommon.EmptyID, timestamp,payload) benchmarks := []struct { name string - input *V030Message + input *p2pcommon.MessageValue repeatCount int }{ // write small @@ -137,7 +179,7 @@ func BenchmarkV030Reader_ReadMsg(b *testing.B) { smallPData := &types.NewTransactionsNotice{} payload, _ := proto.Marshal(smallPData) - smallMsg := &V030Message{id: sampleID, originalID: sampleID, timestamp: timestamp, subProtocol: subproto.NewTxNotice, payload: payload, length: uint32(len(payload))} + smallMsg := p2pcommon.NewMessageValue(subproto.NewTxNotice, sampleID, p2pcommon.EmptyID, timestamp,payload) smallBytes := getMashaledV030(smallMsg, 100) bigHashes := make([][]byte, 0, len(sampleTxs)*10000) @@ -146,7 +188,7 @@ func BenchmarkV030Reader_ReadMsg(b *testing.B) { } bigPData := &types.NewTransactionsNotice{TxHashes: bigHashes} payload, _ = proto.Marshal(bigPData) - bigMsg := &V030Message{id: sampleID, originalID: sampleID, timestamp: timestamp, subProtocol: subproto.NewTxNotice, payload: payload, length: uint32(len(payload))} + bigMsg := p2pcommon.NewMessageValue(subproto.NewTxNotice, sampleID, p2pcommon.EmptyID, timestamp,payload) bigBytes := getMashaledV030(bigMsg, 100) fmt.Printf("small : %d , big : %d \n", len(smallBytes), len(bigBytes)) @@ -182,7 +224,7 @@ func BenchmarkV030Reader_ReadMsg(b *testing.B) { } } -func getMashaledV030(m *V030Message, repeat int) []byte { +func getMashaledV030(m *p2pcommon.MessageValue, repeat int) []byte { unitbuf := &bytes.Buffer{} writer := NewV030Writer(bufio.NewWriter(unitbuf)) writer.WriteMsg(m) diff --git a/p2p/v030mofactory.go b/p2p/v030mofactory.go deleted file mode 100644 index 042183654..000000000 --- a/p2p/v030mofactory.go +++ /dev/null @@ -1,107 +0,0 @@ -/* - * @file - * @copyright defined in aergo/LICENSE.txt - */ - -package p2p - -import ( - "time" - - "github.com/aergoio/aergo/p2p/p2putil" - "github.com/aergoio/aergo/p2p/p2pcommon" - "github.com/aergoio/aergo/p2p/subproto" - - "github.com/aergoio/aergo/types" - "github.com/gofrs/uuid" -) - -type v030MOFactory struct { -} - -func (mf *v030MOFactory) NewMsgRequestOrder(expectResponse bool, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.MsgOrder { - rmo := &pbRequestOrder{} - msgID := uuid.Must(uuid.NewV4()) - if newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, protocolID, message) { - return rmo - } - return nil -} - -func (mf *v030MOFactory) NewMsgBlockRequestOrder(respReceiver p2pcommon.ResponseReceiver, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.MsgOrder { - rmo := &pbRequestOrder{} - msgID := uuid.Must(uuid.NewV4()) - if newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, protocolID, message) { - rmo.respReceiver = respReceiver - return rmo - } - return nil -} - -func (mf *v030MOFactory) NewMsgResponseOrder(reqID p2pcommon.MsgID, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.MsgOrder { - rmo := &pbResponseOrder{} - msgID := uuid.Must(uuid.NewV4()) - if newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.FromBytesOrNil(reqID[:]), protocolID, message) { - return rmo - } - return nil -} - -func (mf *v030MOFactory) NewMsgBlkBroadcastOrder(noticeMsg *types.NewBlockNotice) p2pcommon.MsgOrder { 
- rmo := &pbBlkNoticeOrder{} - msgID := uuid.Must(uuid.NewV4()) - if newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, subproto.NewBlockNotice, noticeMsg) { - rmo.blkHash = noticeMsg.BlockHash - return rmo - } - return nil -} - -func (mf *v030MOFactory) NewMsgTxBroadcastOrder(message *types.NewTransactionsNotice) p2pcommon.MsgOrder { - rmo := &pbTxNoticeOrder{} - reqID := uuid.Must(uuid.NewV4()) - if newV030MsgOrder(&rmo.pbMessageOrder, reqID, uuid.Nil, subproto.NewTxNotice, message) { - rmo.txHashes = message.TxHashes - return rmo - } - return nil -} - -func (mf *v030MOFactory) NewMsgBPBroadcastOrder(noticeMsg *types.BlockProducedNotice) p2pcommon.MsgOrder { - rmo := &pbBpNoticeOrder{} - msgID := uuid.Must(uuid.NewV4()) - if newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, subproto.BlockProducedNotice, noticeMsg) { - rmo.block = noticeMsg.Block - return rmo - } - return nil -} - -func (mf *v030MOFactory) newHandshakeMessage(protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) p2pcommon.Message { - // TODO define handshake specific datatype - rmo := &pbRequestOrder{} - msgID := uuid.Must(uuid.NewV4()) - if newV030MsgOrder(&rmo.pbMessageOrder, msgID, uuid.Nil, protocolID, message) { - return rmo.message - } - return nil -} - -// newPbMsgOrder is base form of making sendrequest struct -func newV030MsgOrder(mo *pbMessageOrder, msgID, orgID uuid.UUID, protocolID p2pcommon.SubProtocol, message p2pcommon.PbMessage) bool { - bytes, err := p2putil.MarshalMessage(message) - if err != nil { - return false - } - - var id, originalid p2pcommon.MsgID - copy(id[:], msgID[:]) - copy(originalid[:], orgID[:]) - - msg := &V030Message{id: id, originalID: originalid, timestamp: time.Now().UnixNano(), subProtocol: protocolID, payload: bytes, length: uint32(len(bytes))} - mo.protocolID = protocolID - mo.needSign = true - mo.message = msg - - return true -} diff --git a/p2p/v030msg.go b/p2p/v030msg.go deleted file mode 100644 index 92e165f06..000000000 --- a/p2p/v030msg.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * @file - * @copyright defined in aergo/LICENSE.txt - */ - -package p2p - -import ( - "github.com/aergoio/aergo/p2p/p2pcommon" - "time" -) - -// V030Message is basic form of p2p message v0.3 -type V030Message struct { - subProtocol p2pcommon.SubProtocol - // Length is lenght of payload - length uint32 - // timestamp is unix time (precision of second) - timestamp int64 - // ID is 16 bytes unique identifier - id p2pcommon.MsgID - // OriginalID is message id of request which trigger this message. it will be all zero, if message is request or notice. - originalID p2pcommon.MsgID - - // marshaled by google protocol buffer v3. 
object is determined by Subprotocol - payload []byte -} - -// NewV030Message create a new object -func NewV030Message(msgID, originalID p2pcommon.MsgID, timestamp int64, protocol p2pcommon.SubProtocol, payload []byte) *V030Message { - return &V030Message{id: msgID, originalID:originalID,timestamp:time.Now().UnixNano(), subProtocol:protocol,payload:payload,length:uint32(len(payload))} -} - -func (m *V030Message) Subprotocol() p2pcommon.SubProtocol { - return m.subProtocol -} - -func (m *V030Message) Length() uint32 { - return m.length - -} - -func (m *V030Message) Timestamp() int64 { -return m.timestamp -} - -func (m *V030Message) ID() p2pcommon.MsgID { - return m.id -} - -func (m *V030Message) OriginalID() p2pcommon.MsgID { - return m.originalID -} - -func (m *V030Message) Payload() []byte { - return m.payload -} - -var _ p2pcommon.Message = (*V030Message)(nil) diff --git a/p2p/versionmanager.go b/p2p/versionmanager.go new file mode 100644 index 000000000..2455e68ee --- /dev/null +++ b/p2p/versionmanager.go @@ -0,0 +1,59 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2p + +import ( + "fmt" + "github.com/aergoio/aergo-lib/log" + "github.com/aergoio/aergo/p2p/p2pcommon" + v030 "github.com/aergoio/aergo/p2p/v030" + "github.com/aergoio/aergo/types" + peer2 "github.com/libp2p/go-libp2p-peer" + "io" +) + +type defaultVersionManager struct { + pm p2pcommon.PeerManager + actor p2pcommon.ActorService + logger *log.Logger + + // check if is it adhoc + localChainID *types.ChainID +} + +func newDefaultVersionManager(pm p2pcommon.PeerManager, actor p2pcommon.ActorService, logger *log.Logger, localChainID *types.ChainID) *defaultVersionManager { + return &defaultVersionManager{pm: pm, actor: actor, logger: logger, localChainID: localChainID} +} + +func (vm *defaultVersionManager) FindBestP2PVersion(versions []p2pcommon.P2PVersion) p2pcommon.P2PVersion { + for _, suppored := range CurrentSupported { + for _, reqVer := range versions { + if suppored == reqVer { + return reqVer + } + } + } + return p2pcommon.P2PVersionUnknown +} + +func (h *defaultVersionManager) GetVersionedHandshaker(version p2pcommon.P2PVersion, peerID peer2.ID, r io.Reader, w io.Writer) (p2pcommon.VersionedHandshaker, error) { + switch version { + case p2pcommon.P2PVersion031: + // TODO: + v030hs := v030.NewV030StateHS(h.pm, h.actor, h.logger, h.localChainID, peerID, r, w) + return v030hs, nil + case p2pcommon.P2PVersion030: + v030hs := v030.NewV030StateHS(h.pm, h.actor, h.logger, h.localChainID, peerID, r, w) + return v030hs, nil + default: + return nil, fmt.Errorf("not supported version") + } +} + +func (vm *defaultVersionManager) InjectHandlers(version p2pcommon.P2PVersion, peer p2pcommon.RemotePeer) { + panic("implement me") +} + diff --git a/p2p/versionmanager_test.go b/p2p/versionmanager_test.go new file mode 100644 index 000000000..6cf6ea284 --- /dev/null +++ b/p2p/versionmanager_test.go @@ -0,0 +1,97 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2p + +import ( + "bytes" + "io" + "reflect" + "testing" + + "github.com/aergoio/aergo-lib/log" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/types" + peer2 "github.com/libp2p/go-libp2p-peer" +) + +func Test_defaultVersionManager_FindBestP2PVersion(t *testing.T) { + type fields struct { + pm p2pcommon.PeerManager + actor p2pcommon.ActorService + logger *log.Logger + localChainID *types.ChainID + } + type args struct { + versions []p2pcommon.P2PVersion + } + tests := []struct { + name string + fields 
fields + args args + want p2pcommon.P2PVersion + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + vm := &defaultVersionManager{ + pm: tt.fields.pm, + actor: tt.fields.actor, + logger: tt.fields.logger, + localChainID: tt.fields.localChainID, + } + if got := vm.FindBestP2PVersion(tt.args.versions); !reflect.DeepEqual(got, tt.want) { + t.Errorf("defaultVersionManager.FindBestP2PVersion() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_defaultVersionManager_GetVersionedHandshaker(t *testing.T) { + type fields struct { + pm p2pcommon.PeerManager + actor p2pcommon.ActorService + logger *log.Logger + localChainID *types.ChainID + } + type args struct { + version p2pcommon.P2PVersion + peerID peer2.ID + r io.Reader + } + tests := []struct { + name string + fields fields + args args + want p2pcommon.VersionedHandshaker + wantW string + wantErr bool + }{ + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := &defaultVersionManager{ + pm: tt.fields.pm, + actor: tt.fields.actor, + logger: tt.fields.logger, + localChainID: tt.fields.localChainID, + } + w := &bytes.Buffer{} + got, err := h.GetVersionedHandshaker(tt.args.version, tt.args.peerID, tt.args.r, w) + if (err != nil) != tt.wantErr { + t.Errorf("defaultVersionManager.GetVersionedHandshaker() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("defaultVersionManager.GetVersionedHandshaker() = %v, want %v", got, tt.want) + } + if gotW := w.String(); gotW != tt.wantW { + t.Errorf("defaultVersionManager.GetVersionedHandshaker() = %v, want %v", gotW, tt.wantW) + } + }) + } +} diff --git a/p2p/waitpeermanager.go b/p2p/waitpeermanager.go new file mode 100644 index 000000000..dfdce7ba6 --- /dev/null +++ b/p2p/waitpeermanager.go @@ -0,0 +1,387 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2p + +import ( + "errors" + "fmt" + "github.com/aergoio/aergo-lib/log" + "github.com/aergoio/aergo/p2p/metric" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2putil" + "github.com/aergoio/aergo/p2p/subproto" + "github.com/aergoio/aergo/types" + net "github.com/libp2p/go-libp2p-net" + "github.com/libp2p/go-libp2p-peer" + "sort" + "time" +) + +func NewWaitingPeerManager(logger *log.Logger, pm *peerManager, actorService p2pcommon.ActorService, maxCap int, useDiscover, usePolaris bool) p2pcommon.WaitingPeerManager { + var wpm p2pcommon.WaitingPeerManager + if !useDiscover { + sp := &staticWPManager{basePeerManager{pm: pm, logger: logger,workingJobs:make(map[peer.ID]ConnWork)}} + wpm = sp + } else { + dp := &dynamicWPManager{basePeerManager:basePeerManager{pm: pm, logger: logger, workingJobs:make(map[peer.ID]ConnWork)}, maxPeers: maxCap} + wpm = dp + } + + return wpm +} + +type basePeerManager struct { + pm *peerManager + + logger *log.Logger + workingJobs map[peer.ID]ConnWork + +} + + +func (dpm *basePeerManager) OnInboundConn(s net.Stream) { + version := p2pcommon.P2PVersion031 + + peerID := s.Conn().RemotePeer() + tempMeta := p2pcommon.PeerMeta{ID: peerID} + addr := s.Conn().RemoteMultiaddr() + + dpm.logger.Debug().Str(p2putil.LogFullID, peerID.Pretty()).Str("multiaddr", addr.String()).Msg("new inbound peer arrived") + query := inboundConnEvent{meta: tempMeta, p2pVer: p2pcommon.P2PVersionUnknown, foundC: make(chan bool)} + dpm.pm.inboundConnChan <- query + if exist := <-query.foundC; exist { + dpm.logger.Debug().Str(p2putil.LogPeerID, 
p2putil.ShortForm(peerID)).Msg("same peer as inbound peer already exists.") + s.Close() + return + } + + h := dpm.pm.hsFactory.CreateHSHandler(version, false, peerID) + // check if remote peer is connected (already handshaked) + completeMeta, added := dpm.tryAddPeer(false, tempMeta, s, h) + if !added { + s.Close() + } else { + if tempMeta.IPAddress != completeMeta.IPAddress { + dpm.logger.Debug().Str("after", completeMeta.IPAddress).Msg("Update IP address of inbound remote peer") + } + } +} + +func (dpm *basePeerManager) OnInboundConnLegacy(s net.Stream) { + version := p2pcommon.P2PVersion030 + peerID := s.Conn().RemotePeer() + tempMeta := p2pcommon.PeerMeta{ID: peerID} + addr := s.Conn().RemoteMultiaddr() + + dpm.logger.Debug().Str(p2putil.LogFullID, peerID.Pretty()).Str("multiaddr", addr.String()).Msg("new legacy inbound peer arrived") + query := inboundConnEvent{meta: tempMeta, p2pVer: version, foundC: make(chan bool)} + dpm.pm.inboundConnChan <- query + if exist := <-query.foundC; exist { + dpm.logger.Debug().Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("same peer as inbound peer already exists.") + s.Close() + return + } + + h := dpm.pm.hsFactory.CreateHSHandler(version, false, peerID) + // check if remote peer is connected (already handshaked) + completeMeta, added := dpm.tryAddPeer(false, tempMeta, s, h) + if !added { + s.Close() + } else { + if tempMeta.IPAddress != completeMeta.IPAddress { + dpm.logger.Debug().Str("after", completeMeta.IPAddress).Msg("Update IP address of inbound remote peer") + } + } +} + +func (dpm *basePeerManager) CheckAndConnect() { + dpm.logger.Debug().Msg("checking space to connect more peers") + maxJobs := dpm.getRemainingSpaces() + if maxJobs == 0 { + return + } + dpm.connectWaitingPeers(maxJobs) +} + +func (dpm *basePeerManager) connectWaitingPeers(maxJob int) { + // try to connect to at most maxJob peers in one call + peers := make([]*p2pcommon.WaitingPeer, 0, len(dpm.pm.waitingPeers)) + for _, wp := range dpm.pm.waitingPeers { + peers = append(peers,wp) + } + sort.Sort(byNextTrial(peers)) + + added := 0 + now := time.Now() + for _, wp := range peers { + if added >= maxJob { + break + } + if wp.NextTrial.Before(now) { + // skip if a connection job for this peer is already in progress + if _, exist := dpm.workingJobs[wp.Meta.ID]; exist { + continue + } + dpm.logger.Info().Int("trial", wp.TrialCnt).Str(p2putil.LogPeerID, p2putil.ShortForm(wp.Meta.ID)).Msg("Starting scheduled try to connect peer") + + dpm.workingJobs[wp.Meta.ID] = ConnWork{Meta: wp.Meta, PeerID:wp.Meta.ID, StartTime:time.Now()} + go dpm.runTryOutboundConnect(wp) + added++ + } else { + break + } + } +} + +// getRemainingSpaces checks and returns the number of connection jobs that can be started now. +// the number depends on the number of currently running jobs and the number of waiting peers +func (dpm *basePeerManager) getRemainingSpaces() int { + // simpler version.
just check the total count + // and whether there is space to add more connections + if len(dpm.pm.waitingPeers) <= 0 { + return 0 + } + affordWorker := p2pcommon.MaxConcurrentHandshake - len(dpm.workingJobs) + if affordWorker <= 0 { + return 0 + } + return affordWorker +} + + +func (dpm *basePeerManager) runTryOutboundConnect(wp *p2pcommon.WaitingPeer) { + workResult := p2pcommon.ConnWorkResult{Meta: wp.Meta, TargetPeer: wp} + defer func() { + dpm.pm.workDoneChannel <- workResult + }() + + + meta := wp.Meta + p2pversion, s, err := dpm.getStream(meta) + if err != nil { + dpm.logger.Info().Err(err).Str(p2putil.LogPeerID, p2putil.ShortForm(meta.ID)).Msg("Failed to get stream.") + workResult.Result = err + return + } + h := dpm.pm.hsFactory.CreateHSHandler(p2pversion, true, meta.ID) + // handshake + completeMeta, added := dpm.tryAddPeer(true, meta, s, h) + if !added { + s.Close() + workResult.Result = errors.New("handshake failed") + return + } else { + if meta.IPAddress != completeMeta.IPAddress { + dpm.logger.Debug().Str(p2putil.LogPeerID, p2putil.ShortForm(completeMeta.ID)).Str("before", meta.IPAddress).Str("after", completeMeta.IPAddress).Msg("IP address of remote peer is changed to ") + } + } +} + +func (dpm *basePeerManager) getStream(meta p2pcommon.PeerMeta) (p2pcommon.P2PVersion, net.Stream, error) { + // try to connect to the peer using the possible protocol versions + s, err := dpm.pm.nt.GetOrCreateStream(meta, p2pcommon.P2PSubAddr, p2pcommon.LegacyP2PSubAddr) + if err != nil { + return p2pcommon.P2PVersionUnknown, nil, err + } + switch s.Protocol() { + case p2pcommon.P2PSubAddr: + return p2pcommon.P2PVersion031, s, nil + case p2pcommon.LegacyP2PSubAddr: + return p2pcommon.P2PVersion030, s, err + default: + return p2pcommon.P2PVersionUnknown, nil, fmt.Errorf("unknown p2p wire protocol %v",s.Protocol()) + } +} + +// tryAddPeer checks the connecting peer and adds it if it is acceptable. It returns the peer meta information received from the +// remote peer. The stream s will be owned by the remotePeer if the peer is added successfully.
+func (dpm *basePeerManager) tryAddPeer(outbound bool, meta p2pcommon.PeerMeta, s net.Stream, h p2pcommon.HSHandler) (p2pcommon.PeerMeta, bool) { + var peerID = meta.ID + rd := metric.NewReader(s) + wt := metric.NewWriter(s) + msgRW, remoteStatus, err := h.Handle(rd, wt, defaultHandshakeTTL) + if err != nil { + dpm.logger.Debug().Err(err).Bool("outbound",outbound).Str(p2putil.LogPeerID, p2putil.ShortForm(meta.ID)).Msg("Failed to handshake") + if msgRW != nil { + dpm.sendGoAway(msgRW, err.Error()) + } + return meta, false + } + // update peer meta info using the information sent by the remote peer + receivedMeta := p2pcommon.NewMetaFromStatus(remoteStatus, outbound) + if receivedMeta.ID != peerID { + dpm.logger.Debug().Str("received_peer_id", receivedMeta.ID.Pretty()).Str(p2putil.LogPeerID, p2putil.ShortForm(peerID)).Msg("Inconsistent peerID") + dpm.sendGoAway(msgRW, "Inconsistent peerID") + return meta, false + } + // override options using the local node's configuration + _, receivedMeta.Designated = dpm.pm.designatedPeers[peerID] + // hidden is set by either the remote peer's request or the local node's config + if _, exist := dpm.pm.hiddenPeerSet[peerID]; exist { + receivedMeta.Hidden = true + } + + newPeer := newRemotePeer(receivedMeta, dpm.pm.GetNextManageNum(), dpm.pm, dpm.pm.actorService, dpm.logger, dpm.pm.mf, dpm.pm.signer, s, msgRW) + newPeer.UpdateBlkCache(remoteStatus.GetBestBlockHash(), remoteStatus.GetBestHeight()) + + // insert Handlers + dpm.pm.handlerFactory.InsertHandlers(newPeer) + + dpm.pm.peerHandshaked <- newPeer + return receivedMeta, true +} + +func (dpm *basePeerManager) OnWorkDone(result p2pcommon.ConnWorkResult) { + meta := result.Meta + delete(dpm.workingJobs, meta.ID) + wp, ok := dpm.pm.waitingPeers[meta.ID] + if !ok { + dpm.logger.Debug().Str(p2putil.LogPeerName, p2putil.ShortMetaForm(meta)).Err(result.Result).Msg("Connection job finished") + return + } else { + dpm.logger.Debug().Str(p2putil.LogPeerName, p2putil.ShortMetaForm(meta)).Int("trial",wp.TrialCnt).Err(result.Result).Msg("Connection job finished") + } + wp.LastResult = result.Result + // connection succeeded, so remove the peer from the waiting pool + if result.Result == nil { + dpm.logger.Debug().Str(p2putil.LogPeerName, p2putil.ShortMetaForm(meta)).Msg("Deleting unimportant failed peer.") + delete(dpm.pm.waitingPeers,meta.ID) + } else { + // keep the peer in the waiting pool only if it should be reconnected later + if !setNextTrial(wp) { + dpm.logger.Debug().Str(p2putil.LogPeerName, p2putil.ShortMetaForm(meta)).Time("next_time",wp.NextTrial).Msg("Failed Connection will be retried") + delete(dpm.pm.waitingPeers,meta.ID) + } + } + +} + +func (dpm *basePeerManager) sendGoAway(rw p2pcommon.MsgReadWriter, msg string) { + goMsg := &types.GoAwayNotice{Message: msg} + // TODO code smell: unsafe casting and too many levels of member access + mo := dpm.pm.mf.NewMsgRequestOrder(false, subproto.GoAway, goMsg).(*pbRequestOrder) + container := mo.message + + rw.WriteMsg(container) +} + + +type staticWPManager struct { + basePeerManager +} + + +func (spm *staticWPManager) OnPeerConnect(pid peer.ID) { + delete(spm.pm.waitingPeers, pid) +} + +func (spm *staticWPManager) OnPeerDisconnect(peer p2pcommon.RemotePeer) { + // if the peer is a designated peer, try to reconnect by adding its meta back to the waiting pool + if _, ok := spm.pm.designatedPeers[peer.ID()]; ok { + spm.logger.Debug().Str(p2putil.LogPeerID, peer.Name()).Msg("server will try to reconnect designated peer after cooltime") + // These peers must have a cool-down time.
+ spm.pm.waitingPeers[peer.ID()] = &p2pcommon.WaitingPeer{Meta: peer.Meta(), NextTrial: time.Now().Add(firstReconnectColltime)} + } +} + + +func (spm *staticWPManager) OnDiscoveredPeers(metas []p2pcommon.PeerMeta) int { + // the static manager does not use discovered peers. + return 0 +} + +type dynamicWPManager struct { + basePeerManager + + maxPeers int +} + +func (dpm *dynamicWPManager) OnPeerConnect(pid peer.ID) { + // remove peer from wait pool + delete(dpm.pm.waitingPeers, pid) +} + +func (dpm *dynamicWPManager) OnPeerDisconnect(peer p2pcommon.RemotePeer) { + // if the peer is a designated peer or trusted enough, try to reconnect by adding its meta back to the waiting pool + // TODO checking by trust level is not implemented yet. + if _, ok := dpm.pm.designatedPeers[peer.ID()]; ok { + dpm.logger.Debug().Str(p2putil.LogPeerID, peer.Name()).Msg("server will try to reconnect designated peer after cooltime") + // These peers must have a cool-down time. + dpm.pm.waitingPeers[peer.ID()] = &p2pcommon.WaitingPeer{Meta: peer.Meta(), NextTrial: time.Now().Add(firstReconnectColltime)} + //dpm.pm.addAwait(peer.Meta()) + } +} + +func (dpm *dynamicWPManager) OnDiscoveredPeers(metas []p2pcommon.PeerMeta) int { + addedWP := 0 + for _, meta := range metas { + if _, ok := dpm.pm.remotePeers[meta.ID]; ok { + // skip connected peer + continue + } else if _, ok := dpm.pm.waitingPeers[meta.ID]; ok { + // skip already waiting peer + continue + } + // TODO check blacklist later. + dpm.pm.waitingPeers[meta.ID] = &p2pcommon.WaitingPeer{Meta: meta, NextTrial: time.Now()} + addedWP++ + } + return addedWP +} + +func (dpm *dynamicWPManager) CheckAndConnect() { + dpm.logger.Debug().Msg("checking space to connect more peers") + maxJobs := dpm.getRemainingSpaces() + if maxJobs == 0 { + return + } + dpm.connectWaitingPeers(maxJobs) +} + +func (dpm *dynamicWPManager) getRemainingSpaces() int { + // simpler version: just check the total count + // and whether there is space to add more connections + affordCnt := dpm.maxPeers - len(dpm.pm.remotePeers) - len(dpm.workingJobs) + if affordCnt <= 0 { + return 0 + } + affordWorker := dpm.basePeerManager.getRemainingSpaces() + if affordCnt < affordWorker { + return affordCnt + } else { + return affordWorker + } +} + +type inboundConnEvent struct { + meta p2pcommon.PeerMeta + p2pVer p2pcommon.P2PVersion + foundC chan bool +} + +type byNextTrial []*p2pcommon.WaitingPeer + +func (a byNextTrial) Len() int { return len(a) } +func (a byNextTrial) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byNextTrial) Less(i, j int) bool { return a[i].NextTrial.Before(a[j].NextTrial) } + +type ConnWork struct { + PeerID peer.ID + Meta p2pcommon.PeerMeta + StartTime time.Time +} + +// setNextTrial checks whether the peer is worth connecting to again, and sets the time of the next connection attempt. +// It returns true if this peer is worth trying again, or false if not.
+func setNextTrial(wp *p2pcommon.WaitingPeer) bool { + if wp.Meta.Designated { + wp.TrialCnt++ + wp.NextTrial = time.Now().Add(getNextInterval(wp.TrialCnt)) + return true + } else { + return false + } +} \ No newline at end of file diff --git a/p2p/waitpeermanager_test.go b/p2p/waitpeermanager_test.go new file mode 100644 index 000000000..84a29dee4 --- /dev/null +++ b/p2p/waitpeermanager_test.go @@ -0,0 +1,270 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package p2p + +import ( + "errors" + "github.com/aergoio/aergo/p2p/subproto" + "github.com/aergoio/aergo/types" + crypto "github.com/libp2p/go-libp2p-crypto" + "testing" + "time" + + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pmock" + "github.com/golang/mock/gomock" + "github.com/libp2p/go-libp2p-peer" +) + +const ( + OneDay = time.Hour * 24 +) + +func Test_staticWPManager_OnDiscoveredPeers(t *testing.T) { + ctrl := gomock.NewController(t) + + type args struct { + metas []p2pcommon.PeerMeta + } + tests := []struct { + name string + args args + wantCount int + }{ + {"TSingleDesign", args{desigPeers[:1]}, 0}, + {"TAllDesign", args{desigPeers}, 0}, + {"TNewID", args{unknowPeers}, 0}, + {"TMixedIDs", args{append(unknowPeers[:5], desigPeers[:5]...)}, 0}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dummyPM := createDummyPM() + mockActor := p2pmock.NewMockActorService(ctrl) + dp := NewWaitingPeerManager(logger, dummyPM, mockActor, 10, false, false).(*staticWPManager) + + dp.OnDiscoveredPeers(tt.args.metas) + if len(dummyPM.waitingPeers) != tt.wantCount { + t.Errorf("count waitingPeer %v, want %v", len(dummyPM.waitingPeers), tt.wantCount) + } + }) + } +} + +func Test_dynamicWPManager_OnDiscoveredPeers(t *testing.T) { + ctrl := gomock.NewController(t) + + type args struct { + preConnected []peer.ID + metas []p2pcommon.PeerMeta + } + tests := []struct { + name string + args args + wantCount int + }{ + {"TAllNew", args{nil, desigPeers[:1]}, 1}, + {"TAllExist", args{desigIDs, desigPeers[:5]}, 0}, + {"TMixedIDs", args{desigIDs, append(unknowPeers[:5], desigPeers[:5]...)}, 5}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dummyPM := createDummyPM() + mockActor := p2pmock.NewMockActorService(ctrl) + dp := NewWaitingPeerManager(logger, dummyPM, mockActor, 10, true, false) + for _, id := range tt.args.preConnected { + dummyPM.remotePeers[id] = &remotePeerImpl{} + dp.OnPeerConnect(id) + } + + dp.OnDiscoveredPeers(tt.args.metas) + if len(dummyPM.waitingPeers) != tt.wantCount { + t.Errorf("count waitingPeer %v, want %v", len(dummyPM.waitingPeers), tt.wantCount) + } + }) + } +} + +func Test_setNextTrial(t *testing.T) { + dummyDesignated := p2pcommon.PeerMeta{Designated: true} + + type args struct { + wp *p2pcommon.WaitingPeer + setCnt int + } + tests := []struct { + name string + args args + want bool + }{ + {"TDesig1", args{&p2pcommon.WaitingPeer{Meta: dummyDesignated}, 1}, true}, + {"TDesigSome", args{&p2pcommon.WaitingPeer{Meta: dummyDesignated}, 5}, true}, + {"TDesigMany", args{&p2pcommon.WaitingPeer{Meta: dummyDesignated}, 30}, true}, + + {"TUnknown1", args{&p2pcommon.WaitingPeer{Meta: dummyMeta}, 1}, false}, + {"TUnknownSome", args{&p2pcommon.WaitingPeer{Meta: dummyMeta}, 5}, false}, + {"TUnknownMany", args{&p2pcommon.WaitingPeer{Meta: dummyMeta}, 30}, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + lastResult := false + prevDuration := time.Duration(0) + for i := 0; i < tt.args.setCnt; i++ { + now := time.Now() 
+ lastResult = setNextTrial(tt.args.wp) + gotDuration := tt.args.wp.NextTrial.Sub(now) + // nextTrial time will be increased exponentially and clipped when the trial count exceeds the internal limit + // the clipped interval is the upper bound of the retry delay + if lastResult && + (gotDuration < prevDuration && gotDuration < OneDay) { + t.Errorf("smaller duration %v, want at least %v", gotDuration, prevDuration) + } + prevDuration = gotDuration + } + + if lastResult != tt.want { + t.Errorf("setNextTrial() = %v, want %v", lastResult, tt.want) + } + }) + } +} + +func Test_basePeerManager_tryAddPeer(t *testing.T) { + ctrl := gomock.NewController(t) + + // id0 is in both the designated peer set and the hidden peer set + desigIDs := make([]peer.ID,3) + desigPeers := make(map[peer.ID]p2pcommon.PeerMeta,3) + + hiddenIDs := make([]peer.ID,3) + hiddenPeers := make(map[peer.ID]bool) + + for i:=0;i<3;i++ { + pkey, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256) + pid, _ := peer.IDFromPrivateKey(pkey) + desigIDs[i] = pid + desigPeers[pid] = p2pcommon.PeerMeta{ID:pid} + } + hiddenIDs[0] =desigIDs[0] + hiddenPeers[desigIDs[0]] = true + + for i:=1;i<3;i++ { + pkey, _, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256) + pid, _ := peer.IDFromPrivateKey(pkey) + hiddenIDs[i] = pid + hiddenPeers[pid] = true + } + + + // test cases for adding peers + type args struct { + outbound bool + meta p2pcommon.PeerMeta + } + + tests := []struct { + name string + args args + + hsRet *types.Status + hsErr error + + wantDesign bool + wantHidden bool + wantID peer.ID + wantSucc bool + }{ + // add inbound peer + {"TIn", args{false, p2pcommon.PeerMeta{ID:dummyPeerID}}, + dummyStatus(dummyPeerID, false), nil, false, false, dummyPeerID, true}, + // add inbound designated peer + {"TInDesignated", args{false, p2pcommon.PeerMeta{ID:desigIDs[1]}}, + dummyStatus(desigIDs[1], false), nil,true, false, desigIDs[1], true}, + // add inbound hidden peer + {"TInHidden", args{false, p2pcommon.PeerMeta{ID:dummyPeerID}}, + dummyStatus(dummyPeerID, true), nil,false, true, dummyPeerID, true}, + // add inbound peer (hidden in node config) + {"TInHiddenInConf", args{false, p2pcommon.PeerMeta{ID:hiddenIDs[1]}}, + dummyStatus(hiddenIDs[1], false), nil,false, true, hiddenIDs[1], true}, + {"TInH&D", args{false, p2pcommon.PeerMeta{ID:hiddenIDs[0], Hidden:true}}, + dummyStatus(hiddenIDs[0], true), nil,true, true, hiddenIDs[0], true}, + + // add outbound peer + {"TOut", args{true, p2pcommon.PeerMeta{ID:dummyPeerID}}, + dummyStatus(dummyPeerID, false), nil, false, false, dummyPeerID, true}, + // add outbound designated peer + {"TOutDesignated", args{true, p2pcommon.PeerMeta{ID:desigIDs[1]}}, + dummyStatus(desigIDs[1], false), nil,true, false, desigIDs[1], true}, + // add outbound hidden peer + {"TOutHidden", args{true, p2pcommon.PeerMeta{ID:dummyPeerID}}, + dummyStatus(dummyPeerID, true), nil,false, true, dummyPeerID, true}, + // add outbound peer (hidden in node config) + {"TOutHiddenInConf", args{true, p2pcommon.PeerMeta{ID:hiddenIDs[1]}}, + dummyStatus(hiddenIDs[1], false), nil,false, true, hiddenIDs[1], true}, + {"TOutH&D", args{true, p2pcommon.PeerMeta{ID:hiddenIDs[0], Hidden:true}}, + dummyStatus(hiddenIDs[0], true), nil,true, true, hiddenIDs[0], true}, + + // failed to handshake + {"TErrHandshake", args{false, p2pcommon.PeerMeta{ID:dummyPeerID}}, + nil, errors.New("handshake err"), false, false, dummyPeerID, false}, + // invalid status information + {"TErrDiffPeerID", args{false, p2pcommon.PeerMeta{ID:dummyPeerID}}, + dummyStatus(dummyPeerID2, false), nil, false, false, dummyPeerID, false}, + + } + for _, tt
:= range tests { + t.Run(tt.name, func(t *testing.T) { + mockStream := p2pmock.NewMockStream(ctrl) + mockHSFactory := p2pmock.NewMockHSHandlerFactory(ctrl) + mockHSHandler := p2pmock.NewMockHSHandler(ctrl) + mockRW := p2pmock.NewMockMsgReadWriter(ctrl) + //mockHSFactory.EXPECT().CreateHSHandler(gomock.Any(), tt.args.outbound, tt.args.meta.ID).Return(mockHSHandler) + mockHSHandler.EXPECT().Handle(gomock.Any(),gomock.Any(),gomock.Any()).Return(mockRW, tt.hsRet,tt.hsErr) + mockHandlerFactory := p2pmock.NewMockHandlerFactory(ctrl) + mockHandlerFactory.EXPECT().InsertHandlers(gomock.AssignableToTypeOf(&remotePeerImpl{})).MaxTimes(1) + + // in cases of handshake error + mockMF := p2pmock.NewMockMoFactory(ctrl) + mockMF.EXPECT().NewMsgRequestOrder(false, subproto.GoAway, gomock.Any()).Return(&pbRequestOrder{}).MaxTimes(1) + mockRW.EXPECT().WriteMsg(gomock.Any()).MaxTimes(1) + + pm := &peerManager{ + mf: mockMF, + hsFactory: mockHSFactory, + designatedPeers: desigPeers, + hiddenPeerSet: hiddenPeers, + handlerFactory:mockHandlerFactory, + peerHandshaked: make(chan p2pcommon.RemotePeer, 10), + } + dpm := &basePeerManager{ + pm: pm, + logger: logger, + } + got, got1 := dpm.tryAddPeer(tt.args.outbound, tt.args.meta, mockStream, mockHSHandler) + if got1 != tt.wantSucc { + t.Errorf("basePeerManager.tryAddPeer() got1 = %v, want %v", got1, tt.wantSucc) + } + if tt.wantSucc { + if got.ID != tt.wantID { + t.Errorf("basePeerManager.tryAddPeer() got ID = %v, want %v", got.ID, tt.wantID) + } + if got.Outbound != tt.args.outbound { + t.Errorf("basePeerManager.tryAddPeer() got bound = %v, want %v", got.Outbound, tt.args.outbound) + } + if got.Designated != tt.wantDesign { + t.Errorf("basePeerManager.tryAddPeer() got Designated = %v, want %v", got.Designated, tt.wantDesign) + } + if got.Hidden != tt.wantHidden { + t.Errorf("basePeerManager.tryAddPeer() got Hidden = %v, want %v", got.Hidden, tt.wantHidden) + } + } + + }) + } +} + +func dummyStatus(id peer.ID, noexpose bool) *types.Status { + return &types.Status{Sender:&types.PeerAddress{PeerID:[]byte(id)},NoExpose:noexpose} +} \ No newline at end of file diff --git a/p2p/pmap/polarisconnect.go b/polaris/client/polarisconnect.go similarity index 82% rename from p2p/pmap/polarisconnect.go rename to polaris/client/polarisconnect.go index 746596cb8..705f49147 100644 --- a/p2p/pmap/polarisconnect.go +++ b/polaris/client/polarisconnect.go @@ -3,21 +3,23 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package client import ( "bufio" "fmt" + "github.com/aergoio/aergo/p2p/p2pkey" + "github.com/aergoio/aergo/p2p/subproto" + "github.com/aergoio/aergo/p2p/v030" + "github.com/aergoio/aergo/polaris/common" "sync" "github.com/aergoio/aergo-actor/actor" "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/config" "github.com/aergoio/aergo/message" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2putil" - "github.com/aergoio/aergo/p2p/subproto" "github.com/aergoio/aergo/pkg/component" "github.com/aergoio/aergo/types" "github.com/libp2p/go-libp2p-net" @@ -60,18 +62,18 @@ func (pcs *PolarisConnectSvc) initSvc(cfg *config.P2PConfig) { if !pcs.PrivateChain { servers := make([]string, 0) // add hardcoded built-in servers if net is ONE net. 
- if *pcs.ntc.ChainID() == ONEMainNet { + if *pcs.ntc.ChainID() == common.ONEMainNet { pcs.Logger.Info().Msg("chain is ONE Mainnet so use default polaris for mainnet") - servers = MainnetMapServer - } else if *pcs.ntc.ChainID() == ONETestNet { + servers = common.MainnetMapServer + } else if *pcs.ntc.ChainID() == common.ONETestNet { pcs.Logger.Info().Msg("chain is ONE Testnet so use default polaris for testnet") - servers = TestnetMapServer + servers = common.TestnetMapServer } else { pcs.Logger.Info().Msg("chain is custom public network so only custom polaris in configuration file will be used") } for _, addrStr := range servers { - meta, err := p2p.ParseMultiAddrString(addrStr) + meta, err := p2putil.ParseMultiAddrString(addrStr) if err != nil { pcs.Logger.Info().Str("addr_str", addrStr).Msg("invalid polaris server address in base setting ") continue @@ -83,7 +85,7 @@ func (pcs *PolarisConnectSvc) initSvc(cfg *config.P2PConfig) { } // append custom polarises set in configuration file for _, addrStr := range cfg.NPAddPolarises { - meta, err := p2p.ParseMultiAddrString(addrStr) + meta, err := p2putil.ParseMultiAddrString(addrStr) if err != nil { pcs.Logger.Info().Str("addr_str", addrStr).Msg("invalid polaris server address in config file ") continue @@ -94,7 +96,7 @@ func (pcs *PolarisConnectSvc) initSvc(cfg *config.P2PConfig) { if len(pcs.mapServers) == 0 { pcs.Logger.Warn().Msg("using polais is enabled but no active polaris server found. node discovery by polaris is disabled") } else { - pcs.Logger.Info().Array("polarises",p2putil.NewLogPeerMetasMarshaler(pcs.mapServers,10)).Msg("using polaris") + pcs.Logger.Info().Array("polarises", p2putil.NewLogPeerMetasMarshaler(pcs.mapServers, 10)).Msg("using polaris") } } else { pcs.Logger.Info().Msg("node discovery by polaris is disabled configuration.") @@ -105,12 +107,12 @@ func (pcs *PolarisConnectSvc) BeforeStart() {} func (pcs *PolarisConnectSvc) AfterStart() { pcs.nt = pcs.ntc.GetNetworkTransport() - pcs.nt.AddStreamHandler(PolarisPingSub, pcs.onPing) + pcs.nt.AddStreamHandler(common.PolarisPingSub, pcs.onPing) } func (pcs *PolarisConnectSvc) BeforeStop() { - pcs.nt.RemoveStreamHandler(PolarisPingSub) + pcs.nt.RemoveStreamHandler(common.PolarisPingSub) } func (pcs *PolarisConnectSvc) Statistics() *map[string]interface{} { @@ -152,7 +154,7 @@ func (pcs *PolarisConnectSvc) queryPeers(msg *message.MapQueryMsg) *message.MapQ } func (pcs *PolarisConnectSvc) connectAndQuery(mapServerMeta p2pcommon.PeerMeta, bestHash []byte, bestHeight uint64) ([]*types.PeerAddress, error) { - s, err := pcs.nt.GetOrCreateStreamWithTTL(mapServerMeta, PolarisMapSub, PolarisConnectionTTL) + s, err := pcs.nt.GetOrCreateStreamWithTTL(mapServerMeta, common.PolarisConnectionTTL, common.PolarisMapSub) if err != nil { return nil, err } @@ -164,11 +166,12 @@ func (pcs *PolarisConnectSvc) connectAndQuery(mapServerMeta p2pcommon.PeerMeta, } pcs.Logger.Debug().Str(p2putil.LogPeerID, peerID.String()).Msg("Sending map query") - rw := p2p.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) + rw := v030.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) peerAddress := pcs.nt.SelfMeta().ToPeerAddress() chainBytes, _ := pcs.ntc.ChainID().Bytes() - peerStatus := &types.Status{Sender: &peerAddress, BestBlockHash: bestHash, BestHeight: bestHeight, ChainID: chainBytes} + peerStatus := &types.Status{Sender: &peerAddress, BestBlockHash: bestHash, BestHeight: bestHeight, ChainID: chainBytes, + Version:p2pkey.NodeVersion()} // receive input err = pcs.sendRequest(peerStatus, 
mapServerMeta, pcs.exposeself, 100, rw) if err != nil { @@ -187,12 +190,13 @@ func (pcs *PolarisConnectSvc) connectAndQuery(mapServerMeta p2pcommon.PeerMeta, func (pcs *PolarisConnectSvc) sendRequest(status *types.Status, mapServerMeta p2pcommon.PeerMeta, register bool, size int, wt p2pcommon.MsgWriter) error { msgID := p2pcommon.NewMsgID() queryReq := &types.MapQuery{Status: status, Size: int32(size), AddMe: register, Excludes: [][]byte{[]byte(mapServerMeta.ID)}} - respMsg, err := createV030Message(msgID, EmptyMsgID, MapQuery, queryReq) + bytes, err := p2putil.MarshalMessageBody(queryReq) if err != nil { return err } + reqMsg := common.NewPolarisMessage(msgID, common.MapQuery, bytes) - return wt.WriteMsg(respMsg) + return wt.WriteMsg(reqMsg) } // tryAddPeer will do check connecting peer and add. it will return peer meta information received from @@ -203,7 +207,7 @@ func (pcs *PolarisConnectSvc) readResponse(mapServerMeta p2pcommon.PeerMeta, rd return nil, nil, err } queryResp := &types.MapResponse{} - err = p2putil.UnmarshalMessage(data.Payload(), queryResp) + err = p2putil.UnmarshalMessageBody(data.Payload(), queryResp) if err != nil { return data, nil, err } @@ -216,7 +220,7 @@ func (pcs *PolarisConnectSvc) onPing(s net.Stream) { peerID := s.Conn().RemotePeer() pcs.Logger.Debug().Str(p2putil.LogPeerID, peerID.String()).Msg("Received ping from polaris (maybe)") - rw := p2p.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) + rw := v030.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) defer s.Close() req, err := rw.ReadMsg() @@ -224,18 +228,18 @@ func (pcs *PolarisConnectSvc) onPing(s net.Stream) { return } pingReq := &types.Ping{} - err = p2putil.UnmarshalMessage(req.Payload(), pingReq) + err = p2putil.UnmarshalMessageBody(req.Payload(), pingReq) if err != nil { return } // TODO: check if sender is known polaris or peer and it not, ban or write to blacklist . 
pingResp := &types.Ping{} - msgID := p2pcommon.NewMsgID() - respMsg, err := createV030Message(msgID, req.ID(), subproto.PingResponse, pingResp) + bytes, err := p2putil.MarshalMessageBody(pingResp) if err != nil { return } - + msgID := p2pcommon.NewMsgID() + respMsg := common.NewPolarisRespMessage(msgID, req.ID(), subproto.PingResponse, bytes) err = rw.WriteMsg(respMsg) if err != nil { return diff --git a/polaris/client/polarisconnect_test.go b/polaris/client/polarisconnect_test.go new file mode 100644 index 000000000..97589033f --- /dev/null +++ b/polaris/client/polarisconnect_test.go @@ -0,0 +1,138 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package client + +import ( + "github.com/aergoio/aergo/polaris/common" + "testing" + + "github.com/aergoio/aergo/config" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pmock" + "github.com/aergoio/aergo/pkg/component" + "github.com/aergoio/aergo/types" + "github.com/golang/mock/gomock" + peer "github.com/libp2p/go-libp2p-peer" +) + + +type dummyNTC struct { + nt p2pcommon.NetworkTransport + chainID *types.ChainID +} + +func (dntc *dummyNTC) GetNetworkTransport() p2pcommon.NetworkTransport { + return dntc.nt +} +func (dntc *dummyNTC) ChainID() *types.ChainID { + return dntc.chainID +} + +var ( + pmapDummyCfg = &config.Config{P2P: &config.P2PConfig{}, Polaris: &config.PolarisConfig{GenesisFile: "../../examples/genesis.json"}} + pmapDummyNTC = &dummyNTC{chainID: &types.ChainID{}} +) + +// initSvc select Polarises to connect, or disable polaris +func TestPolarisConnectSvc_initSvc(t *testing.T) { + polarisIDMain, _ := peer.IDB58Decode("16Uiu2HAkuxyDkMTQTGFpmnex2SdfTVzYfPztTyK339rqUdsv3ZUa") + polarisIDTest, _ := peer.IDB58Decode("16Uiu2HAkvJTHFuJXxr15rFEHsJWnyn1QvGatW2E9ED9Mvy4HWjVF") + dummyPeerID2, _ := peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") + polar2 := "/ip4/172.21.1.2/tcp/8915/p2p/16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm" + dummyPeerID3, _ := peer.IDB58Decode("16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD") + polar3 := "/ip4/172.22.2.3/tcp/8915/p2p/16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD" + + customChainID := types.ChainID{Magic: "unittest.blocko.io"} + type args struct { + use bool + polarises []string + + chainID *types.ChainID + } + tests := []struct { + name string + args args + + wantCnt int + peerIDs []peer.ID + }{ + // + {"TAergoNoPolaris", args{false, nil, &common.ONEMainNet}, 0, []peer.ID{}}, + {"TAergoMainDefault", args{true, nil, &common.ONEMainNet}, 1, []peer.ID{polarisIDMain}}, + {"TAergoMainPlusCfg", args{true, []string{polar2, polar3}, &common.ONEMainNet}, 3, []peer.ID{polarisIDMain, dummyPeerID2, dummyPeerID3}}, + {"TAergoTestDefault", args{true, nil, &common.ONETestNet}, 1, []peer.ID{polarisIDTest}}, + {"TAergoTestPlusCfg", args{true, []string{polar2, polar3}, &common.ONETestNet}, 3, []peer.ID{polarisIDTest, dummyPeerID2, dummyPeerID3}}, + {"TCustom", args{true, nil, &customChainID}, 0, []peer.ID{}}, + {"TCustomPlusCfg", args{true, []string{polar2, polar3}, &customChainID}, 2, []peer.ID{dummyPeerID2, dummyPeerID3}}, + {"TWrongPolarisAddr", args{true, []string{"/ip4/256.256.1.1/tcp/8915/p2p/16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD"}, &customChainID}, 0, []peer.ID{}}, + {"TWrongPolarisAddr2", args{true, []string{"/egwgew5/p2p/16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD"}, &customChainID}, 0, []peer.ID{}}, + {"TWrongPolarisAddr3", args{true, 
[]string{"/dns/nowhere1234.io/tcp/8915/p2p/16Uiu2HAmU8Wc925gZ5QokM4sGDKjysdPwRCQFoYobvoVnyutccCD"}, &customChainID}, 0, []peer.ID{}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + mockNT := p2pmock.NewMockNetworkTransport(ctrl) + pmapDummyNTC.nt = mockNT + pmapDummyNTC.chainID = tt.args.chainID + + cfg := config.NewServerContext("", "").GetDefaultP2PConfig() + cfg.NPUsePolaris = tt.args.use + cfg.NPAddPolarises = tt.args.polarises + + pcs := NewPolarisConnectSvc(cfg, pmapDummyNTC) + + if len(pcs.mapServers) != tt.wantCnt { + t.Errorf("NewPolarisConnectSvc() = %v, want %v", len(pcs.mapServers), tt.wantCnt) + } + for _, wantPeerID := range tt.peerIDs { + found := false + for _, polarisMeta := range pcs.mapServers { + if wantPeerID == polarisMeta.ID { + found = true + break + } + } + if !found { + t.Errorf("initSvc() want exist %v but not ", wantPeerID) + } + } + }) + } +} + +func TestPolarisConnectSvc_BeforeStop(t *testing.T) { + + type fields struct { + BaseComponent *component.BaseComponent + } + tests := []struct { + name string + fields fields + + calledStreamHandler bool + }{ + {"TNot", fields{}, false}, + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + + mockNT := p2pmock.NewMockNetworkTransport(ctrl) + pmapDummyNTC.nt = mockNT + pms := NewPolarisConnectSvc(pmapDummyCfg.P2P, pmapDummyNTC) + + mockNT.EXPECT().AddStreamHandler(common.PolarisPingSub, gomock.Any()).Times(1) + mockNT.EXPECT().RemoveStreamHandler(common.PolarisPingSub).Times(1) + + pms.AfterStart() + + pms.BeforeStop() + + ctrl.Finish() + }) + } +} diff --git a/polaris/common/consts.go b/polaris/common/consts.go new file mode 100644 index 000000000..bfed61e15 --- /dev/null +++ b/polaris/common/consts.go @@ -0,0 +1,39 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package common + +import "github.com/aergoio/aergo/types" + +var ( + // 89.16 is ceiling of declination of Polaris + MainnetMapServer = []string{ + "/dns/mainnet-polaris.aergo.io/tcp/8916/p2p/16Uiu2HAkuxyDkMTQTGFpmnex2SdfTVzYfPztTyK339rqUdsv3ZUa", + } + + // 89.16 is ceiling of declination of Polaris + TestnetMapServer = []string{ + "/dns/polaris.aergo.io/tcp/8916/p2p/16Uiu2HAkvJTHFuJXxr15rFEHsJWnyn1QvGatW2E9ED9Mvy4HWjVF", + } + + // Hardcoded chainID of ONE MAINNET and ONE TESTNET + ONEMainNet types.ChainID + ONETestNet types.ChainID +) + +func init() { + mnGen := types.GetMainNetGenesis() + if mnGen == nil { + panic("Failed to get MainNet GenesisInfo") + } + ONEMainNet = mnGen.ID + + tnGen := types.GetTestNetGenesis() + if tnGen == nil { + panic("Failed to get TestNet GenesisInfo") + } + ONETestNet = tnGen.ID +} + diff --git a/polaris/common/message.go b/polaris/common/message.go new file mode 100644 index 000000000..e8efc7d5a --- /dev/null +++ b/polaris/common/message.go @@ -0,0 +1,64 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package common + +import ( + "github.com/aergoio/aergo/p2p/p2pcommon" + "time" +) + +// PolarisMessage is data struct for transferring between polaris server and client. +// as of 2019.04.23, this is copy of MessageValue. +type PolarisMessage struct { + subProtocol p2pcommon.SubProtocol + // Length is lenght of payload + length uint32 + // timestamp is unix time (precision of second) + timestamp int64 + // ID is 16 bytes unique identifier + id p2pcommon.MsgID + // OriginalID is message id of request which trigger this message. 
it will be all zero, if message is request or notice. + originalID p2pcommon.MsgID + + // marshaled by google protocol buffer v3. object is determined by Subprotocol + payload []byte +} + + +// NewPolarisMessage create a new object +func NewPolarisMessage(msgID p2pcommon.MsgID, protocol p2pcommon.SubProtocol, payload []byte) *PolarisMessage { + return &PolarisMessage{id: msgID, timestamp:time.Now().UnixNano(), subProtocol:protocol,payload:payload,length:uint32(len(payload))} +} +func NewPolarisRespMessage(msgID , orgReqID p2pcommon.MsgID, protocol p2pcommon.SubProtocol, payload []byte) *PolarisMessage { + return &PolarisMessage{id: msgID, originalID:orgReqID, timestamp:time.Now().UnixNano(), subProtocol:protocol,payload:payload,length:uint32(len(payload))} +} + +func (m *PolarisMessage) Subprotocol() p2pcommon.SubProtocol { + return m.subProtocol +} + +func (m *PolarisMessage) Length() uint32 { + return m.length + +} + +func (m *PolarisMessage) Timestamp() int64 { + return m.timestamp +} + +func (m *PolarisMessage) ID() p2pcommon.MsgID { + return m.id +} + +func (m *PolarisMessage) OriginalID() p2pcommon.MsgID { + return m.originalID +} + +func (m *PolarisMessage) Payload() []byte { + return m.payload +} + +var _ p2pcommon.Message = (*PolarisMessage)(nil) diff --git a/p2p/pmap/polarisprotocol.go b/polaris/common/polarisprotocol.go similarity index 86% rename from p2p/pmap/polarisprotocol.go rename to polaris/common/polarisprotocol.go index e6a4f9bf9..9bfb95674 100644 --- a/p2p/pmap/polarisprotocol.go +++ b/polaris/common/polarisprotocol.go @@ -3,11 +3,12 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package common import ( "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/libp2p/go-libp2p-protocol" + "time" ) const ( @@ -26,3 +27,6 @@ const ( MapQuery p2pcommon.SubProtocol = 0x0100 + iota MapResponse ) + +const PolarisConnectionTTL = time.Second * 30 + diff --git a/p2p/pmap/genesisreader.go b/polaris/server/genesisreader.go similarity index 98% rename from p2p/pmap/genesisreader.go rename to polaris/server/genesisreader.go index 95515c11e..08e066313 100644 --- a/p2p/pmap/genesisreader.go +++ b/polaris/server/genesisreader.go @@ -3,7 +3,7 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( "encoding/json" diff --git a/p2p/pmap/genesisreader_test.go b/polaris/server/genesisreader_test.go similarity index 98% rename from p2p/pmap/genesisreader_test.go rename to polaris/server/genesisreader_test.go index 66d8844c0..4681cc527 100644 --- a/p2p/pmap/genesisreader_test.go +++ b/polaris/server/genesisreader_test.go @@ -3,7 +3,7 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( "testing" diff --git a/p2p/pmap/healthcheck.go b/polaris/server/healthcheck.go similarity index 94% rename from p2p/pmap/healthcheck.go rename to polaris/server/healthcheck.go index 11ecd77b8..9ef1920d5 100644 --- a/p2p/pmap/healthcheck.go +++ b/polaris/server/healthcheck.go @@ -3,9 +3,10 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( + "github.com/aergoio/aergo/polaris/common" "sync" "time" @@ -74,7 +75,7 @@ func (hcm *healthCheckManager) checkPeers() { wg.Add(len(toCheck)) for _, ps := range toCheck { // TODO make a pool and limit count of concurrent pings - go ps.check(wg, PolarisConnectionTTL) + go ps.check(wg, common.PolarisConnectionTTL) } wg.Wait() hcm.logger.Debug().Msg("Finished checks") diff --git a/p2p/pmap/healthcheck_test.go b/polaris/server/healthcheck_test.go similarity index 99% 
rename from p2p/pmap/healthcheck_test.go rename to polaris/server/healthcheck_test.go index f2285eac0..13d671253 100644 --- a/p2p/pmap/healthcheck_test.go +++ b/polaris/server/healthcheck_test.go @@ -3,7 +3,7 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( "reflect" diff --git a/p2p/pmap/litentcontainer.go b/polaris/server/litentcontainer.go similarity index 93% rename from p2p/pmap/litentcontainer.go rename to polaris/server/litentcontainer.go index af0febf55..9e839d3c0 100644 --- a/p2p/pmap/litentcontainer.go +++ b/polaris/server/litentcontainer.go @@ -3,14 +3,15 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( + "github.com/aergoio/aergo/p2p/transport" "sync" "time" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pkey" peer "github.com/libp2p/go-libp2p-peer" "github.com/aergoio/aergo-actor/actor" @@ -101,7 +102,7 @@ func (lntc *LiteContainerService) init(cfg *config.Config) { lntc.Logger.Info().Str("genesis", chainID.ToJSON()).Msg("genesis block loaded") - netTransport := p2p.NewNetworkTransport(cfg.P2P, lntc.Logger) + netTransport := transport.NewNetworkTransport(cfg.P2P, lntc.Logger) lntc.mutex.Lock() lntc.nt = netTransport @@ -122,7 +123,7 @@ func (lntc *LiteContainerService) Receive(context actor.Context) { // TODO need refactoring. this code is copied from subprotcoladdrs.go func (lntc *LiteContainerService) checkAndAddPeerAddresses(peers []*types.PeerAddress) { - selfPeerID := lntc.nt.SelfNodeID() + selfPeerID := p2pkey.NodeID() peerMetas := make([]p2pcommon.PeerMeta, 0, len(peers)) for _, rPeerAddr := range peers { rPeerID := peer.ID(rPeerAddr.PeerID) @@ -151,7 +152,7 @@ func (lntc *LiteContainerService) FutureRequest(actor string, msg interface{}, t // FutureRequestDefaultTimeout implement interface method of ActorService func (lntc *LiteContainerService) FutureRequestDefaultTimeout(actor string, msg interface{}) *actor.Future { - return lntc.RequestToFuture(actor, msg, p2p.DefaultActorMsgTTL) + return lntc.RequestToFuture(actor, msg, p2pcommon.DefaultActorMsgTTL) } // CallRequest implement interface method of ActorService @@ -162,6 +163,6 @@ func (lntc *LiteContainerService) CallRequest(actor string, msg interface{}, tim // CallRequest implement interface method of ActorService func (lntc *LiteContainerService) CallRequestDefaultTimeout(actor string, msg interface{}) (interface{}, error) { - future := lntc.RequestToFuture(actor, msg, p2p.DefaultActorMsgTTL) + future := lntc.RequestToFuture(actor, msg, p2pcommon.DefaultActorMsgTTL) return future.Result() } diff --git a/p2p/pmap/mapservice.go b/polaris/server/mapservice.go similarity index 88% rename from p2p/pmap/mapservice.go rename to polaris/server/mapservice.go index 187730b7b..ee2b3158a 100644 --- a/p2p/pmap/mapservice.go +++ b/polaris/server/mapservice.go @@ -3,11 +3,12 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( "bufio" "fmt" + "github.com/aergoio/aergo/p2p/v030" "math" "sync" "time" @@ -16,22 +17,20 @@ import ( "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/config" "github.com/aergoio/aergo/message" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/subproto" "github.com/aergoio/aergo/pkg/component" + "github.com/aergoio/aergo/polaris/common" "github.com/aergoio/aergo/types" "github.com/gofrs/uuid" - "github.com/golang/protobuf/proto" inet 
"github.com/libp2p/go-libp2p-net" peer "github.com/libp2p/go-libp2p-peer" ) // internal const ( - PolarisConnectionTTL = time.Second * 30 - PolarisPingTTL = PolarisConnectionTTL >> 1 + PolarisPingTTL = common.PolarisConnectionTTL >> 1 // polaris will return peers list at most this number ResponseMaxPeerLimit = 500 @@ -47,36 +46,6 @@ var ( EmptyMsgID = p2pcommon.MsgID(uuid.Nil) ) -var ( - // 89.16 is ceiling of declination of Polaris - MainnetMapServer = []string{ - "/dns/mainnet-polaris.aergo.io/tcp/8916/p2p/16Uiu2HAkuxyDkMTQTGFpmnex2SdfTVzYfPztTyK339rqUdsv3ZUa", - } - - // 89.16 is ceiling of declination of Polaris - TestnetMapServer = []string{ - "/dns/polaris.aergo.io/tcp/8916/p2p/16Uiu2HAkvJTHFuJXxr15rFEHsJWnyn1QvGatW2E9ED9Mvy4HWjVF", - } - - // Hardcoded chainID of ONE MAINNET and ONE TESTNET - ONEMainNet types.ChainID - ONETestNet types.ChainID -) - -func init() { - mnGen := types.GetMainNetGenesis() - if mnGen == nil { - panic("Failed to get MainNet GenesisInfo") - } - ONEMainNet = mnGen.ID - - tnGen := types.GetTestNetGenesis() - if tnGen == nil { - panic("Failed to get TestNet GenesisInfo") - } - ONETestNet = tnGen.ID -} - type mapService interface { getPeerCheckers() []peerChecker registerPeer(receivedMeta p2pcommon.PeerMeta) error @@ -130,15 +99,15 @@ func (pms *PeerMapService) BeforeStart() {} func (pms *PeerMapService) AfterStart() { pms.nt = pms.ntc.GetNetworkTransport() - pms.Logger.Info().Str("version", string(PolarisMapSub)).Msg("Starting polaris listening") - pms.nt.AddStreamHandler(PolarisMapSub, pms.onConnect) + pms.Logger.Info().Str("version", string(common.PolarisMapSub)).Msg("Starting polaris listening") + pms.nt.AddStreamHandler(common.PolarisMapSub, pms.onConnect) pms.hc.Start() } func (pms *PeerMapService) BeforeStop() { if pms.nt != nil { pms.hc.Stop() - pms.nt.RemoveStreamHandler(PolarisMapSub) + pms.nt.RemoveStreamHandler(common.PolarisMapSub) } } @@ -154,7 +123,7 @@ func (pms *PeerMapService) onConnect(s inet.Stream) { remotePeerMeta := p2pcommon.PeerMeta{ID: peerID} pms.Logger.Debug().Str("addr", remoteAddrStr).Str(p2putil.LogPeerID, peerID.String()).Msg("Received map query") - rw := p2p.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) + rw := v030.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) defer s.Close() // receive input @@ -191,7 +160,7 @@ func (pms *PeerMapService) readRequest(meta p2pcommon.PeerMeta, rd p2pcommon.Msg return nil, nil, err } queryReq := &types.MapQuery{} - err = p2putil.UnmarshalMessage(data.Payload(), queryReq) + err = p2putil.UnmarshalMessageBody(data.Payload(), queryReq) if err != nil { return data, nil, err } @@ -205,6 +174,7 @@ func (pms *PeerMapService) handleQuery(container p2pcommon.Message, query *types return nil, fmt.Errorf("malformed query %v", query) } receivedMeta := p2pcommon.FromPeerAddress(query.Status.Sender) + receivedMeta.Version = query.Status.Version maxPeers := int(query.Size) if maxPeers <= 0 { return nil, fmt.Errorf("invalid argument count %d", maxPeers) @@ -276,7 +246,7 @@ func (pms *PeerMapService) registerPeer(receivedMeta p2pcommon.PeerMeta) error { prev, ok := pms.peerRegistry[peerID] if !ok { newState := &peerState{connected: now, PeerMapService: pms, meta: receivedMeta, addr: receivedMeta.ToPeerAddress(), lCheckTime: now} - pms.Logger.Info().Str("meta", p2putil.ShortMetaForm(receivedMeta)).Msg("Registering new peer info") + pms.Logger.Info().Str("meta", p2putil.ShortMetaForm(receivedMeta)).Str("version",receivedMeta.GetVersion()).Msg("Registering new peer info") 
pms.peerRegistry[peerID] = newState } else { if prev.meta != receivedMeta { @@ -299,7 +269,7 @@ func (pms *PeerMapService) unregisterPeer(peerID peer.ID) { func (pms *PeerMapService) writeResponse(reqContainer p2pcommon.Message, meta p2pcommon.PeerMeta, resp *types.MapResponse, wt p2pcommon.MsgWriter) error { msgID := p2pcommon.NewMsgID() - respMsg, err := createV030Message(msgID, reqContainer.ID(), MapResponse, resp) + respMsg, err := createV030Message(msgID, reqContainer.ID(), common.MapResponse, resp) if err != nil { return err } @@ -308,13 +278,13 @@ func (pms *PeerMapService) writeResponse(reqContainer p2pcommon.Message, meta p2 } // TODO code duplication. it can result in a bug. -func createV030Message(msgID, orgID p2pcommon.MsgID, subProtocol p2pcommon.SubProtocol, innerMsg proto.Message) (*p2p.V030Message, error) { - bytes, err := p2putil.MarshalMessage(innerMsg) +func createV030Message(msgID, orgID p2pcommon.MsgID, subProtocol p2pcommon.SubProtocol, innerMsg p2pcommon.MessageBody) (p2pcommon.Message, error) { + bytes, err := p2putil.MarshalMessageBody(innerMsg) if err != nil { return nil, err } - msg := p2p.NewV030Message(msgID, orgID, time.Now().UnixNano(), subProtocol, bytes) + msg := common.NewPolarisRespMessage(msgID, orgID, subProtocol, bytes) return msg, nil } @@ -339,7 +309,7 @@ func (pms *PeerMapService) onPing(s inet.Stream) { peerID := s.Conn().RemotePeer() pms.Logger.Debug().Str(p2putil.LogPeerID, peerID.String()).Msg("Received ping from polaris (maybe)") - rw := p2p.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) + rw := v030.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) defer s.Close() req, err := rw.ReadMsg() @@ -347,7 +317,7 @@ func (pms *PeerMapService) onPing(s inet.Stream) { return } pingReq := &types.Ping{} - err = p2putil.UnmarshalMessage(req.Payload(), pingReq) + err = p2putil.UnmarshalMessageBody(req.Payload(), pingReq) if err != nil { return } @@ -375,7 +345,7 @@ func (pms *PeerMapService) getCurrentPeers(param *message.CurrentListMsg) *types pms.rwmutex.Lock() pms.rwmutex.Unlock() for _, rPeer := range pms.peerRegistry { - pList[addSize] = &types.PolarisPeer{Address: &rPeer.addr, Connected: rPeer.connected.UnixNano(), LastCheck: rPeer.lastCheck().UnixNano()} + pList[addSize] = &types.PolarisPeer{Address: &rPeer.addr, Connected: rPeer.connected.UnixNano(), LastCheck: rPeer.lastCheck().UnixNano(), Verion:rPeer.meta.Version} addSize++ if addSize >= listSize { break diff --git a/p2p/pmap/mapservice_test.go b/polaris/server/mapservice_test.go similarity index 96% rename from p2p/pmap/mapservice_test.go rename to polaris/server/mapservice_test.go index 4ebdd291d..b2834d83f 100644 --- a/p2p/pmap/mapservice_test.go +++ b/polaris/server/mapservice_test.go @@ -3,7 +3,7 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( "fmt" @@ -12,10 +12,10 @@ import ( "testing" "github.com/aergoio/aergo/config" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2pmock" "github.com/aergoio/aergo/pkg/component" + "github.com/aergoio/aergo/polaris/common" "github.com/aergoio/aergo/types" "github.com/golang/mock/gomock" "github.com/golang/protobuf/proto" @@ -64,8 +64,8 @@ func TestPeerMapService_BeforeStop(t *testing.T) { pmapDummyNTC.nt = mockNT pms := NewPolarisService(pmapDummyCfg, pmapDummyNTC) - mockNT.EXPECT().AddStreamHandler(PolarisMapSub, gomock.Any()).Times(1) - mockNT.EXPECT().RemoveStreamHandler(PolarisMapSub).Times(1) + 
mockNT.EXPECT().AddStreamHandler(common.PolarisMapSub, gomock.Any()).Times(1) + mockNT.EXPECT().RemoveStreamHandler(common.PolarisMapSub).Times(1) pms.AfterStart() @@ -99,12 +99,12 @@ func TestPeerMapService_readRequest(t *testing.T) { ctrl := gomock.NewController(t) mockNT := p2pmock.NewMockNetworkTransport(ctrl) pmapDummyNTC.nt = mockNT - mockNT.EXPECT().AddStreamHandler(PolarisMapSub, gomock.Any()).Times(1) + mockNT.EXPECT().AddStreamHandler(common.PolarisMapSub, gomock.Any()).Times(1) pms := NewPolarisService(pmapDummyCfg, pmapDummyNTC) pms.AfterStart() - msgStub := &p2p.V030Message{} + msgStub := &p2pcommon.MessageValue{} mockRd := p2pmock.NewMockMsgReader(ctrl) mockRd.EXPECT().ReadMsg().Times(1).Return(msgStub, tt.args.readErr) @@ -135,9 +135,9 @@ func TestPeerMapService_readRequest(t *testing.T) { } func TestPeerMapService_handleQuery(t *testing.T) { - mainnetbytes, err := ONEMainNet.Bytes() + mainnetbytes, err := common.ONEMainNet.Bytes() if err != nil { - t.Error("mainnet var is not set properly", ONEMainNet) + t.Error("mainnet var is not set properly", common.ONEMainNet) } dummyPeerID2, err := peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") @@ -175,10 +175,10 @@ func TestPeerMapService_handleQuery(t *testing.T) { mockStream := p2pmock.NewMockStream(ctrl) mockStream.EXPECT().Write(gomock.Any()).MaxTimes(1).Return(100, nil) mockStream.EXPECT().Close().MaxTimes(1).Return(nil) - pmapDummyNTC.chainID = &ONEMainNet + pmapDummyNTC.chainID = &common.ONEMainNet pmapDummyNTC.nt = mockNT mockNT.EXPECT().AddStreamHandler(gomock.Any(), gomock.Any()) - mockNT.EXPECT().GetOrCreateStreamWithTTL(gomock.Any(), PolarisPingSub, gomock.Any()).Return(mockStream, nil) + mockNT.EXPECT().GetOrCreateStreamWithTTL(gomock.Any(), common.PolarisPingSub, gomock.Any()).Return(mockStream, nil) pms := NewPolarisService(pmapDummyCfg, pmapDummyNTC) pms.AfterStart() @@ -524,7 +524,7 @@ func Test_createV030Message(t *testing.T) { tests := []struct { name string args args - want *p2p.V030Message + want *p2pcommon.MessageValue wantErr bool }{ // TODO: Add test cases. 
diff --git a/p2p/pmap/peerstate.go b/polaris/server/peerstate.go similarity index 92% rename from p2p/pmap/peerstate.go rename to polaris/server/peerstate.go index 973265d57..755295fd1 100644 --- a/p2p/pmap/peerstate.go +++ b/polaris/server/peerstate.go @@ -3,16 +3,17 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( "bufio" "fmt" + "github.com/aergoio/aergo/p2p/v030" + "github.com/aergoio/aergo/polaris/common" "sync" "sync/atomic" "time" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2putil" "github.com/aergoio/aergo/p2p/subproto" @@ -74,7 +75,7 @@ func (hc *peerState) check(wg *sync.WaitGroup, timeout time.Duration) { func (hc *peerState) checkConnect(timeout time.Duration) (*types.Ping, error) { hc.Logger.Debug().Str(p2putil.LogPeerID, p2putil.ShortForm(hc.meta.ID)).Msg("staring up healthcheck") hc.lCheckTime = time.Now() - s, err := hc.nt.GetOrCreateStreamWithTTL(hc.meta, PolarisPingSub, PolarisPingTTL) + s, err := hc.nt.GetOrCreateStreamWithTTL(hc.meta, PolarisPingTTL, common.PolarisPingSub) if err != nil { hc.contFail++ hc.Logger.Debug().Err(err).Msg("Healthcheck failed to get network stream") @@ -83,7 +84,7 @@ func (hc *peerState) checkConnect(timeout time.Duration) (*types.Ping, error) { } defer s.Close() - rw := p2p.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) + rw := v030.NewV030ReadWriter(bufio.NewReader(s), bufio.NewWriter(s)) pc := &pingChecker{peerState: hc, rw: rw} pingResp, err := p2putil.InvokeWithTimer(pc, time.NewTimer(timeout)) if pingResp.(*types.Ping) == nil { @@ -126,7 +127,7 @@ func (hc *peerState) receivePingResp(reqID p2pcommon.MsgID, rd p2pcommon.MsgRead return nil, nil, fmt.Errorf("Not expected response %s : req_id=%s", resp.Subprotocol().String(), resp.OriginalID().String()) } pingResp := &types.Ping{} - err = p2putil.UnmarshalMessage(resp.Payload(), pingResp) + err = p2putil.UnmarshalMessageBody(resp.Payload(), pingResp) if err != nil { return resp, nil, err } diff --git a/p2p/pmap/peerstate_test.go b/polaris/server/peerstate_test.go similarity index 95% rename from p2p/pmap/peerstate_test.go rename to polaris/server/peerstate_test.go index d8888e43f..1bc5e63ad 100644 --- a/p2p/pmap/peerstate_test.go +++ b/polaris/server/peerstate_test.go @@ -3,7 +3,7 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( "fmt" @@ -11,7 +11,6 @@ import ( "time" "github.com/aergoio/aergo-lib/log" - "github.com/aergoio/aergo/p2p" "github.com/aergoio/aergo/p2p/p2pcommon" "github.com/aergoio/aergo/p2p/p2pmock" "github.com/aergoio/aergo/p2p/p2putil" @@ -79,7 +78,7 @@ func Test_pingChecker_DoCall(t *testing.T) { pc.Cancel() time.Sleep(time.Millisecond << 4) } - ret := p2p.NewV030Message(EmptyMsgID, reqID, time.Now().UnixNano(), tt.args.respSub, []byte{}) + ret := p2pcommon.NewMessageValue(tt.args.respSub, EmptyMsgID, reqID, time.Now().UnixNano(), []byte{}) return ret, tt.args.readRet2 }) @@ -150,7 +149,7 @@ func Test_pingChecker_DoCallWithTimer(t *testing.T) { if tt.args.readWait > 0 { time.Sleep(time.Second) } - ret := p2p.NewV030Message(EmptyMsgID, reqID, time.Now().UnixNano(), tt.args.respSub, []byte{}) + ret := p2pcommon.NewMessageValue(tt.args.respSub, EmptyMsgID, reqID, time.Now().UnixNano(), []byte{}) return ret, tt.args.readRet2 }) diff --git a/p2p/pmap/prpc.go b/polaris/server/prpc.go similarity index 99% rename from p2p/pmap/prpc.go rename to polaris/server/prpc.go index fa1ff524c..640028721 100644 --- a/p2p/pmap/prpc.go +++ 
b/polaris/server/prpc.go @@ -3,7 +3,7 @@ * @copyright defined in aergo/LICENSE.txt */ -package pmap +package server import ( "context" diff --git a/rpc/grpcserver.go b/rpc/grpcserver.go index 738c7af7a..3e242ed9e 100644 --- a/rpc/grpcserver.go +++ b/rpc/grpcserver.go @@ -21,6 +21,7 @@ import ( "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/chain" "github.com/aergoio/aergo/consensus" + "github.com/aergoio/aergo/consensus/impl/raftv2" "github.com/aergoio/aergo/internal/common" "github.com/aergoio/aergo/message" "github.com/aergoio/aergo/p2p/metric" @@ -28,6 +29,7 @@ import ( "github.com/aergoio/aergo/pkg/component" "github.com/aergoio/aergo/types" "github.com/golang/protobuf/ptypes/timestamp" + "github.com/libp2p/go-libp2p-peer" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -37,7 +39,8 @@ var ( ) var ( - ErrUninitAccessor = errors.New("accessor is not initilized") + ErrUninitAccessor = errors.New("accessor is not initilized") + ErrNotSupportedConsensus = errors.New("not supported by this consensus") ) type EventStream struct { @@ -172,11 +175,7 @@ func (rpc *AergoRPCService) ListBlockMetadata(ctx context.Context, in *types.Lis } var metas []*types.BlockMetadata for _, block := range blocks { - metas = append(metas, &types.BlockMetadata{ - Hash: block.BlockHash(), - Header: block.GetHeader(), - Txcount: int32(len(block.GetBody().GetTxs())), - }) + metas = append(metas, block.GetMetadata()) } return &types.BlockMetadataList{Blocks: metas}, nil } @@ -294,7 +293,7 @@ func (rpc *AergoRPCService) BroadcastToListBlockMetadataStream(meta *types.Block rpc.blockMetadataStreamLock.RUnlock() } -// real-time streaming most recent block header +// ListBlockStream starts a stream of new blocks func (rpc *AergoRPCService) ListBlockStream(in *types.Empty, stream types.AergoRPCService_ListBlockStreamServer) error { streamId := atomic.AddUint32(&rpc.streamID, 1) rpc.blockStreamLock.Lock() @@ -314,20 +313,21 @@ func (rpc *AergoRPCService) ListBlockStream(in *types.Empty, stream types.AergoR } } +// ListBlockMetadataStream starts a stream of new blocks' metadata func (rpc *AergoRPCService) ListBlockMetadataStream(in *types.Empty, stream types.AergoRPCService_ListBlockMetadataStreamServer) error { - streamId := atomic.AddUint32(&rpc.streamID, 1) + streamID := atomic.AddUint32(&rpc.streamID, 1) rpc.blockMetadataStreamLock.Lock() - rpc.blockMetadataStream[streamId] = stream + rpc.blockMetadataStream[streamID] = stream rpc.blockMetadataStreamLock.Unlock() - logger.Info().Uint32("id", streamId).Msg("block meta stream added") + logger.Info().Uint32("id", streamID).Msg("block meta stream added") for { select { case <-stream.Context().Done(): rpc.blockMetadataStreamLock.Lock() - delete(rpc.blockMetadataStream, streamId) + delete(rpc.blockMetadataStream, streamID) rpc.blockMetadataStreamLock.Unlock() - logger.Info().Uint32("id", streamId).Msg("block meta stream deleted") + logger.Info().Uint32("id", streamID).Msg("block meta stream deleted") return nil } } @@ -396,12 +396,7 @@ func (rpc *AergoRPCService) GetBlockMetadata(ctx context.Context, in *types.Sing if err != nil { return nil, err } - - meta := &types.BlockMetadata{ - Hash: block.BlockHash(), - Header: block.GetHeader(), - Txcount: int32(len(block.GetBody().GetTxs())), - } + meta := block.GetMetadata() return meta, nil } @@ -807,7 +802,7 @@ func (rpc *AergoRPCService) GetPeers(ctx context.Context, in *types.PeersParams) ret := &types.PeerList{Peers: make([]*types.Peer, 0, len(rsp.Peers))} for _, pi := range rsp.Peers { blkNotice 
:= &types.NewBlockNotice{BlockHash: pi.LastBlockHash, BlockNo: pi.LastBlockNumber} - peer := &types.Peer{Address: pi.Addr, State: int32(pi.State), Bestblock: blkNotice, LashCheck: pi.CheckTime.UnixNano(), Hidden: pi.Hidden, Selfpeer: pi.Self} + peer := &types.Peer{Address: pi.Addr, State: int32(pi.State), Bestblock: blkNotice, LashCheck: pi.CheckTime.UnixNano(), Hidden: pi.Hidden, Selfpeer: pi.Self, Version: pi.Version} ret.Peers = append(ret.Peers, peer) } @@ -889,7 +884,7 @@ func (rpc *AergoRPCService) GetStaking(ctx context.Context, in *types.AccountAdd func (rpc *AergoRPCService) GetNameInfo(ctx context.Context, in *types.Name) (*types.NameInfo, error) { result, err := rpc.hub.RequestFuture(message.ChainSvc, - &message.GetNameInfo{Name: in.Name}, defaultActorTimeout, "rpc.(*AergoRPCService).GetName").Result() + &message.GetNameInfo{Name: in.Name, BlockNo: in.BlockNo}, defaultActorTimeout, "rpc.(*AergoRPCService).GetName").Result() if err != nil { return nil, err } @@ -1040,8 +1035,7 @@ func (rpc *AergoRPCService) GetServerInfo(ctx context.Context, in *types.KeyPara return rsp, nil } - -// Blockchain handle rpc request blockchain. It has no additional input parameter +// GetConsensusInfo handle rpc request blockchain. It has no additional input parameter func (rpc *AergoRPCService) GetConsensusInfo(ctx context.Context, in *types.Empty) (*types.ConsensusInfo, error) { if rpc.consensusAccessor == nil { return nil, ErrUninitAccessor @@ -1049,3 +1043,32 @@ func (rpc *AergoRPCService) GetConsensusInfo(ctx context.Context, in *types.Empt return rpc.consensusAccessor.ConsensusInfo(), nil } + +// ChainStat handles rpc request chainstat. +func (rpc *AergoRPCService) ChainStat(ctx context.Context, in *types.Empty) (*types.ChainStats, error) { + ca := rpc.actorHelper.GetChainAccessor() + if ca == nil { + return nil, ErrUninitAccessor + } + return &types.ChainStats{Report: ca.GetChainStats()}, nil +} + +func (rpc *AergoRPCService) ChangeMembership(ctx context.Context, in *types.MembershipChange) (*types.MembershipChangeReply, error) { + if rpc.consensusAccessor == nil { + return nil, ErrUninitAccessor + } + + if genesisInfo := rpc.actorHelper.GetChainAccessor().GetGenesisInfo(); genesisInfo != nil { + if genesisInfo.ID.Consensus != raftv2.GetName() { + return nil, ErrNotSupportedConsensus + } + } + + member, err := rpc.consensusAccessor.ConfChange(in) + if err != nil { + return nil, err + } + + reply := &types.MembershipChangeReply{Attr: &types.MemberAttr{ID: uint64(member.ID), Name: member.Name, Url: member.Url, PeerID: []byte(peer.ID(member.PeerID))}} + return reply, nil +} diff --git a/rpc/rpc.go b/rpc/rpc.go index a8a8ba36c..75b37f1bc 100644 --- a/rpc/rpc.go +++ b/rpc/rpc.go @@ -7,8 +7,6 @@ package rpc import ( "fmt" - "github.com/aergoio/aergo/p2p" - "github.com/aergoio/aergo/p2p/p2pcommon" "net" "net/http" "reflect" @@ -16,6 +14,9 @@ import ( "strings" "time" + "github.com/aergoio/aergo/p2p/p2pcommon" + "github.com/aergoio/aergo/p2p/p2pkey" + "github.com/aergoio/aergo-actor/actor" "github.com/aergoio/aergo/config" "github.com/aergoio/aergo/consensus" @@ -132,11 +133,7 @@ func (ns *RPC) Receive(context actor.Context) { case *types.Block: server := ns.actualServer server.BroadcastToListBlockStream(msg) - meta := &types.BlockMetadata{ - Hash: msg.BlockHash(), - Header: msg.GetHeader(), - Txcount: int32(len(msg.GetBody().GetTxs())), - } + meta := msg.GetMetadata() server.BroadcastToListBlockMetadataStream(meta) case []*types.Event: server := ns.actualServer @@ -218,14 +215,14 @@ func (ns *RPC) 
serve() { return } -func (ns *RPC) CollectServerInfo(categories []string) *types.ServerInfo{ +func (ns *RPC) CollectServerInfo(categories []string) *types.ServerInfo { // 3 items are needed statusInfo := make(map[string]string) rsp, err := ns.CallRequestDefaultTimeout(message.P2PSvc, &message.GetSelf{}) statusInfo["version"] = ns.version if err != nil { ns.Logger.Error().Err(err).Msg("p2p actor error") - statusInfo["id"] = p2p.NodeSID() + statusInfo["id"] = p2pkey.NodeSID() } else { meta := rsp.(p2pcommon.PeerMeta) statusInfo["id"] = meta.ID.Pretty() @@ -233,9 +230,9 @@ func (ns *RPC) CollectServerInfo(categories []string) *types.ServerInfo{ statusInfo["port"] = strconv.Itoa(int(meta.Port)) } configInfo := make(map[string]*types.ConfigItem) - types.AddCategory(configInfo, "base").AddBool("personal",ns.conf.BaseConfig.Personal) - types.AddCategory(configInfo, "account").AddInt("unlocktimeout",int(ns.conf.Account.UnlockTimeout)) - return &types.ServerInfo{Status: statusInfo, Config:configInfo} + types.AddCategory(configInfo, "base").AddBool("personal", ns.conf.BaseConfig.Personal) + types.AddCategory(configInfo, "account").AddInt("unlocktimeout", int(ns.conf.Account.UnlockTimeout)) + return &types.ServerInfo{Status: statusInfo, Config: configInfo} } const defaultTTL = time.Second * 4 diff --git a/state/chainstatedb.go b/state/chainstatedb.go index f83a62257..38c43bee9 100644 --- a/state/chainstatedb.go +++ b/state/chainstatedb.go @@ -178,7 +178,7 @@ func (sdb *ChainStateDB) UpdateRoot(bstate *BlockState) error { return nil } -func (sdb *ChainStateDB) Rollback(targetBlockRoot []byte) error { +func (sdb *ChainStateDB) SetRoot(targetBlockRoot []byte) error { sdb.Lock() defer sdb.Unlock() diff --git a/state/contract.go b/state/contract.go index 9ae5f469d..5eae621f5 100644 --- a/state/contract.go +++ b/state/contract.go @@ -42,6 +42,11 @@ func (states *StateDB) GetSystemAccountState() (*ContractState, error) { return states.OpenContractStateAccount(types.ToAccountID([]byte(types.AergoSystem))) } +// GetNameAccountState returns the ContractState of the AERGO name account. +func (states *StateDB) GetNameAccountState() (*ContractState, error) { + return states.OpenContractStateAccount(types.ToAccountID([]byte(types.AergoName))) +} + type ContractState struct { *types.State account types.AccountID @@ -90,6 +95,11 @@ func (st *ContractState) GetCode() ([]byte, error) { return st.code, nil } +// HasKey returns existence of the key +func (st *ContractState) HasKey(key []byte) bool { + return st.storage.has(types.GetHashID(key), true) +} + // SetData store key and value pair to the storage. 
func (st *ContractState) SetData(key, value []byte) error { st.storage.put(newValueEntry(types.GetHashID(key), value)) diff --git a/state/contract_test.go b/state/contract_test.go index 6db0a04f3..bb9981a67 100644 --- a/state/contract_test.go +++ b/state/contract_test.go @@ -162,6 +162,50 @@ func TestContractStateDataDelete(t *testing.T) { assert.NoError(t, err, "stage contract state") } +func TestContractStateHasKey(t *testing.T) { + initTest(t) + defer deinitTest() + testAddress := []byte("test_address") + testBytes := []byte("test_bytes") + testKey := []byte("test_key") + + // open contract state and set test data + contractState, err := stateDB.OpenContractStateAccount(types.ToAccountID(testAddress)) + assert.NoError(t, err, "could not open contract state") + assert.False(t, contractState.HasKey(testKey)) + + err = contractState.SetData(testKey, testBytes) + assert.NoError(t, err, "set data to contract state") + assert.True(t, contractState.HasKey(testKey)) + + // get test data + _, err = contractState.GetData(testKey) + assert.NoError(t, err, "get data from contract state") + assert.True(t, contractState.HasKey(testKey)) + + // delete test data + err = contractState.DeleteData(testKey) + assert.NoError(t, err, "delete data from contract state") + assert.True(t, contractState.HasKey(testKey)) + + // stage contract state + err = stateDB.StageContractState(contractState) + assert.NoError(t, err, "stage contract state") + + // update and commit + err = stateDB.Update() + assert.NoError(t, err, "failed to update stateDB") + err = stateDB.Commit() + assert.NoError(t, err, "failed to commit stateDB") + + // re-open contract state + contractState, err = stateDB.OpenContractState(types.ToAccountID(testAddress), contractState.State) + assert.NoError(t, err, "could not open contract state") + + // check key existence + assert.False(t, contractState.HasKey(testKey)) +} + func TestContractStateEmpty(t *testing.T) { initTest(t) defer deinitTest() diff --git a/state/statebuffer.go b/state/statebuffer.go index 5f52980f8..9a7fecc0b 100644 --- a/state/statebuffer.go +++ b/state/statebuffer.go @@ -109,6 +109,10 @@ func (buffer *stateBuffer) get(key types.HashID) entry { } return nil } +func (buffer *stateBuffer) has(key types.HashID) bool { + _, ok := buffer.indexes[key] + return ok +} func (buffer *stateBuffer) put(et entry) { snapshot := buffer.snapshot() diff --git a/state/statebuffer_test.go b/state/statebuffer_test.go index 61710166d..e21501420 100644 --- a/state/statebuffer_test.go +++ b/state/statebuffer_test.go @@ -132,3 +132,20 @@ func TestBufferRollback(t *testing.T) { assert.Equal(t, []byte{5}, stb.get(k1).Value()) t.Logf("k0: %v, k1: %v", stb.get(k0).Value(), stb.get(k1).Value()) } + +func TestBufferHasKey(t *testing.T) { + stb := newStateBuffer() + assert.False(t, stb.has(k0)) + + stb.put(newValueEntry(k0, []byte{1})) + assert.True(t, stb.has(k0)) // buffer has key + + stb.put(newValueEntryDelete(k0)) + assert.True(t, stb.has(k0)) // buffer has key for ValueEntryDelete + + stb.put(newValueEntry(k0, []byte{2})) + assert.True(t, stb.has(k0)) // buffer has key + + stb.reset() + assert.False(t, stb.has(k0)) // buffer doesn't have key +} diff --git a/state/statedb.go b/state/statedb.go index b1d284693..ce01d491a 100644 --- a/state/statedb.go +++ b/state/statedb.go @@ -6,6 +6,7 @@ package state import ( + "bytes" "errors" "fmt" "math/big" @@ -24,6 +25,10 @@ const ( stateLatest = stateName + ".latest" ) +var ( + stateMarker = []byte{0x54, 0x45} // marker: tail end +) + var ( logger = 
log.NewLogger(stateName) ) @@ -501,9 +506,33 @@ func (states *StateDB) stage(txn trie.DbTx) error { if err := states.buffer.stage(txn); err != nil { return err } + // set marker + states.setMarker(txn) // reset buffer if err := states.buffer.reset(); err != nil { return err } return nil } + +// setMarker store the marker that represents finalization of the state root. +func (states *StateDB) setMarker(txn trie.DbTx) { + if states.trie.Root == nil { + return + } + // logger.Debug().Str("stateRoot", enc.ToString(states.trie.Root)).Msg("setMarker") + txn.Set(common.Hasher(states.trie.Root), stateMarker) +} + +// HasMarker represents that the state root is finalized or not. +func (states *StateDB) HasMarker(root []byte) bool { + if root == nil { + return false + } + marker := (*states.store).Get(common.Hasher(root)) + if marker != nil && bytes.Equal(marker, stateMarker) { + // logger.Debug().Str("stateRoot", enc.ToString(root)).Str("marker", hex.EncodeToString(marker)).Msg("IsMarked") + return true + } + return false +} diff --git a/state/statedb_test.go b/state/statedb_test.go index 061d447fe..e7369716e 100644 --- a/state/statedb_test.go +++ b/state/statedb_test.go @@ -231,3 +231,21 @@ func TestStateDBParallel(t *testing.T) { } assert.True(t, stateEquals(&testStates[4], st2)) } + +func TestStateDBMarker(t *testing.T) { + initTest(t) + defer deinitTest() + assert.Nil(t, stateDB.GetRoot()) + + for _, v := range testStates { + _ = stateDB.PutState(testAccount, &v) + } + _ = stateDB.Update() + _ = stateDB.Commit() + assert.Equal(t, testRoot, stateDB.GetRoot()) + + assert.True(t, stateDB.HasMarker(stateDB.GetRoot())) + assert.False(t, stateDB.HasMarker(testSecondRoot)) + assert.False(t, stateDB.HasMarker([]byte{})) + assert.False(t, stateDB.HasMarker(nil)) +} diff --git a/state/storage.go b/state/storage.go index deba6a59d..c053ea9b7 100644 --- a/state/storage.go +++ b/state/storage.go @@ -54,6 +54,17 @@ func newBufferedStorage(root []byte, store db.DB) *bufferedStorage { } } +func (storage *bufferedStorage) has(key types.HashID, lookupTrie bool) bool { + if storage.buffer.has(key) { + return true + } + if lookupTrie { + if buf, _ := storage.trie.Get(key.Bytes()); buf != nil { + return true + } + } + return false +} func (storage *bufferedStorage) get(key types.HashID) entry { return storage.buffer.get(key) } diff --git a/state/storage_test.go b/state/storage_test.go index e0782e492..6992bf3c0 100644 --- a/state/storage_test.go +++ b/state/storage_test.go @@ -84,3 +84,39 @@ func TestStorageDelete(t *testing.T) { assert.Equal(t, []byte{0}, storage.get(v2).Hash()) assert.Nil(t, storage.get(v2).Value()) } + +func TestStorageHasKey(t *testing.T) { + storage := newBufferedStorage(nil, nil) + v1 := types.GetHashID([]byte("v1")) + + assert.False(t, storage.has(v1, false)) // check buffer only + assert.False(t, storage.has(v1, true)) // check buffer and trie + + // put entry + storage.put(newValueEntry(v1, []byte{1})) + assert.True(t, storage.has(v1, false)) // buffer has key + assert.True(t, storage.has(v1, true)) // buffer has key + + // update storage and reset buffer + err := storage.update() + assert.NoError(t, err, "failed to update storage") + err = storage.buffer.reset() + assert.NoError(t, err, "failed to reset buffer") + // after update and reset + assert.False(t, storage.has(v1, false)) // buffer doesn't have key + assert.True(t, storage.has(v1, true)) // buffer doesn't have, but trie has key + + // delete entry + storage.put(newValueEntryDelete(v1)) + assert.True(t, storage.has(v1, false)) // 
buffer has key + assert.True(t, storage.has(v1, true)) // buffer has key + + // update storage and reset buffer + err = storage.update() + assert.NoError(t, err, "failed to update storage") + err = storage.buffer.reset() + assert.NoError(t, err, "failed to reset buffer") + // after update and reset + assert.False(t, storage.has(v1, false)) // buffer doesn't have key + assert.False(t, storage.has(v1, true)) // buffer and trie don't have key +} diff --git a/syncer/blockfetcher_test.go b/syncer/blockfetcher_test.go index de8799fc1..49bc2ad03 100644 --- a/syncer/blockfetcher_test.go +++ b/syncer/blockfetcher_test.go @@ -30,7 +30,7 @@ func TestBlockFetcher_simple(t *testing.T) { syncer := NewTestSyncer(t, localChain, remoteChain, peers, &testCfg) //set ctx manually because finder will be skipped - ctx := types.NewSyncCtx(1, "peer-0", targetNo, uint64(localChain.Best)) + ctx := types.NewSyncCtx(1, "peer-0", targetNo, uint64(localChain.Best), nil) ancestor := remoteChain.Blocks[0] ctx.SetAncestor(ancestor) diff --git a/syncer/hashfetcher_test.go b/syncer/hashfetcher_test.go index 1e267048c..082c47d2f 100644 --- a/syncer/hashfetcher_test.go +++ b/syncer/hashfetcher_test.go @@ -30,7 +30,7 @@ func TestHashFetcher_normal(t *testing.T) { testCfg.debugContext.targetNo = targetNo //set ctx because finder is skipped - ctx := types.NewSyncCtx(1, "peer-0", targetNo, uint64(localChain.Best)) + ctx := types.NewSyncCtx(1, "peer-0", targetNo, uint64(localChain.Best), nil) ancestorInfo := remoteChain.GetBlockInfo(0) syncer := NewTestSyncer(t, localChain, remoteChain, peers, &testCfg) @@ -68,7 +68,7 @@ func TestHashFetcher_quit(t *testing.T) { testCfg.debugContext.BfWaitTime = time.Second * 1000 //set ctx because finder is skipped - ctx := types.NewSyncCtx(1, "peer-0", targetNo, uint64(localChain.Best)) + ctx := types.NewSyncCtx(1, "peer-0", targetNo, uint64(localChain.Best), nil) ancestorInfo := remoteChain.GetBlockInfo(0) syncer := NewTestSyncer(t, localChain, remoteChain, peers, &testCfg) diff --git a/syncer/syncerservice.go b/syncer/syncerservice.go index 2ea4a54b9..b84cdc52b 100644 --- a/syncer/syncerservice.go +++ b/syncer/syncerservice.go @@ -1,6 +1,8 @@ package syncer import ( + "github.com/aergoio/aergo/chain" + "github.com/aergoio/aergo/p2p/p2putil" "runtime/debug" "github.com/aergoio/aergo-lib/log" @@ -121,13 +123,13 @@ func (syncer *Syncer) AfterStart() { func (syncer *Syncer) BeforeStop() { if syncer.isRunning { logger.Info().Msg("syncer BeforeStop") - syncer.Reset() + syncer.Reset(nil) } } -func (syncer *Syncer) Reset() { +func (syncer *Syncer) Reset(err error) { if syncer.isRunning { - logger.Info().Msg("syncer stop#1") + logger.Info().Uint64("targetNo", syncer.ctx.TargetNo).Msg("syncer stop#1") syncer.finder.stop() syncer.hashFetcher.stop() @@ -137,12 +139,29 @@ func (syncer *Syncer) Reset() { syncer.hashFetcher = nil syncer.blockFetcher = nil syncer.isRunning = false + + syncer.notifyStop(err) + syncer.ctx = nil } logger.Info().Msg("syncer stopped") } +func (syncer *Syncer) notifyStop(err error) { + if syncer.ctx == nil || syncer.ctx.NotifyC == nil { + return + } + + logger.Info().Err(err).Msg("notify syncer stop") + + select { + case syncer.ctx.NotifyC <- err: + default: + logger.Debug().Msg("failed to notify syncer stop") + } +} + func (syncer *Syncer) GetSeq() uint64 { return syncer.Seq } @@ -250,7 +269,7 @@ func (syncer *Syncer) handleMessage(inmsg interface{}) { case *message.FinderResult: err := syncer.handleFinderResult(msg) if err != nil { - syncer.Reset() + syncer.Reset(err) 
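Reset now forwards the error that stopped the syncer to the requester over the context's NotifyC channel; notifyStop above sends without blocking, so the channel should be buffered. A minimal sketch of a caller waiting for that notification, assuming the syncer actor is addressed as message.SyncerSvc, the hub's Tell helper is available, and the request is accepted; requestSync is an illustrative helper, not part of this change.

package example

import (
	"github.com/aergoio/aergo/message"
	"github.com/aergoio/aergo/pkg/component"
	"github.com/libp2p/go-libp2p-peer"
)

// requestSync asks the syncer to catch up to targetNo and blocks until the
// syncer reports why it stopped; a nil result means it stopped normally.
func requestSync(hub *component.ComponentHub, peerID peer.ID, targetNo uint64) error {
	// Buffered so the non-blocking send in notifyStop is not dropped.
	notifyC := make(chan error, 1)
	hub.Tell(message.SyncerSvc, &message.SyncStart{
		PeerID:   peerID,
		TargetNo: targetNo,
		NotifyC:  notifyC,
	})
	return <-notifyC
}

In practice the receive would sit inside a select with a timeout, since a request that is skipped because the syncer is already running produces no notification.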
logger.Error().Err(err).Msg("FinderResult failed") } case *message.GetHashesRsp: @@ -259,13 +278,13 @@ func (syncer *Syncer) handleMessage(inmsg interface{}) { case *message.GetBlockChunksRsp: err := syncer.blockFetcher.handleBlockRsp(msg) if err != nil { - syncer.Reset() + syncer.Reset(err) logger.Error().Err(err).Msg("GetBlockChunksRsp failed") } case *message.AddBlockRsp: err := syncer.blockFetcher.handleBlockRsp(msg) if err != nil { - syncer.Reset() + syncer.Reset(err) logger.Error().Err(err).Msg("AddBlockRsp failed") } case *message.SyncStop: @@ -274,7 +293,7 @@ func (syncer *Syncer) handleMessage(inmsg interface{}) { } else { logger.Error().Str("from", msg.FromWho).Err(msg.Err).Msg("syncer try to stop by error") } - syncer.Reset() + syncer.Reset(msg.Err) case *message.CloseFetcher: if msg.FromWho == NameHashFetcher { syncer.hashFetcher.stop() @@ -299,7 +318,7 @@ func (syncer *Syncer) handleSyncStart(msg *message.SyncStart) error { var err error var bestBlock *types.Block - logger.Debug().Uint64("targetNo", msg.TargetNo).Msg("syncer requested") + logger.Debug().Uint64("targetNo", msg.TargetNo).Str("peer", p2putil.ShortForm(msg.PeerID)).Msg("syncer requested") if syncer.isRunning { logger.Debug().Uint64("targetNo", msg.TargetNo).Msg("skipped syncer is running") @@ -326,7 +345,7 @@ func (syncer *Syncer) handleSyncStart(msg *message.SyncStart) error { logger.Info().Uint64("seq", syncer.GetSeq()).Uint64("targetNo", msg.TargetNo).Uint64("bestNo", bestBlockNo).Msg("syncer started") //TODO BP stop - syncer.ctx = types.NewSyncCtx(syncer.GetSeq(), msg.PeerID, msg.TargetNo, bestBlockNo) + syncer.ctx = types.NewSyncCtx(syncer.GetSeq(), msg.PeerID, msg.TargetNo, bestBlockNo, msg.NotifyC) syncer.isRunning = true syncer.finder = newFinder(syncer.ctx, syncer.getCompRequester(), syncer.chain, syncer.syncerCfg) @@ -368,6 +387,11 @@ func (syncer *Syncer) handleGetHashByNoRsp(msg *message.GetHashByNoRsp) { func (syncer *Syncer) handleFinderResult(msg *message.FinderResult) error { logger.Debug().Msg("syncer received finder result message") + if err := chain.TestDebugger.Check(chain.DEBUG_SYNCER_CRASH, 0, nil); err != nil { + chain.TestDebugger.Unset(chain.DEBUG_SYNCER_CRASH) + return err + } + if msg.Err != nil || msg.Ancestor == nil { logger.Error().Err(msg.Err).Msg("Find Ancestor failed") return ErrFinderInternal @@ -439,7 +463,7 @@ func (syncer *Syncer) Statistics() *map[string]interface{} { func (syncer *Syncer) RecoverSyncerSelf() { if r := recover(); r != nil { logger.Error().Str("dest", "SYNCER").Str("callstack", string(debug.Stack())).Msg("syncer recovered it's panic") - syncer.Reset() + syncer.Reset(ErrSyncerPanic) } } diff --git a/test/sample.id b/test/sample.id new file mode 100644 index 000000000..1ab1e00c2 --- /dev/null +++ b/test/sample.id @@ -0,0 +1 @@ +16Uiu2HAmP2iRDpPumUbKhNnEngoxAUQWBmCyn7FaYUrkaDAMXJPJ \ No newline at end of file diff --git a/test/sample.key b/test/sample.key new file mode 100644 index 000000000..780b48df4 --- /dev/null +++ b/test/sample.key @@ -0,0 +1 @@ + h½>¼P¶ùaûTkµ‘²æóã £qÁ²ôíŸ;X‚Í \ No newline at end of file diff --git a/test/sample.pub b/test/sample.pub new file mode 100644 index 000000000..06e289c8e Binary files /dev/null and b/test/sample.pub differ diff --git a/types/account.pb.go b/types/account.pb.go index aba8c2850..c612d520b 100644 --- a/types/account.pb.go +++ b/types/account.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// source: account.proto -package types // import "github.com/aergoio/aergo/types" +package types -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -29,16 +31,17 @@ func (m *Account) Reset() { *m = Account{} } func (m *Account) String() string { return proto.CompactTextString(m) } func (*Account) ProtoMessage() {} func (*Account) Descriptor() ([]byte, []int) { - return fileDescriptor_account_e24eda885a5c6a02, []int{0} + return fileDescriptor_8e28828dcb8d24f0, []int{0} } + func (m *Account) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Account.Unmarshal(m, b) } func (m *Account) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Account.Marshal(b, m, deterministic) } -func (dst *Account) XXX_Merge(src proto.Message) { - xxx_messageInfo_Account.Merge(dst, src) +func (m *Account) XXX_Merge(src proto.Message) { + xxx_messageInfo_Account.Merge(m, src) } func (m *Account) XXX_Size() int { return xxx_messageInfo_Account.Size(m) @@ -57,7 +60,7 @@ func (m *Account) GetAddress() []byte { } type AccountList struct { - Accounts []*Account `protobuf:"bytes,1,rep,name=accounts" json:"accounts,omitempty"` + Accounts []*Account `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -67,16 +70,17 @@ func (m *AccountList) Reset() { *m = AccountList{} } func (m *AccountList) String() string { return proto.CompactTextString(m) } func (*AccountList) ProtoMessage() {} func (*AccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_account_e24eda885a5c6a02, []int{1} + return fileDescriptor_8e28828dcb8d24f0, []int{1} } + func (m *AccountList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AccountList.Unmarshal(m, b) } func (m *AccountList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AccountList.Marshal(b, m, deterministic) } -func (dst *AccountList) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccountList.Merge(dst, src) +func (m *AccountList) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccountList.Merge(m, src) } func (m *AccountList) XXX_Size() int { return xxx_messageInfo_AccountList.Size(m) @@ -99,9 +103,9 @@ func init() { proto.RegisterType((*AccountList)(nil), "types.AccountList") } -func init() { proto.RegisterFile("account.proto", fileDescriptor_account_e24eda885a5c6a02) } +func init() { proto.RegisterFile("account.proto", fileDescriptor_8e28828dcb8d24f0) } -var fileDescriptor_account_e24eda885a5c6a02 = []byte{ +var fileDescriptor_8e28828dcb8d24f0 = []byte{ // 137 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4d, 0x4c, 0x4e, 0xce, 0x2f, 0xcd, 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, diff --git a/types/blockchain.go b/types/blockchain.go index 5b269762b..96ab9d81f 100644 --- a/types/blockchain.go +++ b/types/blockchain.go @@ -127,6 +127,7 @@ type ChainAccessor interface { GetBlock(blockHash []byte) (*Block, error) // GetHashByNo returns hash of block. 
It return nil and error if not found block of that number or there is a problem in db store GetHashByNo(blockNo BlockNo) ([]byte, error) + GetChainStats() string } type SyncContext struct { @@ -142,10 +143,12 @@ type SyncContext struct { TotalCnt uint64 RemainCnt uint64 LastAnchor BlockNo + + NotifyC chan error } -func NewSyncCtx(seq uint64, peerID peer.ID, targetNo uint64, bestNo uint64) *SyncContext { - return &SyncContext{Seq: seq, PeerID: peerID, TargetNo: targetNo, BestNo: bestNo, LastAnchor: 0} +func NewSyncCtx(seq uint64, peerID peer.ID, targetNo uint64, bestNo uint64, notifyC chan error) *SyncContext { + return &SyncContext{Seq: seq, PeerID: peerID, TargetNo: targetNo, BestNo: bestNo, LastAnchor: 0, NotifyC: notifyC} } func (ctx *SyncContext) SetAncestor(ancestor *Block) { @@ -465,6 +468,16 @@ func (block *Block) SetBlocksRootHash(blockRootHash []byte) { block.GetHeader().BlocksRootHash = blockRootHash } +// GetMetadata generates Metadata object for block +func (block *Block) GetMetadata() *BlockMetadata { + return &BlockMetadata{ + Hash: block.BlockHash(), + Header: block.GetHeader(), + Txcount: int32(len(block.GetBody().GetTxs())), + Size: int64(proto.Size(block)), + } +} + // CalculateTxsRootHash generates merkle tree of transactions and returns root hash. func CalculateTxsRootHash(txs []*Tx) []byte { mes := make([]merkle.MerkleEntry, len(txs)) diff --git a/types/blockchain.pb.go b/types/blockchain.pb.go index 9f5562912..9079f6cd1 100644 --- a/types/blockchain.pb.go +++ b/types/blockchain.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: blockchain.proto -package types // import "github.com/aergoio/aergo/types" +package types -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -29,6 +31,7 @@ var TxType_name = map[int32]string{ 0: "NORMAL", 1: "GOVERNANCE", } + var TxType_value = map[string]int32{ "NORMAL": 0, "GOVERNANCE": 1, @@ -37,14 +40,15 @@ var TxType_value = map[string]int32{ func (x TxType) String() string { return proto.EnumName(TxType_name, int32(x)) } + func (TxType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{0} + return fileDescriptor_e9ac6287ce250c9a, []int{0} } type Block struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header *BlockHeader `protobuf:"bytes,2,opt,name=header" json:"header,omitempty"` - Body *BlockBody `protobuf:"bytes,3,opt,name=body" json:"body,omitempty"` + Header *BlockHeader `protobuf:"bytes,2,opt,name=header,proto3" json:"header,omitempty"` + Body *BlockBody `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -54,16 +58,17 @@ func (m *Block) Reset() { *m = Block{} } func (m *Block) String() string { return proto.CompactTextString(m) } func (*Block) ProtoMessage() {} func (*Block) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{0} + return fileDescriptor_e9ac6287ce250c9a, []int{0} } + func (m *Block) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Block.Unmarshal(m, b) } func (m *Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Block.Marshal(b, m, deterministic) } -func (dst *Block) XXX_Merge(src proto.Message) { - xxx_messageInfo_Block.Merge(dst, src) +func (m *Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Block.Merge(m, src) } func (m *Block) XXX_Size() int { return xxx_messageInfo_Block.Size(m) @@ -98,12 +103,12 @@ func (m *Block) GetBody() *BlockBody { type BlockHeader struct { ChainID []byte `protobuf:"bytes,1,opt,name=chainID,proto3" json:"chainID,omitempty"` PrevBlockHash []byte `protobuf:"bytes,2,opt,name=prevBlockHash,proto3" json:"prevBlockHash,omitempty"` - BlockNo uint64 `protobuf:"varint,3,opt,name=blockNo" json:"blockNo,omitempty"` - Timestamp int64 `protobuf:"varint,4,opt,name=timestamp" json:"timestamp,omitempty"` + BlockNo uint64 `protobuf:"varint,3,opt,name=blockNo,proto3" json:"blockNo,omitempty"` + Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` BlocksRootHash []byte `protobuf:"bytes,5,opt,name=blocksRootHash,proto3" json:"blocksRootHash,omitempty"` TxsRootHash []byte `protobuf:"bytes,6,opt,name=txsRootHash,proto3" json:"txsRootHash,omitempty"` ReceiptsRootHash []byte `protobuf:"bytes,7,opt,name=receiptsRootHash,proto3" json:"receiptsRootHash,omitempty"` - Confirms uint64 `protobuf:"varint,8,opt,name=confirms" json:"confirms,omitempty"` + Confirms uint64 `protobuf:"varint,8,opt,name=confirms,proto3" json:"confirms,omitempty"` PubKey []byte `protobuf:"bytes,9,opt,name=pubKey,proto3" json:"pubKey,omitempty"` CoinbaseAccount []byte `protobuf:"bytes,10,opt,name=coinbaseAccount,proto3" json:"coinbaseAccount,omitempty"` Sign []byte `protobuf:"bytes,11,opt,name=sign,proto3" json:"sign,omitempty"` @@ -116,16 +121,17 @@ func (m *BlockHeader) Reset() { *m = BlockHeader{} } func (m *BlockHeader) String() string { return proto.CompactTextString(m) } func (*BlockHeader) ProtoMessage() {} func (*BlockHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{1} + return fileDescriptor_e9ac6287ce250c9a, []int{1} } 
+ func (m *BlockHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockHeader.Unmarshal(m, b) } func (m *BlockHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockHeader.Marshal(b, m, deterministic) } -func (dst *BlockHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockHeader.Merge(dst, src) +func (m *BlockHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockHeader.Merge(m, src) } func (m *BlockHeader) XXX_Size() int { return xxx_messageInfo_BlockHeader.Size(m) @@ -214,7 +220,7 @@ func (m *BlockHeader) GetSign() []byte { } type BlockBody struct { - Txs []*Tx `protobuf:"bytes,1,rep,name=txs" json:"txs,omitempty"` + Txs []*Tx `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -224,16 +230,17 @@ func (m *BlockBody) Reset() { *m = BlockBody{} } func (m *BlockBody) String() string { return proto.CompactTextString(m) } func (*BlockBody) ProtoMessage() {} func (*BlockBody) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{2} + return fileDescriptor_e9ac6287ce250c9a, []int{2} } + func (m *BlockBody) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockBody.Unmarshal(m, b) } func (m *BlockBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockBody.Marshal(b, m, deterministic) } -func (dst *BlockBody) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockBody.Merge(dst, src) +func (m *BlockBody) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockBody.Merge(m, src) } func (m *BlockBody) XXX_Size() int { return xxx_messageInfo_BlockBody.Size(m) @@ -252,7 +259,7 @@ func (m *BlockBody) GetTxs() []*Tx { } type TxList struct { - Txs []*Tx `protobuf:"bytes,1,rep,name=txs" json:"txs,omitempty"` + Txs []*Tx `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -262,16 +269,17 @@ func (m *TxList) Reset() { *m = TxList{} } func (m *TxList) String() string { return proto.CompactTextString(m) } func (*TxList) ProtoMessage() {} func (*TxList) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{3} + return fileDescriptor_e9ac6287ce250c9a, []int{3} } + func (m *TxList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TxList.Unmarshal(m, b) } func (m *TxList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_TxList.Marshal(b, m, deterministic) } -func (dst *TxList) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxList.Merge(dst, src) +func (m *TxList) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxList.Merge(m, src) } func (m *TxList) XXX_Size() int { return xxx_messageInfo_TxList.Size(m) @@ -291,7 +299,7 @@ func (m *TxList) GetTxs() []*Tx { type Tx struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Body *TxBody `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` + Body *TxBody `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -301,16 +309,17 @@ func (m *Tx) Reset() { *m = Tx{} } func (m *Tx) String() string { return proto.CompactTextString(m) } func (*Tx) ProtoMessage() {} func (*Tx) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{4} + 
return fileDescriptor_e9ac6287ce250c9a, []int{4} } + func (m *Tx) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Tx.Unmarshal(m, b) } func (m *Tx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Tx.Marshal(b, m, deterministic) } -func (dst *Tx) XXX_Merge(src proto.Message) { - xxx_messageInfo_Tx.Merge(dst, src) +func (m *Tx) XXX_Merge(src proto.Message) { + xxx_messageInfo_Tx.Merge(m, src) } func (m *Tx) XXX_Size() int { return xxx_messageInfo_Tx.Size(m) @@ -336,14 +345,14 @@ func (m *Tx) GetBody() *TxBody { } type TxBody struct { - Nonce uint64 `protobuf:"varint,1,opt,name=nonce" json:"nonce,omitempty"` + Nonce uint64 `protobuf:"varint,1,opt,name=nonce,proto3" json:"nonce,omitempty"` Account []byte `protobuf:"bytes,2,opt,name=account,proto3" json:"account,omitempty"` Recipient []byte `protobuf:"bytes,3,opt,name=recipient,proto3" json:"recipient,omitempty"` Amount []byte `protobuf:"bytes,4,opt,name=amount,proto3" json:"amount,omitempty"` Payload []byte `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"` - GasLimit uint64 `protobuf:"varint,6,opt,name=gasLimit" json:"gasLimit,omitempty"` + GasLimit uint64 `protobuf:"varint,6,opt,name=gasLimit,proto3" json:"gasLimit,omitempty"` GasPrice []byte `protobuf:"bytes,7,opt,name=gasPrice,proto3" json:"gasPrice,omitempty"` - Type TxType `protobuf:"varint,8,opt,name=type,enum=types.TxType" json:"type,omitempty"` + Type TxType `protobuf:"varint,8,opt,name=type,proto3,enum=types.TxType" json:"type,omitempty"` ChainIdHash []byte `protobuf:"bytes,9,opt,name=chainIdHash,proto3" json:"chainIdHash,omitempty"` Sign []byte `protobuf:"bytes,10,opt,name=sign,proto3" json:"sign,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -355,16 +364,17 @@ func (m *TxBody) Reset() { *m = TxBody{} } func (m *TxBody) String() string { return proto.CompactTextString(m) } func (*TxBody) ProtoMessage() {} func (*TxBody) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{5} + return fileDescriptor_e9ac6287ce250c9a, []int{5} } + func (m *TxBody) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TxBody.Unmarshal(m, b) } func (m *TxBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_TxBody.Marshal(b, m, deterministic) } -func (dst *TxBody) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxBody.Merge(dst, src) +func (m *TxBody) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxBody.Merge(m, src) } func (m *TxBody) XXX_Size() int { return xxx_messageInfo_TxBody.Size(m) @@ -448,7 +458,7 @@ func (m *TxBody) GetSign() []byte { // TxIdx specifies a transaction's block hash and index within the block body type TxIdx struct { BlockHash []byte `protobuf:"bytes,1,opt,name=blockHash,proto3" json:"blockHash,omitempty"` - Idx int32 `protobuf:"varint,2,opt,name=idx" json:"idx,omitempty"` + Idx int32 `protobuf:"varint,2,opt,name=idx,proto3" json:"idx,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -458,16 +468,17 @@ func (m *TxIdx) Reset() { *m = TxIdx{} } func (m *TxIdx) String() string { return proto.CompactTextString(m) } func (*TxIdx) ProtoMessage() {} func (*TxIdx) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{6} + return fileDescriptor_e9ac6287ce250c9a, []int{6} } + func (m *TxIdx) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TxIdx.Unmarshal(m, b) } func (m *TxIdx) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { return xxx_messageInfo_TxIdx.Marshal(b, m, deterministic) } -func (dst *TxIdx) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxIdx.Merge(dst, src) +func (m *TxIdx) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxIdx.Merge(m, src) } func (m *TxIdx) XXX_Size() int { return xxx_messageInfo_TxIdx.Size(m) @@ -493,8 +504,8 @@ func (m *TxIdx) GetIdx() int32 { } type TxInBlock struct { - TxIdx *TxIdx `protobuf:"bytes,1,opt,name=txIdx" json:"txIdx,omitempty"` - Tx *Tx `protobuf:"bytes,2,opt,name=tx" json:"tx,omitempty"` + TxIdx *TxIdx `protobuf:"bytes,1,opt,name=txIdx,proto3" json:"txIdx,omitempty"` + Tx *Tx `protobuf:"bytes,2,opt,name=tx,proto3" json:"tx,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -504,16 +515,17 @@ func (m *TxInBlock) Reset() { *m = TxInBlock{} } func (m *TxInBlock) String() string { return proto.CompactTextString(m) } func (*TxInBlock) ProtoMessage() {} func (*TxInBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{7} + return fileDescriptor_e9ac6287ce250c9a, []int{7} } + func (m *TxInBlock) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TxInBlock.Unmarshal(m, b) } func (m *TxInBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_TxInBlock.Marshal(b, m, deterministic) } -func (dst *TxInBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxInBlock.Merge(dst, src) +func (m *TxInBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxInBlock.Merge(m, src) } func (m *TxInBlock) XXX_Size() int { return xxx_messageInfo_TxInBlock.Size(m) @@ -539,11 +551,11 @@ func (m *TxInBlock) GetTx() *Tx { } type State struct { - Nonce uint64 `protobuf:"varint,1,opt,name=nonce" json:"nonce,omitempty"` + Nonce uint64 `protobuf:"varint,1,opt,name=nonce,proto3" json:"nonce,omitempty"` Balance []byte `protobuf:"bytes,2,opt,name=balance,proto3" json:"balance,omitempty"` CodeHash []byte `protobuf:"bytes,3,opt,name=codeHash,proto3" json:"codeHash,omitempty"` StorageRoot []byte `protobuf:"bytes,4,opt,name=storageRoot,proto3" json:"storageRoot,omitempty"` - SqlRecoveryPoint uint64 `protobuf:"varint,5,opt,name=sqlRecoveryPoint" json:"sqlRecoveryPoint,omitempty"` + SqlRecoveryPoint uint64 `protobuf:"varint,5,opt,name=sqlRecoveryPoint,proto3" json:"sqlRecoveryPoint,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -553,16 +565,17 @@ func (m *State) Reset() { *m = State{} } func (m *State) String() string { return proto.CompactTextString(m) } func (*State) ProtoMessage() {} func (*State) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{8} + return fileDescriptor_e9ac6287ce250c9a, []int{8} } + func (m *State) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_State.Unmarshal(m, b) } func (m *State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_State.Marshal(b, m, deterministic) } -func (dst *State) XXX_Merge(src proto.Message) { - xxx_messageInfo_State.Merge(dst, src) +func (m *State) XXX_Merge(src proto.Message) { + xxx_messageInfo_State.Merge(m, src) } func (m *State) XXX_Size() int { return xxx_messageInfo_State.Size(m) @@ -609,13 +622,13 @@ func (m *State) GetSqlRecoveryPoint() uint64 { } type AccountProof struct { - State *State `protobuf:"bytes,1,opt,name=state" json:"state,omitempty"` - Inclusion bool `protobuf:"varint,2,opt,name=inclusion" json:"inclusion,omitempty"` + 
State *State `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Inclusion bool `protobuf:"varint,2,opt,name=inclusion,proto3" json:"inclusion,omitempty"` Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` ProofKey []byte `protobuf:"bytes,4,opt,name=proofKey,proto3" json:"proofKey,omitempty"` ProofVal []byte `protobuf:"bytes,5,opt,name=proofVal,proto3" json:"proofVal,omitempty"` Bitmap []byte `protobuf:"bytes,6,opt,name=bitmap,proto3" json:"bitmap,omitempty"` - Height uint32 `protobuf:"varint,7,opt,name=height" json:"height,omitempty"` + Height uint32 `protobuf:"varint,7,opt,name=height,proto3" json:"height,omitempty"` AuditPath [][]byte `protobuf:"bytes,8,rep,name=auditPath,proto3" json:"auditPath,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -626,16 +639,17 @@ func (m *AccountProof) Reset() { *m = AccountProof{} } func (m *AccountProof) String() string { return proto.CompactTextString(m) } func (*AccountProof) ProtoMessage() {} func (*AccountProof) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{9} + return fileDescriptor_e9ac6287ce250c9a, []int{9} } + func (m *AccountProof) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AccountProof.Unmarshal(m, b) } func (m *AccountProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AccountProof.Marshal(b, m, deterministic) } -func (dst *AccountProof) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccountProof.Merge(dst, src) +func (m *AccountProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccountProof.Merge(m, src) } func (m *AccountProof) XXX_Size() int { return xxx_messageInfo_AccountProof.Size(m) @@ -704,12 +718,12 @@ func (m *AccountProof) GetAuditPath() [][]byte { type ContractVarProof struct { Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - Inclusion bool `protobuf:"varint,2,opt,name=inclusion" json:"inclusion,omitempty"` - Key string `protobuf:"bytes,3,opt,name=key" json:"key,omitempty"` + Inclusion bool `protobuf:"varint,2,opt,name=inclusion,proto3" json:"inclusion,omitempty"` + Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` ProofKey []byte `protobuf:"bytes,4,opt,name=proofKey,proto3" json:"proofKey,omitempty"` ProofVal []byte `protobuf:"bytes,5,opt,name=proofVal,proto3" json:"proofVal,omitempty"` Bitmap []byte `protobuf:"bytes,6,opt,name=bitmap,proto3" json:"bitmap,omitempty"` - Height uint32 `protobuf:"varint,7,opt,name=height" json:"height,omitempty"` + Height uint32 `protobuf:"varint,7,opt,name=height,proto3" json:"height,omitempty"` AuditPath [][]byte `protobuf:"bytes,8,rep,name=auditPath,proto3" json:"auditPath,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -720,16 +734,17 @@ func (m *ContractVarProof) Reset() { *m = ContractVarProof{} } func (m *ContractVarProof) String() string { return proto.CompactTextString(m) } func (*ContractVarProof) ProtoMessage() {} func (*ContractVarProof) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{10} + return fileDescriptor_e9ac6287ce250c9a, []int{10} } + func (m *ContractVarProof) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ContractVarProof.Unmarshal(m, b) } func (m *ContractVarProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ContractVarProof.Marshal(b, m, deterministic) } -func (dst *ContractVarProof) XXX_Merge(src proto.Message) 
{ - xxx_messageInfo_ContractVarProof.Merge(dst, src) +func (m *ContractVarProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContractVarProof.Merge(m, src) } func (m *ContractVarProof) XXX_Size() int { return xxx_messageInfo_ContractVarProof.Size(m) @@ -797,8 +812,8 @@ func (m *ContractVarProof) GetAuditPath() [][]byte { } type StateQueryProof struct { - ContractProof *AccountProof `protobuf:"bytes,1,opt,name=contractProof" json:"contractProof,omitempty"` - VarProofs []*ContractVarProof `protobuf:"bytes,2,rep,name=varProofs" json:"varProofs,omitempty"` + ContractProof *AccountProof `protobuf:"bytes,1,opt,name=contractProof,proto3" json:"contractProof,omitempty"` + VarProofs []*ContractVarProof `protobuf:"bytes,2,rep,name=varProofs,proto3" json:"varProofs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -808,16 +823,17 @@ func (m *StateQueryProof) Reset() { *m = StateQueryProof{} } func (m *StateQueryProof) String() string { return proto.CompactTextString(m) } func (*StateQueryProof) ProtoMessage() {} func (*StateQueryProof) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{11} + return fileDescriptor_e9ac6287ce250c9a, []int{11} } + func (m *StateQueryProof) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StateQueryProof.Unmarshal(m, b) } func (m *StateQueryProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StateQueryProof.Marshal(b, m, deterministic) } -func (dst *StateQueryProof) XXX_Merge(src proto.Message) { - xxx_messageInfo_StateQueryProof.Merge(dst, src) +func (m *StateQueryProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_StateQueryProof.Merge(m, src) } func (m *StateQueryProof) XXX_Size() int { return xxx_messageInfo_StateQueryProof.Size(m) @@ -844,16 +860,16 @@ func (m *StateQueryProof) GetVarProofs() []*ContractVarProof { type Receipt struct { ContractAddress []byte `protobuf:"bytes,1,opt,name=contractAddress,proto3" json:"contractAddress,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` - Ret string `protobuf:"bytes,3,opt,name=ret" json:"ret,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + Ret string `protobuf:"bytes,3,opt,name=ret,proto3" json:"ret,omitempty"` TxHash []byte `protobuf:"bytes,4,opt,name=txHash,proto3" json:"txHash,omitempty"` FeeUsed []byte `protobuf:"bytes,5,opt,name=feeUsed,proto3" json:"feeUsed,omitempty"` CumulativeFeeUsed []byte `protobuf:"bytes,6,opt,name=cumulativeFeeUsed,proto3" json:"cumulativeFeeUsed,omitempty"` Bloom []byte `protobuf:"bytes,7,opt,name=bloom,proto3" json:"bloom,omitempty"` - Events []*Event `protobuf:"bytes,8,rep,name=events" json:"events,omitempty"` - BlockNo uint64 `protobuf:"varint,9,opt,name=blockNo" json:"blockNo,omitempty"` + Events []*Event `protobuf:"bytes,8,rep,name=events,proto3" json:"events,omitempty"` + BlockNo uint64 `protobuf:"varint,9,opt,name=blockNo,proto3" json:"blockNo,omitempty"` BlockHash []byte `protobuf:"bytes,10,opt,name=blockHash,proto3" json:"blockHash,omitempty"` - TxIndex int32 `protobuf:"varint,11,opt,name=txIndex" json:"txIndex,omitempty"` + TxIndex int32 `protobuf:"varint,11,opt,name=txIndex,proto3" json:"txIndex,omitempty"` From []byte `protobuf:"bytes,12,opt,name=from,proto3" json:"from,omitempty"` To []byte `protobuf:"bytes,13,opt,name=to,proto3" json:"to,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -865,16 +881,17 @@ func (m *Receipt) 
Reset() { *m = Receipt{} } func (m *Receipt) String() string { return proto.CompactTextString(m) } func (*Receipt) ProtoMessage() {} func (*Receipt) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{12} + return fileDescriptor_e9ac6287ce250c9a, []int{12} } + func (m *Receipt) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Receipt.Unmarshal(m, b) } func (m *Receipt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Receipt.Marshal(b, m, deterministic) } -func (dst *Receipt) XXX_Merge(src proto.Message) { - xxx_messageInfo_Receipt.Merge(dst, src) +func (m *Receipt) XXX_Merge(src proto.Message) { + xxx_messageInfo_Receipt.Merge(m, src) } func (m *Receipt) XXX_Size() int { return xxx_messageInfo_Receipt.Size(m) @@ -978,13 +995,13 @@ func (m *Receipt) GetTo() []byte { type Event struct { ContractAddress []byte `protobuf:"bytes,1,opt,name=contractAddress,proto3" json:"contractAddress,omitempty"` - EventName string `protobuf:"bytes,2,opt,name=eventName" json:"eventName,omitempty"` - JsonArgs string `protobuf:"bytes,3,opt,name=jsonArgs" json:"jsonArgs,omitempty"` - EventIdx int32 `protobuf:"varint,4,opt,name=eventIdx" json:"eventIdx,omitempty"` + EventName string `protobuf:"bytes,2,opt,name=eventName,proto3" json:"eventName,omitempty"` + JsonArgs string `protobuf:"bytes,3,opt,name=jsonArgs,proto3" json:"jsonArgs,omitempty"` + EventIdx int32 `protobuf:"varint,4,opt,name=eventIdx,proto3" json:"eventIdx,omitempty"` TxHash []byte `protobuf:"bytes,5,opt,name=txHash,proto3" json:"txHash,omitempty"` BlockHash []byte `protobuf:"bytes,6,opt,name=blockHash,proto3" json:"blockHash,omitempty"` - BlockNo uint64 `protobuf:"varint,7,opt,name=blockNo" json:"blockNo,omitempty"` - TxIndex int32 `protobuf:"varint,8,opt,name=txIndex" json:"txIndex,omitempty"` + BlockNo uint64 `protobuf:"varint,7,opt,name=blockNo,proto3" json:"blockNo,omitempty"` + TxIndex int32 `protobuf:"varint,8,opt,name=txIndex,proto3" json:"txIndex,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -994,16 +1011,17 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{13} + return fileDescriptor_e9ac6287ce250c9a, []int{13} } + func (m *Event) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Event.Unmarshal(m, b) } func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Event.Marshal(b, m, deterministic) } -func (dst *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(dst, src) +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) } func (m *Event) XXX_Size() int { return xxx_messageInfo_Event.Size(m) @@ -1071,7 +1089,7 @@ func (m *Event) GetTxIndex() int32 { } type FnArgument struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1081,16 +1099,17 @@ func (m *FnArgument) Reset() { *m = FnArgument{} } func (m *FnArgument) String() string { return proto.CompactTextString(m) } func (*FnArgument) ProtoMessage() {} func (*FnArgument) Descriptor() ([]byte, []int) { - return 
fileDescriptor_blockchain_c7f13acaf0991c73, []int{14} + return fileDescriptor_e9ac6287ce250c9a, []int{14} } + func (m *FnArgument) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FnArgument.Unmarshal(m, b) } func (m *FnArgument) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FnArgument.Marshal(b, m, deterministic) } -func (dst *FnArgument) XXX_Merge(src proto.Message) { - xxx_messageInfo_FnArgument.Merge(dst, src) +func (m *FnArgument) XXX_Merge(src proto.Message) { + xxx_messageInfo_FnArgument.Merge(m, src) } func (m *FnArgument) XXX_Size() int { return xxx_messageInfo_FnArgument.Size(m) @@ -1109,10 +1128,10 @@ func (m *FnArgument) GetName() string { } type Function struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Arguments []*FnArgument `protobuf:"bytes,2,rep,name=arguments" json:"arguments,omitempty"` - Payable bool `protobuf:"varint,3,opt,name=payable" json:"payable,omitempty"` - View bool `protobuf:"varint,4,opt,name=view" json:"view,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Arguments []*FnArgument `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"` + Payable bool `protobuf:"varint,3,opt,name=payable,proto3" json:"payable,omitempty"` + View bool `protobuf:"varint,4,opt,name=view,proto3" json:"view,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1122,16 +1141,17 @@ func (m *Function) Reset() { *m = Function{} } func (m *Function) String() string { return proto.CompactTextString(m) } func (*Function) ProtoMessage() {} func (*Function) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{15} + return fileDescriptor_e9ac6287ce250c9a, []int{15} } + func (m *Function) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Function.Unmarshal(m, b) } func (m *Function) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Function.Marshal(b, m, deterministic) } -func (dst *Function) XXX_Merge(src proto.Message) { - xxx_messageInfo_Function.Merge(dst, src) +func (m *Function) XXX_Merge(src proto.Message) { + xxx_messageInfo_Function.Merge(m, src) } func (m *Function) XXX_Size() int { return xxx_messageInfo_Function.Size(m) @@ -1171,9 +1191,9 @@ func (m *Function) GetView() bool { } type StateVar struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"` - Len int32 `protobuf:"varint,3,opt,name=len" json:"len,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Len int32 `protobuf:"varint,3,opt,name=len,proto3" json:"len,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1183,16 +1203,17 @@ func (m *StateVar) Reset() { *m = StateVar{} } func (m *StateVar) String() string { return proto.CompactTextString(m) } func (*StateVar) ProtoMessage() {} func (*StateVar) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{16} + return fileDescriptor_e9ac6287ce250c9a, []int{16} } + func (m *StateVar) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StateVar.Unmarshal(m, b) } func (m *StateVar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StateVar.Marshal(b, m, 
deterministic) } -func (dst *StateVar) XXX_Merge(src proto.Message) { - xxx_messageInfo_StateVar.Merge(dst, src) +func (m *StateVar) XXX_Merge(src proto.Message) { + xxx_messageInfo_StateVar.Merge(m, src) } func (m *StateVar) XXX_Size() int { return xxx_messageInfo_StateVar.Size(m) @@ -1225,10 +1246,10 @@ func (m *StateVar) GetLen() int32 { } type ABI struct { - Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` - Language string `protobuf:"bytes,2,opt,name=language" json:"language,omitempty"` - Functions []*Function `protobuf:"bytes,3,rep,name=functions" json:"functions,omitempty"` - StateVariables []*StateVar `protobuf:"bytes,4,rep,name=state_variables,json=stateVariables" json:"state_variables,omitempty"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"` + Functions []*Function `protobuf:"bytes,3,rep,name=functions,proto3" json:"functions,omitempty"` + StateVariables []*StateVar `protobuf:"bytes,4,rep,name=state_variables,json=stateVariables,proto3" json:"state_variables,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1238,16 +1259,17 @@ func (m *ABI) Reset() { *m = ABI{} } func (m *ABI) String() string { return proto.CompactTextString(m) } func (*ABI) ProtoMessage() {} func (*ABI) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{17} + return fileDescriptor_e9ac6287ce250c9a, []int{17} } + func (m *ABI) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ABI.Unmarshal(m, b) } func (m *ABI) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ABI.Marshal(b, m, deterministic) } -func (dst *ABI) XXX_Merge(src proto.Message) { - xxx_messageInfo_ABI.Merge(dst, src) +func (m *ABI) XXX_Merge(src proto.Message) { + xxx_messageInfo_ABI.Merge(m, src) } func (m *ABI) XXX_Size() int { return xxx_messageInfo_ABI.Size(m) @@ -1298,16 +1320,17 @@ func (m *Query) Reset() { *m = Query{} } func (m *Query) String() string { return proto.CompactTextString(m) } func (*Query) ProtoMessage() {} func (*Query) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{18} + return fileDescriptor_e9ac6287ce250c9a, []int{18} } + func (m *Query) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Query.Unmarshal(m, b) } func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Query.Marshal(b, m, deterministic) } -func (dst *Query) XXX_Merge(src proto.Message) { - xxx_messageInfo_Query.Merge(dst, src) +func (m *Query) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query.Merge(m, src) } func (m *Query) XXX_Size() int { return xxx_messageInfo_Query.Size(m) @@ -1334,9 +1357,9 @@ func (m *Query) GetQueryinfo() []byte { type StateQuery struct { ContractAddress []byte `protobuf:"bytes,1,opt,name=contractAddress,proto3" json:"contractAddress,omitempty"` - StorageKeys []string `protobuf:"bytes,2,rep,name=storageKeys" json:"storageKeys,omitempty"` + StorageKeys []string `protobuf:"bytes,2,rep,name=storageKeys,proto3" json:"storageKeys,omitempty"` Root []byte `protobuf:"bytes,3,opt,name=root,proto3" json:"root,omitempty"` - Compressed bool `protobuf:"varint,4,opt,name=compressed" json:"compressed,omitempty"` + Compressed bool `protobuf:"varint,4,opt,name=compressed,proto3" json:"compressed,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` 
XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1346,16 +1369,17 @@ func (m *StateQuery) Reset() { *m = StateQuery{} } func (m *StateQuery) String() string { return proto.CompactTextString(m) } func (*StateQuery) ProtoMessage() {} func (*StateQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{19} + return fileDescriptor_e9ac6287ce250c9a, []int{19} } + func (m *StateQuery) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StateQuery.Unmarshal(m, b) } func (m *StateQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_StateQuery.Marshal(b, m, deterministic) } -func (dst *StateQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_StateQuery.Merge(dst, src) +func (m *StateQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_StateQuery.Merge(m, src) } func (m *StateQuery) XXX_Size() int { return xxx_messageInfo_StateQuery.Size(m) @@ -1396,12 +1420,12 @@ func (m *StateQuery) GetCompressed() bool { type FilterInfo struct { ContractAddress []byte `protobuf:"bytes,1,opt,name=contractAddress,proto3" json:"contractAddress,omitempty"` - EventName string `protobuf:"bytes,2,opt,name=eventName" json:"eventName,omitempty"` - Blockfrom uint64 `protobuf:"varint,3,opt,name=blockfrom" json:"blockfrom,omitempty"` - Blockto uint64 `protobuf:"varint,4,opt,name=blockto" json:"blockto,omitempty"` - Desc bool `protobuf:"varint,5,opt,name=desc" json:"desc,omitempty"` + EventName string `protobuf:"bytes,2,opt,name=eventName,proto3" json:"eventName,omitempty"` + Blockfrom uint64 `protobuf:"varint,3,opt,name=blockfrom,proto3" json:"blockfrom,omitempty"` + Blockto uint64 `protobuf:"varint,4,opt,name=blockto,proto3" json:"blockto,omitempty"` + Desc bool `protobuf:"varint,5,opt,name=desc,proto3" json:"desc,omitempty"` ArgFilter []byte `protobuf:"bytes,6,opt,name=argFilter,proto3" json:"argFilter,omitempty"` - RecentBlockCnt int32 `protobuf:"varint,7,opt,name=recentBlockCnt" json:"recentBlockCnt,omitempty"` + RecentBlockCnt int32 `protobuf:"varint,7,opt,name=recentBlockCnt,proto3" json:"recentBlockCnt,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1411,16 +1435,17 @@ func (m *FilterInfo) Reset() { *m = FilterInfo{} } func (m *FilterInfo) String() string { return proto.CompactTextString(m) } func (*FilterInfo) ProtoMessage() {} func (*FilterInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_blockchain_c7f13acaf0991c73, []int{20} + return fileDescriptor_e9ac6287ce250c9a, []int{20} } + func (m *FilterInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_FilterInfo.Unmarshal(m, b) } func (m *FilterInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_FilterInfo.Marshal(b, m, deterministic) } -func (dst *FilterInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_FilterInfo.Merge(dst, src) +func (m *FilterInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_FilterInfo.Merge(m, src) } func (m *FilterInfo) XXX_Size() int { return xxx_messageInfo_FilterInfo.Size(m) @@ -1481,6 +1506,7 @@ func (m *FilterInfo) GetRecentBlockCnt() int32 { } func init() { + proto.RegisterEnum("types.TxType", TxType_name, TxType_value) proto.RegisterType((*Block)(nil), "types.Block") proto.RegisterType((*BlockHeader)(nil), "types.BlockHeader") proto.RegisterType((*BlockBody)(nil), "types.BlockBody") @@ -1502,12 +1528,11 @@ func init() { proto.RegisterType((*Query)(nil), "types.Query") 
proto.RegisterType((*StateQuery)(nil), "types.StateQuery") proto.RegisterType((*FilterInfo)(nil), "types.FilterInfo") - proto.RegisterEnum("types.TxType", TxType_name, TxType_value) } -func init() { proto.RegisterFile("blockchain.proto", fileDescriptor_blockchain_c7f13acaf0991c73) } +func init() { proto.RegisterFile("blockchain.proto", fileDescriptor_e9ac6287ce250c9a) } -var fileDescriptor_blockchain_c7f13acaf0991c73 = []byte{ +var fileDescriptor_e9ac6287ce250c9a = []byte{ // 1338 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4d, 0x6f, 0x23, 0x45, 0x13, 0x7e, 0xc7, 0xf6, 0x38, 0x76, 0xe5, 0xcb, 0xdb, 0xef, 0x0a, 0x06, 0x58, 0xad, 0xcc, 0x28, diff --git a/types/host.go b/types/host.go new file mode 100644 index 000000000..195e8beb5 --- /dev/null +++ b/types/host.go @@ -0,0 +1,20 @@ +/* + * @file + * @copyright defined in aergo/LICENSE.txt + */ + +package types + +import ( + "time" +) + +// HostAccessor is an interface that provides information about the server +type HostAccessor interface { + // Version returns the version of this server + Version() string + + // StartTime is the time when the server was booted + StartTime() time.Time +} + diff --git a/types/metric.pb.go b/types/metric.pb.go index 485c1d51e..9b5e23b37 100644 --- a/types/metric.pb.go +++ b/types/metric.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: metric.proto -package types // import "github.com/aergoio/aergo/types" +package types -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -31,6 +33,7 @@ var MetricType_name = map[int32]string{ 0: "NOTHING", 1: "P2P_NETWORK", } + var MetricType_value = map[string]int32{ "NOTHING": 0, "P2P_NETWORK": 1, @@ -39,12 +42,13 @@ var MetricType_value = map[string]int32{ func (x MetricType) String() string { return proto.EnumName(MetricType_name, int32(x)) } + func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_metric_295631cc212b5411, []int{0} + return fileDescriptor_da41641f55bff5df, []int{0} } type MetricsRequest struct { - Types []MetricType `protobuf:"varint,1,rep,packed,name=types,enum=types.MetricType" json:"types,omitempty"` + Types []MetricType `protobuf:"varint,1,rep,packed,name=types,proto3,enum=types.MetricType" json:"types,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -54,16 +58,17 @@ func (m *MetricsRequest) Reset() { *m = MetricsRequest{} } func (m *MetricsRequest) String() string { return proto.CompactTextString(m) } func (*MetricsRequest) ProtoMessage() {} func (*MetricsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_metric_295631cc212b5411, []int{0} + return fileDescriptor_da41641f55bff5df, []int{0} } + func (m *MetricsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MetricsRequest.Unmarshal(m, b) } func (m *MetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MetricsRequest.Marshal(b, m, deterministic) } -func (dst *MetricsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsRequest.Merge(dst, src) +func (m *MetricsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricsRequest.Merge(m, src) } func (m *MetricsRequest) XXX_Size() int { return xxx_messageInfo_MetricsRequest.Size(m)
@@ -82,7 +87,7 @@ func (m *MetricsRequest) GetTypes() []MetricType { } type Metrics struct { - Peers []*PeerMetric `protobuf:"bytes,1,rep,name=peers" json:"peers,omitempty"` + Peers []*PeerMetric `protobuf:"bytes,1,rep,name=peers,proto3" json:"peers,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -92,16 +97,17 @@ func (m *Metrics) Reset() { *m = Metrics{} } func (m *Metrics) String() string { return proto.CompactTextString(m) } func (*Metrics) ProtoMessage() {} func (*Metrics) Descriptor() ([]byte, []int) { - return fileDescriptor_metric_295631cc212b5411, []int{1} + return fileDescriptor_da41641f55bff5df, []int{1} } + func (m *Metrics) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metrics.Unmarshal(m, b) } func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) } -func (dst *Metrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metrics.Merge(dst, src) +func (m *Metrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metrics.Merge(m, src) } func (m *Metrics) XXX_Size() int { return xxx_messageInfo_Metrics.Size(m) @@ -121,10 +127,10 @@ func (m *Metrics) GetPeers() []*PeerMetric { type PeerMetric struct { PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` - SumIn int64 `protobuf:"varint,2,opt,name=sumIn" json:"sumIn,omitempty"` - AvrIn int64 `protobuf:"varint,3,opt,name=avrIn" json:"avrIn,omitempty"` - SumOut int64 `protobuf:"varint,4,opt,name=sumOut" json:"sumOut,omitempty"` - AvrOut int64 `protobuf:"varint,5,opt,name=avrOut" json:"avrOut,omitempty"` + SumIn int64 `protobuf:"varint,2,opt,name=sumIn,proto3" json:"sumIn,omitempty"` + AvrIn int64 `protobuf:"varint,3,opt,name=avrIn,proto3" json:"avrIn,omitempty"` + SumOut int64 `protobuf:"varint,4,opt,name=sumOut,proto3" json:"sumOut,omitempty"` + AvrOut int64 `protobuf:"varint,5,opt,name=avrOut,proto3" json:"avrOut,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -134,16 +140,17 @@ func (m *PeerMetric) Reset() { *m = PeerMetric{} } func (m *PeerMetric) String() string { return proto.CompactTextString(m) } func (*PeerMetric) ProtoMessage() {} func (*PeerMetric) Descriptor() ([]byte, []int) { - return fileDescriptor_metric_295631cc212b5411, []int{2} + return fileDescriptor_da41641f55bff5df, []int{2} } + func (m *PeerMetric) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PeerMetric.Unmarshal(m, b) } func (m *PeerMetric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PeerMetric.Marshal(b, m, deterministic) } -func (dst *PeerMetric) XXX_Merge(src proto.Message) { - xxx_messageInfo_PeerMetric.Merge(dst, src) +func (m *PeerMetric) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerMetric.Merge(m, src) } func (m *PeerMetric) XXX_Size() int { return xxx_messageInfo_PeerMetric.Size(m) @@ -190,15 +197,15 @@ func (m *PeerMetric) GetAvrOut() int64 { } func init() { + proto.RegisterEnum("types.MetricType", MetricType_name, MetricType_value) proto.RegisterType((*MetricsRequest)(nil), "types.MetricsRequest") proto.RegisterType((*Metrics)(nil), "types.Metrics") proto.RegisterType((*PeerMetric)(nil), "types.PeerMetric") - proto.RegisterEnum("types.MetricType", MetricType_name, MetricType_value) } -func init() { proto.RegisterFile("metric.proto", fileDescriptor_metric_295631cc212b5411) } +func init() { proto.RegisterFile("metric.proto", 
fileDescriptor_da41641f55bff5df) } -var fileDescriptor_metric_295631cc212b5411 = []byte{ +var fileDescriptor_da41641f55bff5df = []byte{ // 251 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x90, 0x4d, 0x4b, 0xc3, 0x40, 0x10, 0x40, 0x5d, 0x63, 0x5a, 0x98, 0x94, 0x5a, 0x17, 0x91, 0x9c, 0x24, 0xf4, 0x62, 0xe8, 0x21, diff --git a/types/node.pb.go b/types/node.pb.go index 498003119..856586526 100644 --- a/types/node.pb.go +++ b/types/node.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: node.proto -package types // import "github.com/aergoio/aergo/types" +package types -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -20,8 +22,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type PeerAddress struct { // address is string representation of ip address or domain name. - Address string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` - Port uint32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` PeerID []byte `protobuf:"bytes,3,opt,name=peerID,proto3" json:"peerID,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -32,16 +34,17 @@ func (m *PeerAddress) Reset() { *m = PeerAddress{} } func (m *PeerAddress) String() string { return proto.CompactTextString(m) } func (*PeerAddress) ProtoMessage() {} func (*PeerAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_node_8e9c8d3902796194, []int{0} + return fileDescriptor_0c843d59d2d938e7, []int{0} } + func (m *PeerAddress) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PeerAddress.Unmarshal(m, b) } func (m *PeerAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PeerAddress.Marshal(b, m, deterministic) } -func (dst *PeerAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_PeerAddress.Merge(dst, src) +func (m *PeerAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerAddress.Merge(m, src) } func (m *PeerAddress) XXX_Size() int { return xxx_messageInfo_PeerAddress.Size(m) @@ -77,9 +80,9 @@ func init() { proto.RegisterType((*PeerAddress)(nil), "types.PeerAddress") } -func init() { proto.RegisterFile("node.proto", fileDescriptor_node_8e9c8d3902796194) } +func init() { proto.RegisterFile("node.proto", fileDescriptor_0c843d59d2d938e7) } -var fileDescriptor_node_8e9c8d3902796194 = []byte{ +var fileDescriptor_0c843d59d2d938e7 = []byte{ // 141 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xca, 0xcb, 0x4f, 0x49, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0x56, 0x0a, 0xe6, diff --git a/types/p2p.pb.go b/types/p2p.pb.go index 7369c00ea..d0c65e9f1 100644 --- a/types/p2p.pb.go +++ b/types/p2p.pb.go @@ -1,11 +1,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// source: p2p.proto -package types // import "github.com/aergoio/aergo/types" +package types -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -86,6 +88,7 @@ var ResultStatus_name = map[int32]string{ 15: "DATA_LOSS", 16: "UNAUTHENTICATED", } + var ResultStatus_value = map[string]int32{ "OK": 0, "CANCELED": 1, @@ -109,30 +112,32 @@ var ResultStatus_value = map[string]int32{ func (x ResultStatus) String() string { return proto.EnumName(ResultStatus_name, int32(x)) } + func (ResultStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{0} + return fileDescriptor_e7fdddb109e6467a, []int{0} } -// MessageData has datas shared between all app protocols +// MsgHeader contains common properties of all p2p messages type MsgHeader struct { - // client version - ClientVersion string `protobuf:"bytes,1,opt,name=clientVersion" json:"clientVersion,omitempty"` + // Deprecated client version. + ClientVersion string `protobuf:"bytes,1,opt,name=clientVersion,proto3" json:"clientVersion,omitempty"` // unix time - Timestamp int64 `protobuf:"varint,2,opt,name=timestamp" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // allows requesters to use request data when processing a response - Id string `protobuf:"bytes,3,opt,name=id" json:"id,omitempty"` + Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` // Gossip is flag to have receiver peer gossip the message to neighbors - Gossip bool `protobuf:"varint,4,opt,name=gossip" json:"gossip,omitempty"` + // Deprecated whether to gossip other peers is determined by subprotocol since version 0.3.0 . + Gossip bool `protobuf:"varint,4,opt,name=gossip,proto3" json:"gossip,omitempty"` // PeerID is id of node that created the message (not the peer that may have sent it). =base58(mh(sha256(nodePubKey))) PeerID []byte `protobuf:"bytes,5,opt,name=peerID,proto3" json:"peerID,omitempty"` // nodePubKey Authoring node Secp256k1 public key (32bytes) - protobufs serielized NodePubKey []byte `protobuf:"bytes,6,opt,name=nodePubKey,proto3" json:"nodePubKey,omitempty"` // signature of message data + method specific data by message authoring node. format: string([]bytes) Sign []byte `protobuf:"bytes,7,opt,name=sign,proto3" json:"sign,omitempty"` - // - Subprotocol uint32 `protobuf:"varint,8,opt,name=subprotocol" json:"subprotocol,omitempty"` - // - Length uint32 `protobuf:"varint,9,opt,name=length" json:"length,omitempty"` + // sub category of message. 
the receiving peer determines how to deserialize payload data and whether to spread messages to other peers + Subprotocol uint32 `protobuf:"varint,8,opt,name=subprotocol,proto3" json:"subprotocol,omitempty"` + // size of bytes of the payload + Length uint32 `protobuf:"varint,9,opt,name=length,proto3" json:"length,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -142,16 +147,17 @@ func (m *MsgHeader) Reset() { *m = MsgHeader{} } func (m *MsgHeader) String() string { return proto.CompactTextString(m) } func (*MsgHeader) ProtoMessage() {} func (*MsgHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{0} + return fileDescriptor_e7fdddb109e6467a, []int{0} } + func (m *MsgHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MsgHeader.Unmarshal(m, b) } func (m *MsgHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MsgHeader.Marshal(b, m, deterministic) } -func (dst *MsgHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgHeader.Merge(dst, src) +func (m *MsgHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgHeader.Merge(m, src) } func (m *MsgHeader) XXX_Size() int { return xxx_messageInfo_MsgHeader.Size(m) @@ -225,8 +231,9 @@ func (m *MsgHeader) GetLength() uint32 { return 0 } +// Deprecated P2PMessage is data structure for aergo v0.2 or earlier. This structure is not used anymore since v0.3.0. type P2PMessage struct { - Header *MsgHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + Header *MsgHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -237,16 +244,17 @@ func (m *P2PMessage) Reset() { *m = P2PMessage{} } func (m *P2PMessage) String() string { return proto.CompactTextString(m) } func (*P2PMessage) ProtoMessage() {} func (*P2PMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{1} + return fileDescriptor_e7fdddb109e6467a, []int{1} } + func (m *P2PMessage) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_P2PMessage.Unmarshal(m, b) } func (m *P2PMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_P2PMessage.Marshal(b, m, deterministic) } -func (dst *P2PMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_P2PMessage.Merge(dst, src) +func (m *P2PMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_P2PMessage.Merge(m, src) } func (m *P2PMessage) XXX_Size() int { return xxx_messageInfo_P2PMessage.Size(m) @@ -274,7 +282,7 @@ func (m *P2PMessage) GetData() []byte { // Ping request message type Ping struct { BestBlockHash []byte `protobuf:"bytes,1,opt,name=best_block_hash,json=bestBlockHash,proto3" json:"best_block_hash,omitempty"` - BestHeight uint64 `protobuf:"varint,2,opt,name=best_height,json=bestHeight" json:"best_height,omitempty"` + BestHeight uint64 `protobuf:"varint,2,opt,name=best_height,json=bestHeight,proto3" json:"best_height,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -284,16 +292,17 @@ func (m *Ping) Reset() { *m = Ping{} } func (m *Ping) String() string { return proto.CompactTextString(m) } func (*Ping) ProtoMessage() {} func (*Ping) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{2} + return 
fileDescriptor_e7fdddb109e6467a, []int{2} } + func (m *Ping) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Ping.Unmarshal(m, b) } func (m *Ping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Ping.Marshal(b, m, deterministic) } -func (dst *Ping) XXX_Merge(src proto.Message) { - xxx_messageInfo_Ping.Merge(dst, src) +func (m *Ping) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ping.Merge(m, src) } func (m *Ping) XXX_Size() int { return xxx_messageInfo_Ping.Size(m) @@ -319,10 +328,9 @@ func (m *Ping) GetBestHeight() uint64 { } // Ping response message -// TODO unify to Ping? If did, how to distinguish message is request or response? type Pong struct { BestBlockHash []byte `protobuf:"bytes,1,opt,name=bestBlockHash,proto3" json:"bestBlockHash,omitempty"` - BestHeight uint64 `protobuf:"varint,2,opt,name=bestHeight" json:"bestHeight,omitempty"` + BestHeight uint64 `protobuf:"varint,2,opt,name=bestHeight,proto3" json:"bestHeight,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -332,16 +340,17 @@ func (m *Pong) Reset() { *m = Pong{} } func (m *Pong) String() string { return proto.CompactTextString(m) } func (*Pong) ProtoMessage() {} func (*Pong) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{3} + return fileDescriptor_e7fdddb109e6467a, []int{3} } + func (m *Pong) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Pong.Unmarshal(m, b) } func (m *Pong) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Pong.Marshal(b, m, deterministic) } -func (dst *Pong) XXX_Merge(src proto.Message) { - xxx_messageInfo_Pong.Merge(dst, src) +func (m *Pong) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pong.Merge(m, src) } func (m *Pong) XXX_Size() int { return xxx_messageInfo_Pong.Size(m) @@ -366,14 +375,16 @@ func (m *Pong) GetBestHeight() uint64 { return 0 } -// Ping request message +// Status is peer status exchanged during handshaking. type Status struct { - Sender *PeerAddress `protobuf:"bytes,1,opt,name=sender" json:"sender,omitempty"` + Sender *PeerAddress `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"` BestBlockHash []byte `protobuf:"bytes,2,opt,name=bestBlockHash,proto3" json:"bestBlockHash,omitempty"` - BestHeight uint64 `protobuf:"varint,3,opt,name=bestHeight" json:"bestHeight,omitempty"` + BestHeight uint64 `protobuf:"varint,3,opt,name=bestHeight,proto3" json:"bestHeight,omitempty"` ChainID []byte `protobuf:"bytes,4,opt,name=chainID,proto3" json:"chainID,omitempty"` // noExpose means that peer doesn't want to be known to other peers. 
- NoExpose bool `protobuf:"varint,5,opt,name=noExpose" json:"noExpose,omitempty"` + NoExpose bool `protobuf:"varint,5,opt,name=noExpose,proto3" json:"noExpose,omitempty"` + // version of server binary + Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -383,16 +394,17 @@ func (m *Status) Reset() { *m = Status{} } func (m *Status) String() string { return proto.CompactTextString(m) } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{4} + return fileDescriptor_e7fdddb109e6467a, []int{4} } + func (m *Status) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Status.Unmarshal(m, b) } func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Status.Marshal(b, m, deterministic) } -func (dst *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(dst, src) +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) } func (m *Status) XXX_Size() int { return xxx_messageInfo_Status.Size(m) @@ -438,8 +450,16 @@ func (m *Status) GetNoExpose() bool { return false } +func (m *Status) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +// GoAwayNotice is sent before host peer is closing connection to remote peer. it contains why the host closing connection. type GoAwayNotice struct { - Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -449,16 +469,17 @@ func (m *GoAwayNotice) Reset() { *m = GoAwayNotice{} } func (m *GoAwayNotice) String() string { return proto.CompactTextString(m) } func (*GoAwayNotice) ProtoMessage() {} func (*GoAwayNotice) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{5} + return fileDescriptor_e7fdddb109e6467a, []int{5} } + func (m *GoAwayNotice) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GoAwayNotice.Unmarshal(m, b) } func (m *GoAwayNotice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GoAwayNotice.Marshal(b, m, deterministic) } -func (dst *GoAwayNotice) XXX_Merge(src proto.Message) { - xxx_messageInfo_GoAwayNotice.Merge(dst, src) +func (m *GoAwayNotice) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoAwayNotice.Merge(m, src) } func (m *GoAwayNotice) XXX_Size() int { return xxx_messageInfo_GoAwayNotice.Size(m) @@ -477,8 +498,8 @@ func (m *GoAwayNotice) GetMessage() string { } type AddressesRequest struct { - Sender *PeerAddress `protobuf:"bytes,1,opt,name=sender" json:"sender,omitempty"` - MaxSize uint32 `protobuf:"varint,2,opt,name=maxSize" json:"maxSize,omitempty"` + Sender *PeerAddress `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"` + MaxSize uint32 `protobuf:"varint,2,opt,name=maxSize,proto3" json:"maxSize,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -488,16 +509,17 @@ func (m *AddressesRequest) Reset() { *m = AddressesRequest{} } func (m *AddressesRequest) String() string { return proto.CompactTextString(m) } func (*AddressesRequest) ProtoMessage() {} func (*AddressesRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_p2p_076915cd835bf79b, []int{6} + return fileDescriptor_e7fdddb109e6467a, []int{6} } + func (m *AddressesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AddressesRequest.Unmarshal(m, b) } func (m *AddressesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AddressesRequest.Marshal(b, m, deterministic) } -func (dst *AddressesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddressesRequest.Merge(dst, src) +func (m *AddressesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddressesRequest.Merge(m, src) } func (m *AddressesRequest) XXX_Size() int { return xxx_messageInfo_AddressesRequest.Size(m) @@ -523,8 +545,8 @@ func (m *AddressesRequest) GetMaxSize() uint32 { } type AddressesResponse struct { - Status ResultStatus `protobuf:"varint,1,opt,name=status,enum=types.ResultStatus" json:"status,omitempty"` - Peers []*PeerAddress `protobuf:"bytes,2,rep,name=peers" json:"peers,omitempty"` + Status ResultStatus `protobuf:"varint,1,opt,name=status,proto3,enum=types.ResultStatus" json:"status,omitempty"` + Peers []*PeerAddress `protobuf:"bytes,2,rep,name=peers,proto3" json:"peers,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -534,16 +556,17 @@ func (m *AddressesResponse) Reset() { *m = AddressesResponse{} } func (m *AddressesResponse) String() string { return proto.CompactTextString(m) } func (*AddressesResponse) ProtoMessage() {} func (*AddressesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{7} + return fileDescriptor_e7fdddb109e6467a, []int{7} } + func (m *AddressesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AddressesResponse.Unmarshal(m, b) } func (m *AddressesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AddressesResponse.Marshal(b, m, deterministic) } -func (dst *AddressesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddressesResponse.Merge(dst, src) +func (m *AddressesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddressesResponse.Merge(m, src) } func (m *AddressesResponse) XXX_Size() int { return xxx_messageInfo_AddressesResponse.Size(m) @@ -568,9 +591,12 @@ func (m *AddressesResponse) GetPeers() []*PeerAddress { return nil } +// NewBlockNotice is sent to other peers when host node add a block, which is not produced by this host peer (i.e. added block +// that other bp node produced.) It contains just hash and blockNo. The host node will not send notice if target receiving peer +// knows that block already at best effort. 
type NewBlockNotice struct { BlockHash []byte `protobuf:"bytes,1,opt,name=blockHash,proto3" json:"blockHash,omitempty"` - BlockNo uint64 `protobuf:"varint,2,opt,name=blockNo" json:"blockNo,omitempty"` + BlockNo uint64 `protobuf:"varint,2,opt,name=blockNo,proto3" json:"blockNo,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -580,16 +606,17 @@ func (m *NewBlockNotice) Reset() { *m = NewBlockNotice{} } func (m *NewBlockNotice) String() string { return proto.CompactTextString(m) } func (*NewBlockNotice) ProtoMessage() {} func (*NewBlockNotice) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{8} + return fileDescriptor_e7fdddb109e6467a, []int{8} } + func (m *NewBlockNotice) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NewBlockNotice.Unmarshal(m, b) } func (m *NewBlockNotice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NewBlockNotice.Marshal(b, m, deterministic) } -func (dst *NewBlockNotice) XXX_Merge(src proto.Message) { - xxx_messageInfo_NewBlockNotice.Merge(dst, src) +func (m *NewBlockNotice) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewBlockNotice.Merge(m, src) } func (m *NewBlockNotice) XXX_Size() int { return xxx_messageInfo_NewBlockNotice.Size(m) @@ -614,10 +641,12 @@ func (m *NewBlockNotice) GetBlockNo() uint64 { return 0 } +// BlockProducedNotice is sent when BP created blocks and host peer is BP (or surrogate of BP) and receiving peer is also trusted BP or surrogate of BP. +// It contains whole block information type BlockProducedNotice struct { ProducerID []byte `protobuf:"bytes,1,opt,name=producerID,proto3" json:"producerID,omitempty"` - BlockNo uint64 `protobuf:"varint,2,opt,name=blockNo" json:"blockNo,omitempty"` - Block *Block `protobuf:"bytes,3,opt,name=block" json:"block,omitempty"` + BlockNo uint64 `protobuf:"varint,2,opt,name=blockNo,proto3" json:"blockNo,omitempty"` + Block *Block `protobuf:"bytes,3,opt,name=block,proto3" json:"block,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -627,16 +656,17 @@ func (m *BlockProducedNotice) Reset() { *m = BlockProducedNotice{} } func (m *BlockProducedNotice) String() string { return proto.CompactTextString(m) } func (*BlockProducedNotice) ProtoMessage() {} func (*BlockProducedNotice) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{9} + return fileDescriptor_e7fdddb109e6467a, []int{9} } + func (m *BlockProducedNotice) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockProducedNotice.Unmarshal(m, b) } func (m *BlockProducedNotice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockProducedNotice.Marshal(b, m, deterministic) } -func (dst *BlockProducedNotice) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockProducedNotice.Merge(dst, src) +func (m *BlockProducedNotice) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockProducedNotice.Merge(m, src) } func (m *BlockProducedNotice) XXX_Size() int { return xxx_messageInfo_BlockProducedNotice.Size(m) @@ -673,11 +703,11 @@ type GetBlockHeadersRequest struct { // Hash indicated referenced block hash. server will return headers from this block. 
Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` // Block height instead of hash will be used for the first returned block, if hash is nil or empty - Height uint64 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"` - Offset uint64 `protobuf:"varint,3,opt,name=offset" json:"offset,omitempty"` - Size uint32 `protobuf:"varint,4,opt,name=size" json:"size,omitempty"` + Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Offset uint64 `protobuf:"varint,3,opt,name=offset,proto3" json:"offset,omitempty"` + Size uint32 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` // default is false. - Asc bool `protobuf:"varint,5,opt,name=asc" json:"asc,omitempty"` + Asc bool `protobuf:"varint,5,opt,name=asc,proto3" json:"asc,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -687,16 +717,17 @@ func (m *GetBlockHeadersRequest) Reset() { *m = GetBlockHeadersRequest{} func (m *GetBlockHeadersRequest) String() string { return proto.CompactTextString(m) } func (*GetBlockHeadersRequest) ProtoMessage() {} func (*GetBlockHeadersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{10} + return fileDescriptor_e7fdddb109e6467a, []int{10} } + func (m *GetBlockHeadersRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetBlockHeadersRequest.Unmarshal(m, b) } func (m *GetBlockHeadersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetBlockHeadersRequest.Marshal(b, m, deterministic) } -func (dst *GetBlockHeadersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetBlockHeadersRequest.Merge(dst, src) +func (m *GetBlockHeadersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBlockHeadersRequest.Merge(m, src) } func (m *GetBlockHeadersRequest) XXX_Size() int { return xxx_messageInfo_GetBlockHeadersRequest.Size(m) @@ -744,10 +775,10 @@ func (m *GetBlockHeadersRequest) GetAsc() bool { // GetBlockResponse contains response of GetBlockRequest. 
type GetBlockHeadersResponse struct { - Status ResultStatus `protobuf:"varint,1,opt,name=status,enum=types.ResultStatus" json:"status,omitempty"` + Status ResultStatus `protobuf:"varint,1,opt,name=status,proto3,enum=types.ResultStatus" json:"status,omitempty"` Hashes [][]byte `protobuf:"bytes,2,rep,name=hashes,proto3" json:"hashes,omitempty"` - Headers []*BlockHeader `protobuf:"bytes,3,rep,name=headers" json:"headers,omitempty"` - HasNext bool `protobuf:"varint,4,opt,name=hasNext" json:"hasNext,omitempty"` + Headers []*BlockHeader `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty"` + HasNext bool `protobuf:"varint,4,opt,name=hasNext,proto3" json:"hasNext,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -757,16 +788,17 @@ func (m *GetBlockHeadersResponse) Reset() { *m = GetBlockHeadersResponse func (m *GetBlockHeadersResponse) String() string { return proto.CompactTextString(m) } func (*GetBlockHeadersResponse) ProtoMessage() {} func (*GetBlockHeadersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{11} + return fileDescriptor_e7fdddb109e6467a, []int{11} } + func (m *GetBlockHeadersResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetBlockHeadersResponse.Unmarshal(m, b) } func (m *GetBlockHeadersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetBlockHeadersResponse.Marshal(b, m, deterministic) } -func (dst *GetBlockHeadersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetBlockHeadersResponse.Merge(dst, src) +func (m *GetBlockHeadersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBlockHeadersResponse.Merge(m, src) } func (m *GetBlockHeadersResponse) XXX_Size() int { return xxx_messageInfo_GetBlockHeadersResponse.Size(m) @@ -817,16 +849,17 @@ func (m *GetBlockRequest) Reset() { *m = GetBlockRequest{} } func (m *GetBlockRequest) String() string { return proto.CompactTextString(m) } func (*GetBlockRequest) ProtoMessage() {} func (*GetBlockRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{12} + return fileDescriptor_e7fdddb109e6467a, []int{12} } + func (m *GetBlockRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetBlockRequest.Unmarshal(m, b) } func (m *GetBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetBlockRequest.Marshal(b, m, deterministic) } -func (dst *GetBlockRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetBlockRequest.Merge(dst, src) +func (m *GetBlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBlockRequest.Merge(m, src) } func (m *GetBlockRequest) XXX_Size() int { return xxx_messageInfo_GetBlockRequest.Size(m) @@ -846,9 +879,9 @@ func (m *GetBlockRequest) GetHashes() [][]byte { // GetBlockResponse contains response of GetBlockRequest. 
type GetBlockResponse struct { - Status ResultStatus `protobuf:"varint,1,opt,name=status,enum=types.ResultStatus" json:"status,omitempty"` - Blocks []*Block `protobuf:"bytes,2,rep,name=blocks" json:"blocks,omitempty"` - HasNext bool `protobuf:"varint,3,opt,name=hasNext" json:"hasNext,omitempty"` + Status ResultStatus `protobuf:"varint,1,opt,name=status,proto3,enum=types.ResultStatus" json:"status,omitempty"` + Blocks []*Block `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` + HasNext bool `protobuf:"varint,3,opt,name=hasNext,proto3" json:"hasNext,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -858,16 +891,17 @@ func (m *GetBlockResponse) Reset() { *m = GetBlockResponse{} } func (m *GetBlockResponse) String() string { return proto.CompactTextString(m) } func (*GetBlockResponse) ProtoMessage() {} func (*GetBlockResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{13} + return fileDescriptor_e7fdddb109e6467a, []int{13} } + func (m *GetBlockResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetBlockResponse.Unmarshal(m, b) } func (m *GetBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetBlockResponse.Marshal(b, m, deterministic) } -func (dst *GetBlockResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetBlockResponse.Merge(dst, src) +func (m *GetBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBlockResponse.Merge(m, src) } func (m *GetBlockResponse) XXX_Size() int { return xxx_messageInfo_GetBlockResponse.Size(m) @@ -910,16 +944,17 @@ func (m *NewTransactionsNotice) Reset() { *m = NewTransactionsNotice{} } func (m *NewTransactionsNotice) String() string { return proto.CompactTextString(m) } func (*NewTransactionsNotice) ProtoMessage() {} func (*NewTransactionsNotice) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{14} + return fileDescriptor_e7fdddb109e6467a, []int{14} } + func (m *NewTransactionsNotice) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NewTransactionsNotice.Unmarshal(m, b) } func (m *NewTransactionsNotice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NewTransactionsNotice.Marshal(b, m, deterministic) } -func (dst *NewTransactionsNotice) XXX_Merge(src proto.Message) { - xxx_messageInfo_NewTransactionsNotice.Merge(dst, src) +func (m *NewTransactionsNotice) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewTransactionsNotice.Merge(m, src) } func (m *NewTransactionsNotice) XXX_Size() int { return xxx_messageInfo_NewTransactionsNotice.Size(m) @@ -948,16 +983,17 @@ func (m *GetTransactionsRequest) Reset() { *m = GetTransactionsRequest{} func (m *GetTransactionsRequest) String() string { return proto.CompactTextString(m) } func (*GetTransactionsRequest) ProtoMessage() {} func (*GetTransactionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{15} + return fileDescriptor_e7fdddb109e6467a, []int{15} } + func (m *GetTransactionsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetTransactionsRequest.Unmarshal(m, b) } func (m *GetTransactionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetTransactionsRequest.Marshal(b, m, deterministic) } -func (dst *GetTransactionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTransactionsRequest.Merge(dst, src) +func (m 
*GetTransactionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTransactionsRequest.Merge(m, src) } func (m *GetTransactionsRequest) XXX_Size() int { return xxx_messageInfo_GetTransactionsRequest.Size(m) @@ -976,10 +1012,10 @@ func (m *GetTransactionsRequest) GetHashes() [][]byte { } type GetTransactionsResponse struct { - Status ResultStatus `protobuf:"varint,1,opt,name=status,enum=types.ResultStatus" json:"status,omitempty"` + Status ResultStatus `protobuf:"varint,1,opt,name=status,proto3,enum=types.ResultStatus" json:"status,omitempty"` Hashes [][]byte `protobuf:"bytes,2,rep,name=hashes,proto3" json:"hashes,omitempty"` - Txs []*Tx `protobuf:"bytes,3,rep,name=txs" json:"txs,omitempty"` - HasNext bool `protobuf:"varint,4,opt,name=hasNext" json:"hasNext,omitempty"` + Txs []*Tx `protobuf:"bytes,3,rep,name=txs,proto3" json:"txs,omitempty"` + HasNext bool `protobuf:"varint,4,opt,name=hasNext,proto3" json:"hasNext,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -989,16 +1025,17 @@ func (m *GetTransactionsResponse) Reset() { *m = GetTransactionsResponse func (m *GetTransactionsResponse) String() string { return proto.CompactTextString(m) } func (*GetTransactionsResponse) ProtoMessage() {} func (*GetTransactionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{16} + return fileDescriptor_e7fdddb109e6467a, []int{16} } + func (m *GetTransactionsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetTransactionsResponse.Unmarshal(m, b) } func (m *GetTransactionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetTransactionsResponse.Marshal(b, m, deterministic) } -func (dst *GetTransactionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTransactionsResponse.Merge(dst, src) +func (m *GetTransactionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTransactionsResponse.Merge(m, src) } func (m *GetTransactionsResponse) XXX_Size() int { return xxx_messageInfo_GetTransactionsResponse.Size(m) @@ -1052,16 +1089,17 @@ func (m *GetMissingRequest) Reset() { *m = GetMissingRequest{} } func (m *GetMissingRequest) String() string { return proto.CompactTextString(m) } func (*GetMissingRequest) ProtoMessage() {} func (*GetMissingRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{17} + return fileDescriptor_e7fdddb109e6467a, []int{17} } + func (m *GetMissingRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetMissingRequest.Unmarshal(m, b) } func (m *GetMissingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetMissingRequest.Marshal(b, m, deterministic) } -func (dst *GetMissingRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetMissingRequest.Merge(dst, src) +func (m *GetMissingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetMissingRequest.Merge(m, src) } func (m *GetMissingRequest) XXX_Size() int { return xxx_messageInfo_GetMissingRequest.Size(m) @@ -1098,16 +1136,17 @@ func (m *GetAncestorRequest) Reset() { *m = GetAncestorRequest{} } func (m *GetAncestorRequest) String() string { return proto.CompactTextString(m) } func (*GetAncestorRequest) ProtoMessage() {} func (*GetAncestorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{18} + return fileDescriptor_e7fdddb109e6467a, []int{18} } + func (m *GetAncestorRequest) XXX_Unmarshal(b []byte) error 
{ return xxx_messageInfo_GetAncestorRequest.Unmarshal(m, b) } func (m *GetAncestorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetAncestorRequest.Marshal(b, m, deterministic) } -func (dst *GetAncestorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAncestorRequest.Merge(dst, src) +func (m *GetAncestorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAncestorRequest.Merge(m, src) } func (m *GetAncestorRequest) XXX_Size() int { return xxx_messageInfo_GetAncestorRequest.Size(m) @@ -1126,9 +1165,9 @@ func (m *GetAncestorRequest) GetHashes() [][]byte { } type GetAncestorResponse struct { - Status ResultStatus `protobuf:"varint,1,opt,name=status,enum=types.ResultStatus" json:"status,omitempty"` + Status ResultStatus `protobuf:"varint,1,opt,name=status,proto3,enum=types.ResultStatus" json:"status,omitempty"` AncestorHash []byte `protobuf:"bytes,2,opt,name=ancestorHash,proto3" json:"ancestorHash,omitempty"` - AncestorNo uint64 `protobuf:"varint,3,opt,name=ancestorNo" json:"ancestorNo,omitempty"` + AncestorNo uint64 `protobuf:"varint,3,opt,name=ancestorNo,proto3" json:"ancestorNo,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1138,16 +1177,17 @@ func (m *GetAncestorResponse) Reset() { *m = GetAncestorResponse{} } func (m *GetAncestorResponse) String() string { return proto.CompactTextString(m) } func (*GetAncestorResponse) ProtoMessage() {} func (*GetAncestorResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{19} + return fileDescriptor_e7fdddb109e6467a, []int{19} } + func (m *GetAncestorResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetAncestorResponse.Unmarshal(m, b) } func (m *GetAncestorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetAncestorResponse.Marshal(b, m, deterministic) } -func (dst *GetAncestorResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAncestorResponse.Merge(dst, src) +func (m *GetAncestorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAncestorResponse.Merge(m, src) } func (m *GetAncestorResponse) XXX_Size() int { return xxx_messageInfo_GetAncestorResponse.Size(m) @@ -1180,7 +1220,7 @@ func (m *GetAncestorResponse) GetAncestorNo() uint64 { } type GetHashByNo struct { - BlockNo uint64 `protobuf:"varint,1,opt,name=blockNo" json:"blockNo,omitempty"` + BlockNo uint64 `protobuf:"varint,1,opt,name=blockNo,proto3" json:"blockNo,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1190,16 +1230,17 @@ func (m *GetHashByNo) Reset() { *m = GetHashByNo{} } func (m *GetHashByNo) String() string { return proto.CompactTextString(m) } func (*GetHashByNo) ProtoMessage() {} func (*GetHashByNo) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{20} + return fileDescriptor_e7fdddb109e6467a, []int{20} } + func (m *GetHashByNo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetHashByNo.Unmarshal(m, b) } func (m *GetHashByNo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetHashByNo.Marshal(b, m, deterministic) } -func (dst *GetHashByNo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHashByNo.Merge(dst, src) +func (m *GetHashByNo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHashByNo.Merge(m, src) } func (m *GetHashByNo) XXX_Size() int { return 
xxx_messageInfo_GetHashByNo.Size(m) @@ -1218,7 +1259,7 @@ func (m *GetHashByNo) GetBlockNo() uint64 { } type GetHashByNoResponse struct { - Status ResultStatus `protobuf:"varint,1,opt,name=status,enum=types.ResultStatus" json:"status,omitempty"` + Status ResultStatus `protobuf:"varint,1,opt,name=status,proto3,enum=types.ResultStatus" json:"status,omitempty"` BlockHash []byte `protobuf:"bytes,2,opt,name=blockHash,proto3" json:"blockHash,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1229,16 +1270,17 @@ func (m *GetHashByNoResponse) Reset() { *m = GetHashByNoResponse{} } func (m *GetHashByNoResponse) String() string { return proto.CompactTextString(m) } func (*GetHashByNoResponse) ProtoMessage() {} func (*GetHashByNoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{21} + return fileDescriptor_e7fdddb109e6467a, []int{21} } + func (m *GetHashByNoResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetHashByNoResponse.Unmarshal(m, b) } func (m *GetHashByNoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetHashByNoResponse.Marshal(b, m, deterministic) } -func (dst *GetHashByNoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHashByNoResponse.Merge(dst, src) +func (m *GetHashByNoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHashByNoResponse.Merge(m, src) } func (m *GetHashByNoResponse) XXX_Size() int { return xxx_messageInfo_GetHashByNoResponse.Size(m) @@ -1268,9 +1310,9 @@ type GetHashesRequest struct { // prevHash indicated referenced block hash. server will return hashes after this block. PrevHash []byte `protobuf:"bytes,1,opt,name=prevHash,proto3" json:"prevHash,omitempty"` // prevNumber indicated referenced block - PrevNumber uint64 `protobuf:"varint,2,opt,name=prevNumber" json:"prevNumber,omitempty"` + PrevNumber uint64 `protobuf:"varint,2,opt,name=prevNumber,proto3" json:"prevNumber,omitempty"` // maximum count of hashes that want to get - Size uint64 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"` + Size uint64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1280,16 +1322,17 @@ func (m *GetHashesRequest) Reset() { *m = GetHashesRequest{} } func (m *GetHashesRequest) String() string { return proto.CompactTextString(m) } func (*GetHashesRequest) ProtoMessage() {} func (*GetHashesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{22} + return fileDescriptor_e7fdddb109e6467a, []int{22} } + func (m *GetHashesRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetHashesRequest.Unmarshal(m, b) } func (m *GetHashesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetHashesRequest.Marshal(b, m, deterministic) } -func (dst *GetHashesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHashesRequest.Merge(dst, src) +func (m *GetHashesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHashesRequest.Merge(m, src) } func (m *GetHashesRequest) XXX_Size() int { return xxx_messageInfo_GetHashesRequest.Size(m) @@ -1323,9 +1366,9 @@ func (m *GetHashesRequest) GetSize() uint64 { // GetHashesResponse contains response of GetHashesRequest. 
type GetHashesResponse struct { - Status ResultStatus `protobuf:"varint,1,opt,name=status,enum=types.ResultStatus" json:"status,omitempty"` + Status ResultStatus `protobuf:"varint,1,opt,name=status,proto3,enum=types.ResultStatus" json:"status,omitempty"` Hashes [][]byte `protobuf:"bytes,2,rep,name=hashes,proto3" json:"hashes,omitempty"` - HasNext bool `protobuf:"varint,3,opt,name=hasNext" json:"hasNext,omitempty"` + HasNext bool `protobuf:"varint,3,opt,name=hasNext,proto3" json:"hasNext,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1335,16 +1378,17 @@ func (m *GetHashesResponse) Reset() { *m = GetHashesResponse{} } func (m *GetHashesResponse) String() string { return proto.CompactTextString(m) } func (*GetHashesResponse) ProtoMessage() {} func (*GetHashesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_p2p_076915cd835bf79b, []int{23} + return fileDescriptor_e7fdddb109e6467a, []int{23} } + func (m *GetHashesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetHashesResponse.Unmarshal(m, b) } func (m *GetHashesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GetHashesResponse.Marshal(b, m, deterministic) } -func (dst *GetHashesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHashesResponse.Merge(dst, src) +func (m *GetHashesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHashesResponse.Merge(m, src) } func (m *GetHashesResponse) XXX_Size() int { return xxx_messageInfo_GetHashesResponse.Size(m) @@ -1377,6 +1421,7 @@ func (m *GetHashesResponse) GetHasNext() bool { } func init() { + proto.RegisterEnum("types.ResultStatus", ResultStatus_name, ResultStatus_value) proto.RegisterType((*MsgHeader)(nil), "types.MsgHeader") proto.RegisterType((*P2PMessage)(nil), "types.P2PMessage") proto.RegisterType((*Ping)(nil), "types.Ping") @@ -1401,86 +1446,86 @@ func init() { proto.RegisterType((*GetHashByNoResponse)(nil), "types.GetHashByNoResponse") proto.RegisterType((*GetHashesRequest)(nil), "types.GetHashesRequest") proto.RegisterType((*GetHashesResponse)(nil), "types.GetHashesResponse") - proto.RegisterEnum("types.ResultStatus", ResultStatus_name, ResultStatus_value) } -func init() { proto.RegisterFile("p2p.proto", fileDescriptor_p2p_076915cd835bf79b) } +func init() { proto.RegisterFile("p2p.proto", fileDescriptor_e7fdddb109e6467a) } -var fileDescriptor_p2p_076915cd835bf79b = []byte{ - // 1194 bytes of a gzipped FileDescriptorProto +var fileDescriptor_e7fdddb109e6467a = []byte{ + // 1203 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x72, 0xda, 0x46, 0x14, 0xae, 0x00, 0x63, 0x38, 0x80, 0x2d, 0xaf, 0x9b, 0x84, 0x71, 0x33, 0x2e, 0xa3, 0xc9, 0xb4, 0x34, 0xcd, 0x38, 0x1d, 0xe7, 0x09, 0x64, 0x6b, 0x03, 0x6a, 0xf0, 0x8a, 0x59, 0x20, 0x4d, 0x7b, 0x43, 0x05, 0x6c, 0x40, 0xad, 0x2d, 0xa9, 0xda, 0x25, 0xc1, 0xb9, 0xe9, 0x4c, 0x2f, 0xfa, 0x06, - 0x7d, 0x85, 0x5e, 0xf7, 0x09, 0xfa, 0x66, 0x9d, 0xe9, 0xec, 0x6a, 0x05, 0x22, 0x3f, 0xf5, 0xd4, - 0x93, 0x2b, 0xef, 0xf9, 0xf6, 0xe8, 0x3b, 0x3f, 0xdf, 0xd9, 0x63, 0xa0, 0x1a, 0x9f, 0xc6, 0x27, - 0x71, 0x12, 0x89, 0x08, 0xed, 0x88, 0xeb, 0x98, 0xf1, 0x23, 0x73, 0x72, 0x19, 0x4d, 0x7f, 0x9e, - 0x2e, 0xfc, 0x20, 0x4c, 0x2f, 0x8e, 0x20, 0x8c, 0x66, 0x2c, 0x3d, 0x5b, 0xff, 0x18, 0x50, 0xbd, - 0xe0, 0xf3, 0x2e, 0xf3, 0x67, 0x2c, 0x41, 0x0f, 0xa0, 0x31, 0xbd, 0x0c, 0x58, 0x28, 0x9e, 0xb3, - 0x84, 0x07, 0x51, 0xd8, 0x34, 0x5a, 0x46, 
0xbb, 0x4a, 0xb7, 0x41, 0x74, 0x1f, 0xaa, 0x22, 0xb8, - 0x62, 0x5c, 0xf8, 0x57, 0x71, 0xb3, 0xd0, 0x32, 0xda, 0x45, 0xba, 0x01, 0xd0, 0x1e, 0x14, 0x82, - 0x59, 0xb3, 0xa8, 0x3e, 0x2c, 0x04, 0x33, 0x74, 0x17, 0xca, 0xf3, 0x88, 0xf3, 0x20, 0x6e, 0x96, - 0x5a, 0x46, 0xbb, 0x42, 0xb5, 0x25, 0xf1, 0x98, 0xb1, 0xc4, 0x75, 0x9a, 0x3b, 0x2d, 0xa3, 0x5d, - 0xa7, 0xda, 0x42, 0xc7, 0xa0, 0xf2, 0xeb, 0x2f, 0x27, 0xcf, 0xd8, 0x75, 0xb3, 0xac, 0xee, 0x72, - 0x08, 0x42, 0x50, 0xe2, 0xc1, 0x3c, 0x6c, 0xee, 0xaa, 0x1b, 0x75, 0x46, 0x2d, 0xa8, 0xf1, 0xe5, - 0x44, 0x55, 0x34, 0x8d, 0x2e, 0x9b, 0x95, 0x96, 0xd1, 0x6e, 0xd0, 0x3c, 0x24, 0xa3, 0x5d, 0xb2, - 0x70, 0x2e, 0x16, 0xcd, 0xaa, 0xba, 0xd4, 0x96, 0xf5, 0x2d, 0x40, 0xff, 0xb4, 0x7f, 0xc1, 0x38, - 0xf7, 0xe7, 0x0c, 0xb5, 0xa1, 0xbc, 0x50, 0x9d, 0x50, 0x85, 0xd7, 0x4e, 0xcd, 0x13, 0xd5, 0xc3, - 0x93, 0x75, 0x87, 0xa8, 0xbe, 0x97, 0x59, 0xcc, 0x7c, 0xe1, 0xab, 0xf2, 0xeb, 0x54, 0x9d, 0x2d, - 0x0f, 0x4a, 0xfd, 0x20, 0x9c, 0xa3, 0x2f, 0x60, 0x7f, 0xc2, 0xb8, 0x18, 0xab, 0xc6, 0x8f, 0x17, - 0x3e, 0x5f, 0x28, 0xba, 0x3a, 0x6d, 0x48, 0xf8, 0x4c, 0xa2, 0x5d, 0x9f, 0x2f, 0xd0, 0xe7, 0x50, - 0x53, 0x7e, 0x0b, 0x16, 0xcc, 0x17, 0x42, 0x51, 0x95, 0x28, 0x48, 0xa8, 0xab, 0x10, 0xab, 0x07, - 0xa5, 0x7e, 0x14, 0xce, 0xa5, 0x2c, 0x5b, 0x5f, 0xbe, 0x9f, 0xee, 0x18, 0x72, 0xdf, 0xbe, 0x87, - 0xed, 0x2f, 0x03, 0xca, 0x03, 0xe1, 0x8b, 0x25, 0x47, 0x0f, 0xa1, 0xcc, 0x59, 0xb8, 0xa9, 0x13, - 0xe9, 0x3a, 0xfb, 0x8c, 0x25, 0xf6, 0x6c, 0x96, 0x30, 0xce, 0xa9, 0xf6, 0x78, 0x37, 0x78, 0xe1, - 0xe6, 0xe0, 0xc5, 0xb7, 0x83, 0xa3, 0x26, 0xec, 0xaa, 0x11, 0x74, 0x1d, 0x35, 0x06, 0x75, 0x9a, - 0x99, 0xe8, 0x08, 0x2a, 0x61, 0x84, 0x57, 0x71, 0xc4, 0x99, 0x9a, 0x84, 0x0a, 0x5d, 0xdb, 0x56, - 0x1b, 0xea, 0x9d, 0xc8, 0x7e, 0xed, 0x5f, 0x93, 0x48, 0x04, 0x53, 0x26, 0x59, 0xae, 0x52, 0xa9, - 0xf4, 0x64, 0x66, 0xa6, 0xf5, 0x02, 0x4c, 0x9d, 0x38, 0xe3, 0x94, 0xfd, 0xb2, 0x64, 0x5c, 0xfc, - 0xaf, 0x2a, 0x25, 0xb3, 0xbf, 0x1a, 0x04, 0x6f, 0x98, 0xaa, 0xaf, 0x41, 0x33, 0xd3, 0xfa, 0x09, - 0x0e, 0x72, 0xcc, 0x3c, 0x8e, 0x42, 0xce, 0xd0, 0xd7, 0x50, 0xe6, 0xaa, 0x95, 0x8a, 0x7a, 0xef, - 0xf4, 0x50, 0x53, 0x53, 0xc6, 0x97, 0x97, 0x22, 0xed, 0x32, 0xd5, 0x2e, 0xa8, 0x0d, 0x3b, 0x72, - 0xb6, 0x79, 0xb3, 0xd0, 0x2a, 0x7e, 0x20, 0x8d, 0xd4, 0xc1, 0xea, 0xc2, 0x1e, 0x61, 0xaf, 0x55, - 0x57, 0x75, 0xc5, 0xf7, 0xa1, 0x3a, 0x79, 0x4b, 0xf6, 0x0d, 0x20, 0xb3, 0x9e, 0xa4, 0xce, 0x5a, - 0xef, 0xcc, 0xb4, 0x38, 0x1c, 0x2a, 0x9a, 0x7e, 0x12, 0xcd, 0x96, 0x53, 0x36, 0xd3, 0x74, 0xc7, - 0x00, 0x71, 0x8a, 0xc8, 0x87, 0x97, 0xf2, 0xe5, 0x90, 0x0f, 0x13, 0x22, 0x0b, 0x76, 0xd4, 0x51, - 0x69, 0x5b, 0x3b, 0xad, 0xeb, 0x22, 0x54, 0x10, 0x9a, 0x5e, 0x59, 0xbf, 0x19, 0x70, 0xb7, 0xc3, - 0xf4, 0x54, 0xa8, 0x77, 0xb2, 0xd6, 0x02, 0x41, 0x29, 0xf7, 0x10, 0xd4, 0x59, 0xbe, 0xc9, 0xad, - 0xd1, 0xd7, 0x96, 0xc4, 0xa3, 0x97, 0x2f, 0x39, 0xcb, 0xe6, 0x48, 0x5b, 0xe9, 0xcb, 0x7f, 0xc3, - 0xd4, 0x00, 0x35, 0xa8, 0x3a, 0x23, 0x13, 0x8a, 0x3e, 0x9f, 0xea, 0xc1, 0x91, 0x47, 0xeb, 0x4f, - 0x03, 0xee, 0xbd, 0x93, 0xc4, 0x6d, 0x64, 0x93, 0xe9, 0xf9, 0x7c, 0xc1, 0x52, 0xdd, 0xea, 0x54, - 0x5b, 0xe8, 0x11, 0xec, 0xa6, 0x4b, 0x80, 0x37, 0x8b, 0x5b, 0x82, 0xe6, 0x42, 0xd2, 0xcc, 0x45, - 0x76, 0x74, 0xe1, 0x73, 0xc2, 0x56, 0x42, 0xef, 0xbf, 0xcc, 0xb4, 0xbe, 0x82, 0xfd, 0x2c, 0xcf, - 0xac, 0x4b, 0x9b, 0x90, 0x46, 0x3e, 0xa4, 0xf5, 0x2b, 0x98, 0x1b, 0xd7, 0xdb, 0xd4, 0xf2, 0x00, - 0xca, 0x4a, 0xa2, 0x6c, 0x06, 0xb7, 0xe5, 0xd3, 0x77, 0xf9, 0x5c, 0x8b, 0xdb, 0xb9, 0x3e, 0x81, - 0x3b, 0x84, 0xbd, 0x1e, 0x26, 0x7e, 0xc8, 0xfd, 0xa9, 0x08, 0xa2, 
0x90, 0xeb, 0x81, 0x3a, 0x82, - 0x8a, 0x58, 0x75, 0xf3, 0x39, 0xaf, 0x6d, 0xeb, 0x1b, 0x35, 0x0d, 0xf9, 0x8f, 0x6e, 0xaa, 0xf3, - 0x8f, 0x54, 0xbb, 0xed, 0x4f, 0x3e, 0xa6, 0x76, 0x9f, 0x41, 0x51, 0xac, 0x32, 0xdd, 0xaa, 0x9a, - 0x61, 0xb8, 0xa2, 0x12, 0xfd, 0x0f, 0xa9, 0x3a, 0x70, 0xd0, 0x61, 0xe2, 0x22, 0xe0, 0x3c, 0x08, - 0xe7, 0x37, 0x14, 0x21, 0x5b, 0xc2, 0x45, 0x14, 0x2f, 0x36, 0xbb, 0x72, 0x6d, 0x5b, 0x8f, 0x00, - 0x75, 0x98, 0xb0, 0xc3, 0x29, 0xe3, 0x22, 0x4a, 0x6e, 0x6a, 0xc7, 0xef, 0x06, 0x1c, 0x6e, 0xb9, - 0xdf, 0xa6, 0x15, 0x16, 0xd4, 0x7d, 0x4d, 0x90, 0x5b, 0xdf, 0x5b, 0x98, 0x5c, 0x0b, 0x99, 0x4d, - 0xa2, 0x6c, 0x7b, 0x6f, 0x10, 0xeb, 0x4b, 0xa8, 0x75, 0x98, 0x90, 0xae, 0x67, 0xd7, 0x24, 0xca, - 0x6f, 0x09, 0x63, 0x7b, 0xed, 0xfc, 0xa8, 0x12, 0xce, 0x1c, 0x6f, 0x97, 0xf0, 0xd6, 0xca, 0x2b, - 0xbc, 0xb5, 0xf2, 0xac, 0x89, 0x7a, 0x0a, 0xe9, 0x84, 0x65, 0xfd, 0x3b, 0x82, 0x4a, 0x9c, 0xb0, - 0x57, 0xb9, 0x1d, 0xb9, 0xb6, 0xd3, 0x8d, 0xc7, 0x5e, 0x91, 0xe5, 0xd5, 0x84, 0x25, 0xd9, 0x7f, - 0xc5, 0x0d, 0xb2, 0x5e, 0x2a, 0x69, 0xd1, 0xea, 0x6c, 0x25, 0x4a, 0xee, 0x2c, 0xc6, 0xc7, 0x9c, - 0xbf, 0x0f, 0xbe, 0xb0, 0x87, 0x7f, 0x17, 0xa0, 0x9e, 0xa7, 0x42, 0x65, 0x28, 0x78, 0xcf, 0xcc, - 0x4f, 0x50, 0x1d, 0x2a, 0xe7, 0x36, 0x39, 0xc7, 0x3d, 0xec, 0x98, 0x06, 0xaa, 0xc1, 0xee, 0x88, - 0x3c, 0x23, 0xde, 0x77, 0xc4, 0x2c, 0xa0, 0x4f, 0xc1, 0x74, 0xc9, 0x73, 0xbb, 0xe7, 0x3a, 0x63, - 0x9b, 0x76, 0x46, 0x17, 0x98, 0x0c, 0xcd, 0x22, 0xba, 0x03, 0x07, 0x0e, 0xb6, 0x9d, 0x9e, 0x4b, - 0xf0, 0x18, 0xbf, 0x38, 0xc7, 0xd8, 0xc1, 0x8e, 0x59, 0x42, 0x0d, 0xa8, 0x12, 0x6f, 0x38, 0x7e, - 0xea, 0x8d, 0x88, 0x63, 0xee, 0x20, 0x04, 0x7b, 0x76, 0x8f, 0x62, 0xdb, 0xf9, 0x7e, 0x8c, 0x5f, - 0xb8, 0x83, 0xe1, 0xc0, 0x2c, 0xcb, 0x2f, 0xfb, 0x98, 0x5e, 0xb8, 0x83, 0x81, 0xeb, 0x91, 0xb1, - 0x83, 0x89, 0x8b, 0x1d, 0x73, 0x17, 0xdd, 0x05, 0x44, 0xf1, 0xc0, 0x1b, 0xd1, 0x73, 0x49, 0xd8, - 0xb5, 0x47, 0x83, 0x21, 0x76, 0xcc, 0x0a, 0xba, 0x07, 0x87, 0x4f, 0x6d, 0xb7, 0x87, 0x9d, 0x71, - 0x9f, 0xe2, 0x73, 0x8f, 0x38, 0xee, 0xd0, 0xf5, 0x88, 0x59, 0x95, 0x49, 0xda, 0x67, 0x1e, 0x95, - 0x5e, 0x80, 0x4c, 0xa8, 0x7b, 0xa3, 0xe1, 0xd8, 0x7b, 0x3a, 0xa6, 0x36, 0xe9, 0x60, 0xb3, 0x86, - 0x0e, 0xa0, 0x31, 0x22, 0xee, 0x45, 0xbf, 0x87, 0x65, 0xc6, 0xd8, 0x31, 0xeb, 0xb2, 0x48, 0x97, - 0x0c, 0x31, 0x25, 0x76, 0xcf, 0x6c, 0xa0, 0x7d, 0xa8, 0x8d, 0x88, 0xfd, 0xdc, 0x76, 0x7b, 0xf6, - 0x59, 0x0f, 0x9b, 0x7b, 0x32, 0x77, 0xc7, 0x1e, 0xda, 0xe3, 0x9e, 0x37, 0x18, 0x98, 0xfb, 0xe8, - 0x10, 0xf6, 0x47, 0xc4, 0x1e, 0x0d, 0xbb, 0x98, 0x0c, 0xdd, 0x73, 0x5b, 0x52, 0x98, 0x67, 0xad, - 0x1f, 0x8e, 0xe7, 0x81, 0x58, 0x2c, 0x27, 0x27, 0xd3, 0xe8, 0xea, 0xb1, 0xcf, 0x92, 0x79, 0x14, - 0x44, 0xe9, 0xdf, 0xc7, 0x4a, 0xa9, 0x49, 0x59, 0xfd, 0x1a, 0x7c, 0xf2, 0x6f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x62, 0x63, 0xdb, 0xf8, 0x24, 0x0b, 0x00, 0x00, + 0x7d, 0x85, 0x3e, 0x46, 0x1f, 0xa0, 0xef, 0xd4, 0x99, 0xce, 0xae, 0x56, 0x20, 0xf2, 0x53, 0x4f, + 0x3d, 0xb9, 0x62, 0xcf, 0xb7, 0x67, 0xbf, 0x3d, 0x3f, 0xdf, 0x1e, 0x04, 0xd5, 0xf8, 0x34, 0x3e, + 0x89, 0x93, 0x48, 0x44, 0x68, 0x47, 0x5c, 0xc7, 0x8c, 0x1f, 0x99, 0x93, 0xcb, 0x68, 0xfa, 0xf3, + 0x74, 0xe1, 0x07, 0x61, 0xba, 0x71, 0x04, 0x61, 0x34, 0x63, 0xe9, 0xda, 0xfa, 0xc7, 0x80, 0xea, + 0x05, 0x9f, 0x77, 0x99, 0x3f, 0x63, 0x09, 0x7a, 0x00, 0x8d, 0xe9, 0x65, 0xc0, 0x42, 0xf1, 0x9c, + 0x25, 0x3c, 0x88, 0xc2, 0xa6, 0xd1, 0x32, 0xda, 0x55, 0xba, 0x0d, 0xa2, 0xfb, 0x50, 0x15, 0xc1, + 0x15, 0xe3, 0xc2, 0xbf, 0x8a, 0x9b, 0x85, 0x96, 0xd1, 0x2e, 0xd2, 0x0d, 0x80, 0xf6, 0xa0, 0x10, + 0xcc, 0x9a, 0x45, 0x75, 0xb0, 
0x10, 0xcc, 0xd0, 0x5d, 0x28, 0xcf, 0x23, 0xce, 0x83, 0xb8, 0x59, + 0x6a, 0x19, 0xed, 0x0a, 0xd5, 0x96, 0xc4, 0x63, 0xc6, 0x12, 0xd7, 0x69, 0xee, 0xb4, 0x8c, 0x76, + 0x9d, 0x6a, 0x0b, 0x1d, 0x83, 0x8a, 0xaf, 0xbf, 0x9c, 0x3c, 0x63, 0xd7, 0xcd, 0xb2, 0xda, 0xcb, + 0x21, 0x08, 0x41, 0x89, 0x07, 0xf3, 0xb0, 0xb9, 0xab, 0x76, 0xd4, 0x1a, 0xb5, 0xa0, 0xc6, 0x97, + 0x13, 0x95, 0xd1, 0x34, 0xba, 0x6c, 0x56, 0x5a, 0x46, 0xbb, 0x41, 0xf3, 0x90, 0xbc, 0xed, 0x92, + 0x85, 0x73, 0xb1, 0x68, 0x56, 0xd5, 0xa6, 0xb6, 0xac, 0x6f, 0x01, 0xfa, 0xa7, 0xfd, 0x0b, 0xc6, + 0xb9, 0x3f, 0x67, 0xa8, 0x0d, 0xe5, 0x85, 0xaa, 0x84, 0x4a, 0xbc, 0x76, 0x6a, 0x9e, 0xa8, 0x1a, + 0x9e, 0xac, 0x2b, 0x44, 0xf5, 0xbe, 0x8c, 0x62, 0xe6, 0x0b, 0x5f, 0xa5, 0x5f, 0xa7, 0x6a, 0x6d, + 0x79, 0x50, 0xea, 0x07, 0xe1, 0x1c, 0x7d, 0x01, 0xfb, 0x13, 0xc6, 0xc5, 0x58, 0x15, 0x7e, 0xbc, + 0xf0, 0xf9, 0x42, 0xd1, 0xd5, 0x69, 0x43, 0xc2, 0x67, 0x12, 0xed, 0xfa, 0x7c, 0x81, 0x3e, 0x87, + 0x9a, 0xf2, 0x5b, 0xb0, 0x60, 0xbe, 0x10, 0x8a, 0xaa, 0x44, 0x41, 0x42, 0x5d, 0x85, 0x58, 0x3d, + 0x28, 0xf5, 0xa3, 0x70, 0x2e, 0xdb, 0xb2, 0x75, 0xf2, 0xfd, 0x74, 0xc7, 0x90, 0x3b, 0xfb, 0x1e, + 0xb6, 0xbf, 0x0d, 0x28, 0x0f, 0x84, 0x2f, 0x96, 0x1c, 0x3d, 0x84, 0x32, 0x67, 0xe1, 0x26, 0x4f, + 0xa4, 0xf3, 0xec, 0x33, 0x96, 0xd8, 0xb3, 0x59, 0xc2, 0x38, 0xa7, 0xda, 0xe3, 0xdd, 0xcb, 0x0b, + 0x37, 0x5f, 0x5e, 0x7c, 0xfb, 0x72, 0xd4, 0x84, 0x5d, 0x25, 0x41, 0xd7, 0x51, 0x32, 0xa8, 0xd3, + 0xcc, 0x44, 0x47, 0x50, 0x09, 0x23, 0xbc, 0x8a, 0x23, 0xce, 0x94, 0x12, 0x2a, 0x74, 0x6d, 0xcb, + 0x53, 0xaf, 0xb4, 0x12, 0xcb, 0x4a, 0x50, 0x99, 0x69, 0xb5, 0xa1, 0xde, 0x89, 0xec, 0xd7, 0xfe, + 0x35, 0x89, 0x44, 0x30, 0x55, 0x9e, 0x57, 0x69, 0x13, 0xb5, 0x66, 0x33, 0xd3, 0x7a, 0x01, 0xa6, + 0x4e, 0x89, 0x71, 0xca, 0x7e, 0x59, 0x32, 0x2e, 0xfe, 0x57, 0xfe, 0x92, 0xd9, 0x5f, 0x0d, 0x82, + 0x37, 0x4c, 0x65, 0xde, 0xa0, 0x99, 0x69, 0xfd, 0x04, 0x07, 0x39, 0x66, 0x1e, 0x47, 0x21, 0x67, + 0xe8, 0x6b, 0x28, 0x73, 0x55, 0x64, 0x45, 0xbd, 0x77, 0x7a, 0xa8, 0xa9, 0x29, 0xe3, 0xcb, 0x4b, + 0x91, 0xd6, 0x9f, 0x6a, 0x17, 0xd4, 0x86, 0x1d, 0xa9, 0x7a, 0xde, 0x2c, 0xb4, 0x8a, 0x1f, 0x08, + 0x23, 0x75, 0xb0, 0xba, 0xb0, 0x47, 0xd8, 0x6b, 0x55, 0x6f, 0x9d, 0xf1, 0x7d, 0xa8, 0x4e, 0xde, + 0x12, 0xc4, 0x06, 0x90, 0x51, 0x4f, 0x52, 0x67, 0xad, 0x84, 0xcc, 0xb4, 0x38, 0x1c, 0x2a, 0x9a, + 0x7e, 0x12, 0xcd, 0x96, 0x53, 0x36, 0xd3, 0x74, 0xc7, 0x00, 0x71, 0x8a, 0xc8, 0x27, 0x99, 0xf2, + 0xe5, 0x90, 0x0f, 0x13, 0x22, 0x0b, 0x76, 0xd4, 0x52, 0x75, 0xbd, 0x76, 0x5a, 0xd7, 0x49, 0xa8, + 0x4b, 0x68, 0xba, 0x65, 0xfd, 0x66, 0xc0, 0xdd, 0x0e, 0xd3, 0x7a, 0x51, 0x2f, 0x68, 0xdd, 0x0b, + 0x04, 0xa5, 0xdc, 0x13, 0x51, 0x6b, 0xf9, 0x5a, 0xb7, 0x1e, 0x85, 0xb6, 0x24, 0x1e, 0xbd, 0x7c, + 0xc9, 0x59, 0xa6, 0x30, 0x6d, 0xa5, 0x33, 0xe1, 0x0d, 0x53, 0xd2, 0x6a, 0x50, 0xb5, 0x46, 0x26, + 0x14, 0x7d, 0x3e, 0xd5, 0x92, 0x92, 0x4b, 0xeb, 0x4f, 0x03, 0xee, 0xbd, 0x13, 0xc4, 0x6d, 0xda, + 0x26, 0xc3, 0xf3, 0xf9, 0x82, 0xa5, 0x7d, 0xab, 0x53, 0x6d, 0xa1, 0x47, 0xb0, 0x9b, 0x8e, 0x07, + 0xde, 0x2c, 0x6e, 0x35, 0x34, 0x77, 0x25, 0xcd, 0x5c, 0x64, 0x45, 0x17, 0x3e, 0x27, 0x6c, 0x25, + 0xf4, 0x64, 0xcc, 0x4c, 0xeb, 0x2b, 0xd8, 0xcf, 0xe2, 0xcc, 0xaa, 0xb4, 0xb9, 0xd2, 0xc8, 0x5f, + 0x69, 0xfd, 0x0a, 0xe6, 0xc6, 0xf5, 0x36, 0xb9, 0x3c, 0x80, 0xb2, 0x6a, 0x51, 0xa6, 0xc1, 0xed, + 0xf6, 0xe9, 0xbd, 0x7c, 0xac, 0xc5, 0xed, 0x58, 0x9f, 0xc0, 0x1d, 0xc2, 0x5e, 0x0f, 0x13, 0x3f, + 0xe4, 0xfe, 0x54, 0x04, 0x51, 0xc8, 0xb5, 0xa0, 0x8e, 0xa0, 0x22, 0x56, 0xdd, 0x7c, 0xcc, 0x6b, + 0xdb, 0xfa, 0x46, 0xa9, 0x21, 0x7f, 0xe8, 0xa6, 0x3c, 
0xff, 0x48, 0x7b, 0xb7, 0x7d, 0xe4, 0x63, + 0xf6, 0xee, 0x33, 0x28, 0x8a, 0x55, 0xd6, 0xb7, 0xaa, 0x66, 0x18, 0xae, 0xa8, 0x44, 0xff, 0xa3, + 0x55, 0x1d, 0x38, 0xe8, 0x30, 0x71, 0x11, 0x70, 0x1e, 0x84, 0xf3, 0x1b, 0x92, 0x90, 0x25, 0xe1, + 0x22, 0x8a, 0x17, 0x9b, 0x29, 0xba, 0xb6, 0xad, 0x47, 0x80, 0x3a, 0x4c, 0xd8, 0xe1, 0x94, 0x71, + 0x11, 0x25, 0x37, 0x95, 0xe3, 0x77, 0x03, 0x0e, 0xb7, 0xdc, 0x6f, 0x53, 0x0a, 0x0b, 0xea, 0xbe, + 0x26, 0xc8, 0x0d, 0xf6, 0x2d, 0x4c, 0x8e, 0x85, 0xcc, 0x26, 0x51, 0x36, 0xd7, 0x37, 0x88, 0xf5, + 0x25, 0xd4, 0x3a, 0x4c, 0x48, 0xd7, 0xb3, 0x6b, 0x12, 0xe5, 0xa7, 0x84, 0xb1, 0x3d, 0x76, 0x7e, + 0x54, 0x01, 0x67, 0x8e, 0xb7, 0x0b, 0x78, 0x6b, 0xe4, 0x15, 0xde, 0x1a, 0x79, 0xd6, 0x44, 0x3d, + 0x85, 0x54, 0x61, 0x59, 0xfd, 0x8e, 0xa0, 0x12, 0x27, 0xec, 0x55, 0x6e, 0x46, 0xae, 0xed, 0x74, + 0xe2, 0xb1, 0x57, 0x64, 0x79, 0x35, 0x61, 0x49, 0xf6, 0x7f, 0xb9, 0x41, 0xd6, 0x43, 0x25, 0x4d, + 0x5a, 0xad, 0xad, 0x44, 0xb5, 0x3b, 0xbb, 0xe3, 0x63, 0xea, 0xef, 0x83, 0x2f, 0xec, 0xe1, 0x5f, + 0x05, 0xa8, 0xe7, 0xa9, 0x50, 0x19, 0x0a, 0xde, 0x33, 0xf3, 0x13, 0x54, 0x87, 0xca, 0xb9, 0x4d, + 0xce, 0x71, 0x0f, 0x3b, 0xa6, 0x81, 0x6a, 0xb0, 0x3b, 0x22, 0xcf, 0x88, 0xf7, 0x1d, 0x31, 0x0b, + 0xe8, 0x53, 0x30, 0x5d, 0xf2, 0xdc, 0xee, 0xb9, 0xce, 0xd8, 0xa6, 0x9d, 0xd1, 0x05, 0x26, 0x43, + 0xb3, 0x88, 0xee, 0xc0, 0x81, 0x83, 0x6d, 0xa7, 0xe7, 0x12, 0x3c, 0xc6, 0x2f, 0xce, 0x31, 0x76, + 0xb0, 0x63, 0x96, 0x50, 0x03, 0xaa, 0xc4, 0x1b, 0x8e, 0x9f, 0x7a, 0x23, 0xe2, 0x98, 0x3b, 0x08, + 0xc1, 0x9e, 0xdd, 0xa3, 0xd8, 0x76, 0xbe, 0x1f, 0xe3, 0x17, 0xee, 0x60, 0x38, 0x30, 0xcb, 0xf2, + 0x64, 0x1f, 0xd3, 0x0b, 0x77, 0x30, 0x70, 0x3d, 0x32, 0x76, 0x30, 0x71, 0xb1, 0x63, 0xee, 0xa2, + 0xbb, 0x80, 0x28, 0x1e, 0x78, 0x23, 0x7a, 0x2e, 0x09, 0xbb, 0xf6, 0x68, 0x30, 0xc4, 0x8e, 0x59, + 0x41, 0xf7, 0xe0, 0xf0, 0xa9, 0xed, 0xf6, 0xb0, 0x33, 0xee, 0x53, 0x7c, 0xee, 0x11, 0xc7, 0x1d, + 0xba, 0x1e, 0x31, 0xab, 0x32, 0x48, 0xfb, 0xcc, 0xa3, 0xd2, 0x0b, 0x90, 0x09, 0x75, 0x6f, 0x34, + 0x1c, 0x7b, 0x4f, 0xc7, 0xd4, 0x26, 0x1d, 0x6c, 0xd6, 0xd0, 0x01, 0x34, 0x46, 0xc4, 0xbd, 0xe8, + 0xf7, 0xb0, 0x8c, 0x18, 0x3b, 0x66, 0x5d, 0x26, 0xe9, 0x92, 0x21, 0xa6, 0xc4, 0xee, 0x99, 0x0d, + 0xb4, 0x0f, 0xb5, 0x11, 0xb1, 0x9f, 0xdb, 0x6e, 0xcf, 0x3e, 0xeb, 0x61, 0x73, 0x4f, 0xc6, 0xee, + 0xd8, 0x43, 0x7b, 0xdc, 0xf3, 0x06, 0x03, 0x73, 0x1f, 0x1d, 0xc2, 0xfe, 0x88, 0xd8, 0xa3, 0x61, + 0x17, 0x93, 0xa1, 0x7b, 0x6e, 0x4b, 0x0a, 0xf3, 0xac, 0xf5, 0xc3, 0xf1, 0x3c, 0x10, 0x8b, 0xe5, + 0xe4, 0x64, 0x1a, 0x5d, 0x3d, 0xf6, 0x59, 0x32, 0x8f, 0x82, 0x28, 0xfd, 0x7d, 0xac, 0x3a, 0x35, + 0x29, 0xab, 0xef, 0xc4, 0x27, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x9f, 0x40, 0xe2, 0x3e, + 0x0b, 0x00, 0x00, } diff --git a/types/p2pmore.go b/types/p2pmore.go index 03222ca9e..7fcac82ad 100644 --- a/types/p2pmore.go +++ b/types/p2pmore.go @@ -13,15 +13,10 @@ import ( "github.com/libp2p/go-libp2p-peer" ) -// PeerAccessor is an interface for a another actor module to get info of peers -type PeerAccessor interface { - GetPeerBlockInfos() []PeerBlockInfo -} - type PeerBlockInfo interface { ID() peer.ID State() PeerState - LastNotice() *LastBlockStatus + LastStatus() *LastBlockStatus } // LastBlockStatus i @@ -31,7 +26,6 @@ type LastBlockStatus struct { BlockNumber uint64 } - // ResponseMessage contains response status type ResponseMessage interface { GetStatus() ResultStatus diff --git a/types/pmap.pb.go b/types/pmap.pb.go index 11ff6e2ca..81967566d 100644 --- a/types/pmap.pb.go +++ b/types/pmap.pb.go @@ -1,11 +1,13 @@ // Code generated by 
protoc-gen-go. DO NOT EDIT. // source: pmap.proto -package types // import "github.com/aergoio/aergo/types" +package types -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -20,9 +22,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // query to polaris type MapQuery struct { - Status *Status `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` - AddMe bool `protobuf:"varint,2,opt,name=addMe" json:"addMe,omitempty"` - Size int32 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"` + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + AddMe bool `protobuf:"varint,2,opt,name=addMe,proto3" json:"addMe,omitempty"` + Size int32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` Excludes [][]byte `protobuf:"bytes,4,rep,name=excludes,proto3" json:"excludes,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -33,16 +35,17 @@ func (m *MapQuery) Reset() { *m = MapQuery{} } func (m *MapQuery) String() string { return proto.CompactTextString(m) } func (*MapQuery) ProtoMessage() {} func (*MapQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_pmap_0a38776fb17418f4, []int{0} + return fileDescriptor_c373235fa8835f5c, []int{0} } + func (m *MapQuery) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MapQuery.Unmarshal(m, b) } func (m *MapQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MapQuery.Marshal(b, m, deterministic) } -func (dst *MapQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_MapQuery.Merge(dst, src) +func (m *MapQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_MapQuery.Merge(m, src) } func (m *MapQuery) XXX_Size() int { return xxx_messageInfo_MapQuery.Size(m) @@ -82,9 +85,9 @@ func (m *MapQuery) GetExcludes() [][]byte { } type MapResponse struct { - Status ResultStatus `protobuf:"varint,1,opt,name=status,enum=types.ResultStatus" json:"status,omitempty"` - Addresses []*PeerAddress `protobuf:"bytes,2,rep,name=addresses" json:"addresses,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` + Status ResultStatus `protobuf:"varint,1,opt,name=status,proto3,enum=types.ResultStatus" json:"status,omitempty"` + Addresses []*PeerAddress `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -94,16 +97,17 @@ func (m *MapResponse) Reset() { *m = MapResponse{} } func (m *MapResponse) String() string { return proto.CompactTextString(m) } func (*MapResponse) ProtoMessage() {} func (*MapResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_pmap_0a38776fb17418f4, []int{1} + return fileDescriptor_c373235fa8835f5c, []int{1} } + func (m *MapResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MapResponse.Unmarshal(m, b) } func (m *MapResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MapResponse.Marshal(b, m, deterministic) } -func (dst *MapResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MapResponse.Merge(dst, src) +func (m *MapResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_MapResponse.Merge(m, src) } func (m *MapResponse) XXX_Size() int { return xxx_messageInfo_MapResponse.Size(m) @@ -140,9 +144,9 @@ func init() { proto.RegisterType((*MapResponse)(nil), "types.MapResponse") } -func init() { proto.RegisterFile("pmap.proto", fileDescriptor_pmap_0a38776fb17418f4) } +func init() { proto.RegisterFile("pmap.proto", fileDescriptor_c373235fa8835f5c) } -var fileDescriptor_pmap_0a38776fb17418f4 = []byte{ +var fileDescriptor_c373235fa8835f5c = []byte{ // 251 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x50, 0xcd, 0x4a, 0xc4, 0x30, 0x10, 0xa6, 0xdb, 0xed, 0xda, 0x4e, 0xd5, 0x43, 0xf4, 0x10, 0x7a, 0x90, 0xb0, 0x20, 0x14, 0x84, diff --git a/types/polarrpc.pb.go b/types/polarrpc.pb.go index f77b5081d..f0ad32554 100644 --- a/types/polarrpc.pb.go +++ b/types/polarrpc.pb.go @@ -1,15 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: polarrpc.proto -package types // import "github.com/aergoio/aergo/types" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package types import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" context "golang.org/x/net/context" grpc "google.golang.org/grpc" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. @@ -25,7 +24,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Paginations struct { Ref []byte `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` - Size uint32 `protobuf:"varint,3,opt,name=size" json:"size,omitempty"` + Size uint32 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -35,16 +34,17 @@ func (m *Paginations) Reset() { *m = Paginations{} } func (m *Paginations) String() string { return proto.CompactTextString(m) } func (*Paginations) ProtoMessage() {} func (*Paginations) Descriptor() ([]byte, []int) { - return fileDescriptor_polarrpc_90e1fbe18c16c515, []int{0} + return fileDescriptor_9eae49c68867e2c2, []int{0} } + func (m *Paginations) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Paginations.Unmarshal(m, b) } func (m *Paginations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Paginations.Marshal(b, m, deterministic) } -func (dst *Paginations) XXX_Merge(src proto.Message) { - xxx_messageInfo_Paginations.Merge(dst, src) +func (m *Paginations) XXX_Merge(src proto.Message) { + xxx_messageInfo_Paginations.Merge(m, src) } func (m *Paginations) XXX_Size() int { return xxx_messageInfo_Paginations.Size(m) @@ -70,9 +70,9 @@ func (m *Paginations) GetSize() uint32 { } type PolarisPeerList struct { - Total uint32 `protobuf:"varint,1,opt,name=total" json:"total,omitempty"` - HasNext bool `protobuf:"varint,2,opt,name=hasNext" json:"hasNext,omitempty"` - Peers []*PolarisPeer `protobuf:"bytes,3,rep,name=peers" json:"peers,omitempty"` + Total uint32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + HasNext bool `protobuf:"varint,2,opt,name=hasNext,proto3" json:"hasNext,omitempty"` + Peers []*PolarisPeer `protobuf:"bytes,3,rep,name=peers,proto3" json:"peers,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -82,16 +82,17 @@ func (m *PolarisPeerList) Reset() { *m = PolarisPeerList{} } func (m *PolarisPeerList) String() string { return proto.CompactTextString(m) } func 
(*PolarisPeerList) ProtoMessage() {} func (*PolarisPeerList) Descriptor() ([]byte, []int) { - return fileDescriptor_polarrpc_90e1fbe18c16c515, []int{1} + return fileDescriptor_9eae49c68867e2c2, []int{1} } + func (m *PolarisPeerList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PolarisPeerList.Unmarshal(m, b) } func (m *PolarisPeerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PolarisPeerList.Marshal(b, m, deterministic) } -func (dst *PolarisPeerList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolarisPeerList.Merge(dst, src) +func (m *PolarisPeerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolarisPeerList.Merge(m, src) } func (m *PolarisPeerList) XXX_Size() int { return xxx_messageInfo_PolarisPeerList.Size(m) @@ -124,10 +125,11 @@ func (m *PolarisPeerList) GetPeers() []*PolarisPeer { } type PolarisPeer struct { - Address *PeerAddress `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` - Connected int64 `protobuf:"varint,2,opt,name=connected" json:"connected,omitempty"` + Address *PeerAddress `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Connected int64 `protobuf:"varint,2,opt,name=connected,proto3" json:"connected,omitempty"` // lastCheck contains unixtimestamp with nanoseconds precision - LastCheck int64 `protobuf:"varint,3,opt,name=lastCheck" json:"lastCheck,omitempty"` + LastCheck int64 `protobuf:"varint,3,opt,name=lastCheck,proto3" json:"lastCheck,omitempty"` + Verion string `protobuf:"bytes,4,opt,name=verion,proto3" json:"verion,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -137,16 +139,17 @@ func (m *PolarisPeer) Reset() { *m = PolarisPeer{} } func (m *PolarisPeer) String() string { return proto.CompactTextString(m) } func (*PolarisPeer) ProtoMessage() {} func (*PolarisPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_polarrpc_90e1fbe18c16c515, []int{2} + return fileDescriptor_9eae49c68867e2c2, []int{2} } + func (m *PolarisPeer) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PolarisPeer.Unmarshal(m, b) } func (m *PolarisPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PolarisPeer.Marshal(b, m, deterministic) } -func (dst *PolarisPeer) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolarisPeer.Merge(dst, src) +func (m *PolarisPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolarisPeer.Merge(m, src) } func (m *PolarisPeer) XXX_Size() int { return xxx_messageInfo_PolarisPeer.Size(m) @@ -178,12 +181,51 @@ func (m *PolarisPeer) GetLastCheck() int64 { return 0 } +func (m *PolarisPeer) GetVerion() string { + if m != nil { + return m.Verion + } + return "" +} + func init() { proto.RegisterType((*Paginations)(nil), "types.Paginations") proto.RegisterType((*PolarisPeerList)(nil), "types.PolarisPeerList") proto.RegisterType((*PolarisPeer)(nil), "types.PolarisPeer") } +func init() { proto.RegisterFile("polarrpc.proto", fileDescriptor_9eae49c68867e2c2) } + +var fileDescriptor_9eae49c68867e2c2 = []byte{ + // 401 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x8d, 0xeb, 0x26, 0xc5, 0x93, 0x36, 0xc0, 0x08, 0x2a, 0xcb, 0x42, 0xc8, 0xf2, 0xc9, 0x07, + 0x94, 0xaa, 0xed, 0x09, 0x71, 0x22, 0xb9, 0x42, 0x15, 0x6d, 0x0e, 0x48, 0xdc, 0xb6, 0xf6, 0x60, + 0xaf, 0xe2, 0xee, 0xba, 0xbb, 0x93, 0x8a, 0xf2, 0x13, 0xfc, 0x08, 0x1f, 0x89, 0xbc, 0x76, 0x9a, + 0x20, 0x4e, 0x70, 
0xf2, 0xbe, 0xf7, 0xe6, 0x79, 0xdf, 0xcc, 0x2c, 0xcc, 0x5a, 0xd3, 0x48, 0x6b, + 0xdb, 0x62, 0xde, 0x5a, 0xc3, 0x06, 0xc7, 0xfc, 0xd8, 0x92, 0x4b, 0x40, 0x9b, 0x92, 0x7a, 0x2a, + 0x89, 0x9e, 0xd4, 0xe4, 0xf4, 0x8e, 0xd8, 0xaa, 0x01, 0x65, 0xd7, 0x30, 0x5d, 0xc9, 0x4a, 0x69, + 0xc9, 0xca, 0x68, 0x87, 0x2f, 0x20, 0xb4, 0xf4, 0x2d, 0x0e, 0xd2, 0x20, 0x3f, 0x15, 0xdd, 0x11, + 0x11, 0x8e, 0x9d, 0xfa, 0x41, 0x71, 0x98, 0x06, 0xf9, 0x99, 0xf0, 0xe7, 0x6c, 0x03, 0xcf, 0x57, + 0xdd, 0x95, 0xca, 0xad, 0x88, 0xec, 0x27, 0xe5, 0x18, 0x5f, 0xc1, 0x98, 0x0d, 0xcb, 0xc6, 0x5b, + 0xcf, 0x44, 0x0f, 0x30, 0x86, 0x93, 0x5a, 0xba, 0x1b, 0xfa, 0xce, 0xf1, 0x51, 0x1a, 0xe4, 0xcf, + 0xc4, 0x0e, 0x62, 0x0e, 0xe3, 0x96, 0xc8, 0xba, 0x38, 0x4c, 0xc3, 0x7c, 0x7a, 0x85, 0x73, 0x9f, + 0x79, 0x7e, 0xf0, 0x5b, 0xd1, 0x17, 0x64, 0x3f, 0x03, 0x98, 0x1e, 0xd0, 0xf8, 0x0e, 0x4e, 0x64, + 0x59, 0x5a, 0x72, 0xce, 0xdf, 0x75, 0xe0, 0x25, 0xb2, 0x1f, 0x7b, 0x45, 0xec, 0x4a, 0xf0, 0x0d, + 0x44, 0x85, 0xd1, 0x9a, 0x0a, 0xa6, 0xd2, 0x67, 0x08, 0xc5, 0x9e, 0xe8, 0xd4, 0x46, 0x3a, 0x5e, + 0xd6, 0x54, 0x6c, 0x7c, 0x87, 0xa1, 0xd8, 0x13, 0x78, 0x0e, 0x93, 0x07, 0xb2, 0xca, 0xe8, 0xf8, + 0x38, 0x0d, 0xf2, 0x48, 0x0c, 0xe8, 0xea, 0xd7, 0x11, 0xbc, 0x1c, 0x12, 0x89, 0xd5, 0x72, 0x4d, + 0xf6, 0x41, 0x15, 0x84, 0x97, 0x10, 0xdd, 0x98, 0x92, 0xd6, 0x2c, 0x99, 0x70, 0x36, 0x64, 0xea, + 0x18, 0x41, 0xf7, 0xc9, 0x2e, 0xe3, 0x5a, 0xe9, 0xaa, 0xa1, 0xc5, 0x23, 0x93, 0xcb, 0x46, 0x78, + 0x09, 0x93, 0xcf, 0x7e, 0x19, 0xf8, 0x7a, 0xd0, 0x7b, 0xe8, 0x04, 0xdd, 0x6f, 0xc9, 0x71, 0x32, + 0xfb, 0x93, 0xce, 0x46, 0xf8, 0x01, 0xa6, 0xcb, 0xad, 0xb5, 0xa4, 0xd9, 0x8f, 0xfd, 0xa9, 0xf7, + 0xfd, 0x0e, 0x93, 0xf3, 0xbf, 0x67, 0xd9, 0xd5, 0x66, 0x23, 0x7c, 0x0f, 0xd1, 0x97, 0x5a, 0x31, + 0xfd, 0x9f, 0x75, 0xd1, 0xc8, 0x62, 0xf3, 0xef, 0xd6, 0x45, 0xfa, 0xf5, 0x6d, 0xa5, 0xb8, 0xde, + 0xde, 0xce, 0x0b, 0x73, 0x77, 0x21, 0xc9, 0x56, 0x46, 0x99, 0xfe, 0x7b, 0xe1, 0x3d, 0xb7, 0x13, + 0xff, 0x16, 0xaf, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x02, 0xd2, 0x28, 0x55, 0xc9, 0x02, 0x00, + 0x00, +} + // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -192,8 +234,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for PolarisRPCService service - +// PolarisRPCServiceClient is the client API for PolarisRPCService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type PolarisRPCServiceClient interface { // Returns the current state of this node NodeState(ctx context.Context, in *NodeReq, opts ...grpc.CallOption) (*SingleBytes, error) @@ -214,7 +257,7 @@ func NewPolarisRPCServiceClient(cc *grpc.ClientConn) PolarisRPCServiceClient { func (c *polarisRPCServiceClient) NodeState(ctx context.Context, in *NodeReq, opts ...grpc.CallOption) (*SingleBytes, error) { out := new(SingleBytes) - err := grpc.Invoke(ctx, "/types.PolarisRPCService/NodeState", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/types.PolarisRPCService/NodeState", in, out, opts...) if err != nil { return nil, err } @@ -223,7 +266,7 @@ func (c *polarisRPCServiceClient) NodeState(ctx context.Context, in *NodeReq, op func (c *polarisRPCServiceClient) Metric(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*Metrics, error) { out := new(Metrics) - err := grpc.Invoke(ctx, "/types.PolarisRPCService/Metric", in, out, c.cc, opts...) 
+ err := c.cc.Invoke(ctx, "/types.PolarisRPCService/Metric", in, out, opts...) if err != nil { return nil, err } @@ -232,7 +275,7 @@ func (c *polarisRPCServiceClient) Metric(ctx context.Context, in *MetricsRequest func (c *polarisRPCServiceClient) CurrentList(ctx context.Context, in *Paginations, opts ...grpc.CallOption) (*PolarisPeerList, error) { out := new(PolarisPeerList) - err := grpc.Invoke(ctx, "/types.PolarisRPCService/CurrentList", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/types.PolarisRPCService/CurrentList", in, out, opts...) if err != nil { return nil, err } @@ -241,7 +284,7 @@ func (c *polarisRPCServiceClient) CurrentList(ctx context.Context, in *Paginatio func (c *polarisRPCServiceClient) WhiteList(ctx context.Context, in *Paginations, opts ...grpc.CallOption) (*PolarisPeerList, error) { out := new(PolarisPeerList) - err := grpc.Invoke(ctx, "/types.PolarisRPCService/WhiteList", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/types.PolarisRPCService/WhiteList", in, out, opts...) if err != nil { return nil, err } @@ -250,15 +293,14 @@ func (c *polarisRPCServiceClient) WhiteList(ctx context.Context, in *Paginations func (c *polarisRPCServiceClient) BlackList(ctx context.Context, in *Paginations, opts ...grpc.CallOption) (*PolarisPeerList, error) { out := new(PolarisPeerList) - err := grpc.Invoke(ctx, "/types.PolarisRPCService/BlackList", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/types.PolarisRPCService/BlackList", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for PolarisRPCService service - +// PolarisRPCServiceServer is the server API for PolarisRPCService service. type PolarisRPCServiceServer interface { // Returns the current state of this node NodeState(context.Context, *NodeReq) (*SingleBytes, error) @@ -391,33 +433,3 @@ var _PolarisRPCService_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "polarrpc.proto", } - -func init() { proto.RegisterFile("polarrpc.proto", fileDescriptor_polarrpc_90e1fbe18c16c515) } - -var fileDescriptor_polarrpc_90e1fbe18c16c515 = []byte{ - // 384 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0x41, 0x6f, 0x94, 0x50, - 0x10, 0x5e, 0x8a, 0xdb, 0xca, 0xd0, 0xae, 0x3a, 0x51, 0x43, 0x88, 0x31, 0x84, 0x13, 0x07, 0x43, - 0xd3, 0xf6, 0x64, 0x3c, 0xb9, 0x7b, 0xd5, 0x86, 0xbc, 0x3d, 0x98, 0x78, 0x7b, 0x85, 0x11, 0x5e, - 0x96, 0xf2, 0xe8, 0x7b, 0xb3, 0x6a, 0xfd, 0x4d, 0xfe, 0x48, 0xc3, 0x83, 0xed, 0xae, 0xf1, 0x64, - 0x4f, 0xcc, 0xf7, 0x7d, 0xf3, 0xf1, 0xcd, 0x0c, 0xc0, 0xa2, 0xd7, 0xad, 0x34, 0xa6, 0x2f, 0xf3, - 0xde, 0x68, 0xd6, 0x38, 0xe7, 0xfb, 0x9e, 0x6c, 0x0c, 0x9d, 0xae, 0x68, 0xa4, 0xe2, 0xe0, 0x41, - 0x8d, 0x4f, 0x6f, 0x89, 0x8d, 0x9a, 0x50, 0x7a, 0x05, 0x61, 0x21, 0x6b, 0xd5, 0x49, 0x56, 0xba, - 0xb3, 0xf8, 0x1c, 0x7c, 0x43, 0xdf, 0x22, 0x2f, 0xf1, 0xb2, 0x53, 0x31, 0x94, 0x88, 0xf0, 0xc4, - 0xaa, 0x5f, 0x14, 0xf9, 0x89, 0x97, 0x9d, 0x09, 0x57, 0xa7, 0x1b, 0x78, 0x56, 0x0c, 0x91, 0xca, - 0x16, 0x44, 0xe6, 0x93, 0xb2, 0x8c, 0x2f, 0x61, 0xce, 0x9a, 0x65, 0xeb, 0xac, 0x67, 0x62, 0x04, - 0x18, 0xc1, 0x49, 0x23, 0xed, 0x35, 0xfd, 0xe4, 0xe8, 0x28, 0xf1, 0xb2, 0xa7, 0x62, 0x07, 0x31, - 0x83, 0x79, 0x4f, 0x64, 0x6c, 0xe4, 0x27, 0x7e, 0x16, 0x5e, 0x62, 0xee, 0x66, 0xce, 0x0f, 0x5e, - 0x2b, 0xc6, 0x86, 0xf4, 0x07, 0x84, 0x07, 0x2c, 0xbe, 0x83, 0x13, 0x59, 0x55, 0x86, 0xac, 0x75, - 0x51, 0x07, 0x56, 0x22, 0xf3, 0x71, 0x54, 0xc4, 0xae, 0x05, 0xdf, 0x40, 0x50, 0xea, 0xae, 0xa3, - 0x92, 0xa9, 0x72, 0x23, 0xf8, 
0x62, 0x4f, 0x0c, 0x6a, 0x2b, 0x2d, 0xaf, 0x1a, 0x2a, 0x37, 0x6e, - 0x41, 0x5f, 0xec, 0x89, 0xcb, 0xdf, 0x47, 0xf0, 0x62, 0x4a, 0x16, 0xc5, 0x6a, 0x4d, 0xe6, 0xbb, - 0x2a, 0x09, 0x2f, 0x20, 0xb8, 0xd6, 0x15, 0xad, 0x59, 0x32, 0xe1, 0x62, 0xca, 0x1e, 0x18, 0x41, - 0x77, 0xf1, 0x6e, 0x96, 0xb5, 0xea, 0xea, 0x96, 0x96, 0xf7, 0x4c, 0x36, 0x9d, 0xe1, 0x05, 0x1c, - 0x7f, 0x76, 0x37, 0xc7, 0x57, 0x93, 0x3e, 0x42, 0x2b, 0xe8, 0x6e, 0x4b, 0x96, 0xe3, 0xc5, 0xdf, - 0x74, 0x3a, 0xc3, 0x0f, 0x10, 0xae, 0xb6, 0xc6, 0x50, 0xc7, 0xee, 0xba, 0x0f, 0x3b, 0xee, 0x3f, - 0x55, 0xfc, 0xfa, 0xdf, 0x93, 0x0d, 0xbd, 0xe9, 0x0c, 0xdf, 0x43, 0xf0, 0xa5, 0x51, 0x4c, 0x8f, - 0xb3, 0x2e, 0x5b, 0x59, 0x6e, 0xfe, 0xdf, 0xba, 0x4c, 0xbe, 0xbe, 0xad, 0x15, 0x37, 0xdb, 0x9b, - 0xbc, 0xd4, 0xb7, 0xe7, 0x92, 0x4c, 0xad, 0x95, 0x1e, 0x9f, 0xe7, 0xce, 0x73, 0x73, 0xec, 0x7e, - 0xb9, 0xab, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x86, 0xe3, 0x21, 0xfc, 0xb0, 0x02, 0x00, 0x00, -} diff --git a/types/raft.go b/types/raft.go new file mode 100644 index 000000000..f51e902a5 --- /dev/null +++ b/types/raft.go @@ -0,0 +1,22 @@ +package types + +import ( + "fmt" + "github.com/libp2p/go-libp2p-peer" +) + +func (mc *MembershipChange) ToString() string { + var buf string + + buf = fmt.Sprintf("type:%s,", MembershipChangeType_name[int32(mc.Type)]) + + buf = buf + mc.Attr.ToString() + return buf +} + +func (mattr *MemberAttr) ToString() string { + var buf string + + buf = fmt.Sprintf("{ name=%s, url=%s, peerid=%s, id=%x }", mattr.Name, mattr.Url, peer.ID(mattr.PeerID).Pretty(), mattr.ID) + return buf +} diff --git a/types/raft.pb.go b/types/raft.pb.go new file mode 100644 index 000000000..fb72fc2e8 --- /dev/null +++ b/types/raft.pb.go @@ -0,0 +1,320 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: raft.proto + +package types + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// cluster member for raft consensus +type MembershipChangeType int32 + +const ( + MembershipChangeType_ADD_MEMBER MembershipChangeType = 0 + MembershipChangeType_REMOVE_MEMBER MembershipChangeType = 1 +) + +var MembershipChangeType_name = map[int32]string{ + 0: "ADD_MEMBER", + 1: "REMOVE_MEMBER", +} + +var MembershipChangeType_value = map[string]int32{ + "ADD_MEMBER": 0, + "REMOVE_MEMBER": 1, +} + +func (x MembershipChangeType) String() string { + return proto.EnumName(MembershipChangeType_name, int32(x)) +} + +func (MembershipChangeType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{0} +} + +type MemberAttr struct { + ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` + PeerID []byte `protobuf:"bytes,4,opt,name=peerID,proto3" json:"peerID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemberAttr) Reset() { *m = MemberAttr{} } +func (m *MemberAttr) String() string { return proto.CompactTextString(m) } +func (*MemberAttr) ProtoMessage() {} +func (*MemberAttr) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{0} +} + +func (m *MemberAttr) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MemberAttr.Unmarshal(m, b) +} +func (m *MemberAttr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MemberAttr.Marshal(b, m, deterministic) +} +func (m *MemberAttr) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemberAttr.Merge(m, src) +} +func (m *MemberAttr) XXX_Size() int { + return xxx_messageInfo_MemberAttr.Size(m) +} +func (m *MemberAttr) XXX_DiscardUnknown() { + xxx_messageInfo_MemberAttr.DiscardUnknown(m) +} + +var xxx_messageInfo_MemberAttr proto.InternalMessageInfo + +func (m *MemberAttr) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberAttr) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MemberAttr) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *MemberAttr) GetPeerID() []byte { + if m != nil { + return m.PeerID + } + return nil +} + +type MembershipChange struct { + Type MembershipChangeType `protobuf:"varint,1,opt,name=type,proto3,enum=types.MembershipChangeType" json:"type,omitempty"` + Attr *MemberAttr `protobuf:"bytes,2,opt,name=attr,proto3" json:"attr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MembershipChange) Reset() { *m = MembershipChange{} } +func (m *MembershipChange) String() string { return proto.CompactTextString(m) } +func (*MembershipChange) ProtoMessage() {} +func (*MembershipChange) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{1} +} + +func (m *MembershipChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MembershipChange.Unmarshal(m, b) +} +func (m *MembershipChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MembershipChange.Marshal(b, m, deterministic) +} +func (m *MembershipChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_MembershipChange.Merge(m, src) +} +func (m *MembershipChange) XXX_Size() int { + return 
xxx_messageInfo_MembershipChange.Size(m) +} +func (m *MembershipChange) XXX_DiscardUnknown() { + xxx_messageInfo_MembershipChange.DiscardUnknown(m) +} + +var xxx_messageInfo_MembershipChange proto.InternalMessageInfo + +func (m *MembershipChange) GetType() MembershipChangeType { + if m != nil { + return m.Type + } + return MembershipChangeType_ADD_MEMBER +} + +func (m *MembershipChange) GetAttr() *MemberAttr { + if m != nil { + return m.Attr + } + return nil +} + +type MembershipChangeReply struct { + Attr *MemberAttr `protobuf:"bytes,1,opt,name=attr,proto3" json:"attr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MembershipChangeReply) Reset() { *m = MembershipChangeReply{} } +func (m *MembershipChangeReply) String() string { return proto.CompactTextString(m) } +func (*MembershipChangeReply) ProtoMessage() {} +func (*MembershipChangeReply) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{2} +} + +func (m *MembershipChangeReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MembershipChangeReply.Unmarshal(m, b) +} +func (m *MembershipChangeReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MembershipChangeReply.Marshal(b, m, deterministic) +} +func (m *MembershipChangeReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_MembershipChangeReply.Merge(m, src) +} +func (m *MembershipChangeReply) XXX_Size() int { + return xxx_messageInfo_MembershipChangeReply.Size(m) +} +func (m *MembershipChangeReply) XXX_DiscardUnknown() { + xxx_messageInfo_MembershipChangeReply.DiscardUnknown(m) +} + +var xxx_messageInfo_MembershipChangeReply proto.InternalMessageInfo + +func (m *MembershipChangeReply) GetAttr() *MemberAttr { + if m != nil { + return m.Attr + } + return nil +} + +// data types for raft support +// GetClusterInfoRequest +type GetClusterInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetClusterInfoRequest) Reset() { *m = GetClusterInfoRequest{} } +func (m *GetClusterInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterInfoRequest) ProtoMessage() {} +func (*GetClusterInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{3} +} + +func (m *GetClusterInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterInfoRequest.Unmarshal(m, b) +} +func (m *GetClusterInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterInfoRequest.Marshal(b, m, deterministic) +} +func (m *GetClusterInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterInfoRequest.Merge(m, src) +} +func (m *GetClusterInfoRequest) XXX_Size() int { + return xxx_messageInfo_GetClusterInfoRequest.Size(m) +} +func (m *GetClusterInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterInfoRequest proto.InternalMessageInfo + +type GetClusterInfoResponse struct { + ChainID []byte `protobuf:"bytes,1,opt,name=chainID,proto3" json:"chainID,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + MbrAttrs []*MemberAttr `protobuf:"bytes,3,rep,name=mbrAttrs,proto3" json:"mbrAttrs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + 
+func (m *GetClusterInfoResponse) Reset() { *m = GetClusterInfoResponse{} } +func (m *GetClusterInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetClusterInfoResponse) ProtoMessage() {} +func (*GetClusterInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b042552c306ae59b, []int{4} +} + +func (m *GetClusterInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetClusterInfoResponse.Unmarshal(m, b) +} +func (m *GetClusterInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetClusterInfoResponse.Marshal(b, m, deterministic) +} +func (m *GetClusterInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetClusterInfoResponse.Merge(m, src) +} +func (m *GetClusterInfoResponse) XXX_Size() int { + return xxx_messageInfo_GetClusterInfoResponse.Size(m) +} +func (m *GetClusterInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetClusterInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetClusterInfoResponse proto.InternalMessageInfo + +func (m *GetClusterInfoResponse) GetChainID() []byte { + if m != nil { + return m.ChainID + } + return nil +} + +func (m *GetClusterInfoResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *GetClusterInfoResponse) GetMbrAttrs() []*MemberAttr { + if m != nil { + return m.MbrAttrs + } + return nil +} + +func init() { + proto.RegisterEnum("types.MembershipChangeType", MembershipChangeType_name, MembershipChangeType_value) + proto.RegisterType((*MemberAttr)(nil), "types.MemberAttr") + proto.RegisterType((*MembershipChange)(nil), "types.MembershipChange") + proto.RegisterType((*MembershipChangeReply)(nil), "types.MembershipChangeReply") + proto.RegisterType((*GetClusterInfoRequest)(nil), "types.GetClusterInfoRequest") + proto.RegisterType((*GetClusterInfoResponse)(nil), "types.GetClusterInfoResponse") +} + +func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) } + +var fileDescriptor_b042552c306ae59b = []byte{ + // 333 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x51, 0x4d, 0x6b, 0xc2, 0x40, + 0x14, 0x6c, 0x4c, 0xb4, 0xf5, 0xd5, 0x8a, 0x2e, 0x6a, 0x43, 0x0b, 0x25, 0x04, 0x0a, 0x52, 0x68, + 0x04, 0x7b, 0xea, 0xa5, 0xa0, 0x26, 0x94, 0x1c, 0x42, 0x61, 0x29, 0x3d, 0x78, 0x29, 0x89, 0x3c, + 0x4d, 0x8a, 0xc9, 0x6e, 0x77, 0x37, 0x14, 0xff, 0x7d, 0x71, 0xa3, 0x42, 0x45, 0x7a, 0xda, 0xf7, + 0x31, 0xf3, 0x66, 0x98, 0x05, 0x10, 0xf1, 0x52, 0x79, 0x5c, 0x30, 0xc5, 0x48, 0x5d, 0x6d, 0x38, + 0xca, 0x9b, 0x26, 0x1f, 0xf3, 0x6a, 0xe2, 0xce, 0x01, 0x22, 0xcc, 0x13, 0x14, 0x13, 0xa5, 0x04, + 0x69, 0x43, 0x2d, 0xf4, 0x6d, 0xc3, 0x31, 0x86, 0x16, 0xad, 0x85, 0x3e, 0x21, 0x60, 0x15, 0x71, + 0x8e, 0x76, 0xcd, 0x31, 0x86, 0x4d, 0xaa, 0x6b, 0xd2, 0x01, 0xb3, 0x14, 0x6b, 0xdb, 0xd4, 0xa3, + 0x6d, 0x49, 0x06, 0xd0, 0xe0, 0x88, 0x22, 0xf4, 0x6d, 0xcb, 0x31, 0x86, 0x2d, 0xba, 0xeb, 0xdc, + 0x2f, 0xe8, 0x54, 0xb7, 0x65, 0x9a, 0xf1, 0x59, 0x1a, 0x17, 0x2b, 0x24, 0x23, 0xb0, 0xb6, 0x1e, + 0xb4, 0x46, 0x7b, 0x7c, 0xeb, 0x69, 0x43, 0xde, 0x31, 0xec, 0x7d, 0xc3, 0x91, 0x6a, 0x20, 0xb9, + 0x07, 0x2b, 0x56, 0x4a, 0x68, 0x0b, 0x97, 0xe3, 0xee, 0x1f, 0xc2, 0xd6, 0x33, 0xd5, 0x6b, 0xf7, + 0x05, 0xfa, 0xc7, 0x47, 0x28, 0xf2, 0xf5, 0xe6, 0xc0, 0x37, 0xfe, 0xe7, 0x5f, 0x43, 0xff, 0x15, + 0xd5, 0x6c, 0x5d, 0x4a, 0x85, 0x22, 0x2c, 0x96, 0x8c, 0xe2, 0x77, 0x89, 0x52, 0xb9, 0x3f, 0x30, + 0x38, 0x5e, 0x48, 0xce, 0x0a, 0x89, 0xc4, 0x86, 0xf3, 0x45, 0x1a, 0x67, 0xc5, 
0x2e, 0xb1, 0x16, + 0xdd, 0xb7, 0xa4, 0x07, 0x75, 0x14, 0x82, 0x89, 0x5d, 0x6e, 0x55, 0x43, 0x1e, 0xe1, 0x22, 0x4f, + 0xb4, 0xa6, 0xb4, 0x4d, 0xc7, 0x3c, 0xed, 0xe6, 0x00, 0x79, 0x78, 0x86, 0xde, 0xa9, 0x58, 0x48, + 0x1b, 0x60, 0xe2, 0xfb, 0x9f, 0x51, 0x10, 0x4d, 0x03, 0xda, 0x39, 0x23, 0x5d, 0xb8, 0xa2, 0x41, + 0xf4, 0xf6, 0x11, 0xec, 0x47, 0xc6, 0xd4, 0x99, 0xdf, 0xad, 0x32, 0x95, 0x96, 0x89, 0xb7, 0x60, + 0xf9, 0x28, 0x46, 0xb1, 0x62, 0x19, 0xab, 0xde, 0x91, 0x56, 0x4c, 0x1a, 0xfa, 0xf7, 0x9f, 0x7e, + 0x03, 0x00, 0x00, 0xff, 0xff, 0x01, 0xb2, 0x9b, 0x00, 0x1d, 0x02, 0x00, 0x00, +} diff --git a/types/rpc.go b/types/rpc.go index 6a039c693..20c25c3e0 100644 --- a/types/rpc.go +++ b/types/rpc.go @@ -8,7 +8,7 @@ package types import "strconv" func AddCategory(confs map[string]*ConfigItem, category string) *ConfigItem { - cat := &ConfigItem{Props:make(map[string]string)} + cat := &ConfigItem{Props: make(map[string]string)} confs[category] = cat return cat } @@ -24,11 +24,11 @@ func (ci *ConfigItem) AddBool(key string, value bool) *ConfigItem { } func (ci *ConfigItem) AddFloat(key string, value float64) *ConfigItem { - ci.Add(key, strconv.FormatFloat(value,'g', -1,64)) + ci.Add(key, strconv.FormatFloat(value, 'g', -1, 64)) return ci } func (ci *ConfigItem) Add(key, value string) *ConfigItem { ci.Props[key] = value return ci -} \ No newline at end of file +} diff --git a/types/rpc.pb.go b/types/rpc.pb.go index bbc1168e8..64fd42315 100644 --- a/types/rpc.pb.go +++ b/types/rpc.pb.go @@ -1,15 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: rpc.proto -package types // import "github.com/aergoio/aergo/types" - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" +package types import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" context "golang.org/x/net/context" grpc "google.golang.org/grpc" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -48,6 +47,7 @@ var CommitStatus_name = map[int32]string{ 7: "TX_HAS_SAME_NONCE", 9: "TX_INTERNAL_ERROR", } + var CommitStatus_value = map[string]int32{ "TX_OK": 0, "TX_NONCE_TOO_LOW": 1, @@ -63,8 +63,9 @@ var CommitStatus_value = map[string]int32{ func (x CommitStatus) String() string { return proto.EnumName(CommitStatus_name, int32(x)) } + func (CommitStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{0} + return fileDescriptor_77a6da22d6a3feb1, []int{0} } type VerifyStatus int32 @@ -80,6 +81,7 @@ var VerifyStatus_name = map[int32]string{ 1: "VERIFY_STATUS_SIGN_NOT_MATCH", 2: "VERIFY_STATUS_INVALID_HASH", } + var VerifyStatus_value = map[string]int32{ "VERIFY_STATUS_OK": 0, "VERIFY_STATUS_SIGN_NOT_MATCH": 1, @@ -89,8 +91,9 @@ var VerifyStatus_value = map[string]int32{ func (x VerifyStatus) String() string { return proto.EnumName(VerifyStatus_name, int32(x)) } + func (VerifyStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{1} + return fileDescriptor_77a6da22d6a3feb1, []int{1} } // BlockchainStatus is current status of blockchain @@ -108,16 +111,17 @@ func (m *BlockchainStatus) Reset() { *m = BlockchainStatus{} } func (m *BlockchainStatus) String() string { return proto.CompactTextString(m) } func (*BlockchainStatus) ProtoMessage() {} func (*BlockchainStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{0} + return fileDescriptor_77a6da22d6a3feb1, []int{0} } + func (m *BlockchainStatus) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockchainStatus.Unmarshal(m, b) } func (m *BlockchainStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockchainStatus.Marshal(b, m, deterministic) } -func (dst *BlockchainStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockchainStatus.Merge(dst, src) +func (m *BlockchainStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockchainStatus.Merge(m, src) } func (m *BlockchainStatus) XXX_Size() int { return xxx_messageInfo_BlockchainStatus.Size(m) @@ -170,16 +174,17 @@ func (m *ChainId) Reset() { *m = ChainId{} } func (m *ChainId) String() string { return proto.CompactTextString(m) } func (*ChainId) ProtoMessage() {} func (*ChainId) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{1} + return fileDescriptor_77a6da22d6a3feb1, []int{1} } + func (m *ChainId) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ChainId.Unmarshal(m, b) } func (m *ChainId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ChainId.Marshal(b, m, deterministic) } -func (dst *ChainId) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChainId.Merge(dst, src) +func (m *ChainId) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChainId.Merge(m, src) } func (m *ChainId) XXX_Size() int { return xxx_messageInfo_ChainId.Size(m) @@ -237,16 +242,17 @@ func (m *ChainInfo) Reset() { *m = ChainInfo{} } func (m *ChainInfo) String() string { return proto.CompactTextString(m) } func (*ChainInfo) ProtoMessage() {} func (*ChainInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{2} + return fileDescriptor_77a6da22d6a3feb1, []int{2} } + func (m *ChainInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ChainInfo.Unmarshal(m, b) } func (m *ChainInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ChainInfo.Marshal(b, m, deterministic) } -func (dst *ChainInfo) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_ChainInfo.Merge(dst, src) +func (m *ChainInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChainInfo.Merge(m, src) } func (m *ChainInfo) XXX_Size() int { return xxx_messageInfo_ChainInfo.Size(m) @@ -313,6 +319,46 @@ func (m *ChainInfo) GetNameprice() []byte { return nil } +// ChainStats corresponds to a chain statistics report. +type ChainStats struct { + Report string `protobuf:"bytes,1,opt,name=report,proto3" json:"report,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChainStats) Reset() { *m = ChainStats{} } +func (m *ChainStats) String() string { return proto.CompactTextString(m) } +func (*ChainStats) ProtoMessage() {} +func (*ChainStats) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{3} +} + +func (m *ChainStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ChainStats.Unmarshal(m, b) +} +func (m *ChainStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ChainStats.Marshal(b, m, deterministic) +} +func (m *ChainStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChainStats.Merge(m, src) +} +func (m *ChainStats) XXX_Size() int { + return xxx_messageInfo_ChainStats.Size(m) +} +func (m *ChainStats) XXX_DiscardUnknown() { + xxx_messageInfo_ChainStats.DiscardUnknown(m) +} + +var xxx_messageInfo_ChainStats proto.InternalMessageInfo + +func (m *ChainStats) GetReport() string { + if m != nil { + return m.Report + } + return "" +} + type Input struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` Address [][]byte `protobuf:"bytes,2,rep,name=address,proto3" json:"address,omitempty"` @@ -327,16 +373,17 @@ func (m *Input) Reset() { *m = Input{} } func (m *Input) String() string { return proto.CompactTextString(m) } func (*Input) ProtoMessage() {} func (*Input) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{3} + return fileDescriptor_77a6da22d6a3feb1, []int{4} } + func (m *Input) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Input.Unmarshal(m, b) } func (m *Input) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Input.Marshal(b, m, deterministic) } -func (dst *Input) XXX_Merge(src proto.Message) { - xxx_messageInfo_Input.Merge(dst, src) +func (m *Input) XXX_Merge(src proto.Message) { + xxx_messageInfo_Input.Merge(m, src) } func (m *Input) XXX_Size() int { return xxx_messageInfo_Input.Size(m) @@ -389,16 +436,17 @@ func (m *Output) Reset() { *m = Output{} } func (m *Output) String() string { return proto.CompactTextString(m) } func (*Output) ProtoMessage() {} func (*Output) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{4} + return fileDescriptor_77a6da22d6a3feb1, []int{5} } + func (m *Output) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Output.Unmarshal(m, b) } func (m *Output) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Output.Marshal(b, m, deterministic) } -func (dst *Output) XXX_Merge(src proto.Message) { - xxx_messageInfo_Output.Merge(dst, src) +func (m *Output) XXX_Merge(src proto.Message) { + xxx_messageInfo_Output.Merge(m, src) } func (m *Output) XXX_Size() int { return xxx_messageInfo_Output.Size(m) @@ -447,16 +495,17 @@ func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} func (*Empty) 
Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{5} + return fileDescriptor_77a6da22d6a3feb1, []int{6} } + func (m *Empty) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Empty.Unmarshal(m, b) } func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Empty.Marshal(b, m, deterministic) } -func (dst *Empty) XXX_Merge(src proto.Message) { - xxx_messageInfo_Empty.Merge(dst, src) +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) } func (m *Empty) XXX_Size() int { return xxx_messageInfo_Empty.Size(m) @@ -478,16 +527,17 @@ func (m *SingleBytes) Reset() { *m = SingleBytes{} } func (m *SingleBytes) String() string { return proto.CompactTextString(m) } func (*SingleBytes) ProtoMessage() {} func (*SingleBytes) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{6} + return fileDescriptor_77a6da22d6a3feb1, []int{7} } + func (m *SingleBytes) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_SingleBytes.Unmarshal(m, b) } func (m *SingleBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_SingleBytes.Marshal(b, m, deterministic) } -func (dst *SingleBytes) XXX_Merge(src proto.Message) { - xxx_messageInfo_SingleBytes.Merge(dst, src) +func (m *SingleBytes) XXX_Merge(src proto.Message) { + xxx_messageInfo_SingleBytes.Merge(m, src) } func (m *SingleBytes) XXX_Size() int { return xxx_messageInfo_SingleBytes.Size(m) @@ -516,16 +566,17 @@ func (m *AccountAddress) Reset() { *m = AccountAddress{} } func (m *AccountAddress) String() string { return proto.CompactTextString(m) } func (*AccountAddress) ProtoMessage() {} func (*AccountAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{7} + return fileDescriptor_77a6da22d6a3feb1, []int{8} } + func (m *AccountAddress) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AccountAddress.Unmarshal(m, b) } func (m *AccountAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AccountAddress.Marshal(b, m, deterministic) } -func (dst *AccountAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccountAddress.Merge(dst, src) +func (m *AccountAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccountAddress.Merge(m, src) } func (m *AccountAddress) XXX_Size() int { return xxx_messageInfo_AccountAddress.Size(m) @@ -556,16 +607,17 @@ func (m *AccountAndRoot) Reset() { *m = AccountAndRoot{} } func (m *AccountAndRoot) String() string { return proto.CompactTextString(m) } func (*AccountAndRoot) ProtoMessage() {} func (*AccountAndRoot) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{8} + return fileDescriptor_77a6da22d6a3feb1, []int{9} } + func (m *AccountAndRoot) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AccountAndRoot.Unmarshal(m, b) } func (m *AccountAndRoot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AccountAndRoot.Marshal(b, m, deterministic) } -func (dst *AccountAndRoot) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccountAndRoot.Merge(dst, src) +func (m *AccountAndRoot) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccountAndRoot.Merge(m, src) } func (m *AccountAndRoot) XXX_Size() int { return xxx_messageInfo_AccountAndRoot.Size(m) @@ -604,6 +656,7 @@ type Peer struct { Hidden bool `protobuf:"varint,4,opt,name=hidden,proto3" json:"hidden,omitempty"` LashCheck int64 
`protobuf:"varint,5,opt,name=lashCheck,proto3" json:"lashCheck,omitempty"` Selfpeer bool `protobuf:"varint,6,opt,name=selfpeer,proto3" json:"selfpeer,omitempty"` + Version string `protobuf:"bytes,7,opt,name=version,proto3" json:"version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -613,16 +666,17 @@ func (m *Peer) Reset() { *m = Peer{} } func (m *Peer) String() string { return proto.CompactTextString(m) } func (*Peer) ProtoMessage() {} func (*Peer) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{9} + return fileDescriptor_77a6da22d6a3feb1, []int{10} } + func (m *Peer) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Peer.Unmarshal(m, b) } func (m *Peer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Peer.Marshal(b, m, deterministic) } -func (dst *Peer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Peer.Merge(dst, src) +func (m *Peer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Peer.Merge(m, src) } func (m *Peer) XXX_Size() int { return xxx_messageInfo_Peer.Size(m) @@ -675,6 +729,13 @@ func (m *Peer) GetSelfpeer() bool { return false } +func (m *Peer) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + type PeerList struct { Peers []*Peer `protobuf:"bytes,1,rep,name=peers,proto3" json:"peers,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -686,16 +747,17 @@ func (m *PeerList) Reset() { *m = PeerList{} } func (m *PeerList) String() string { return proto.CompactTextString(m) } func (*PeerList) ProtoMessage() {} func (*PeerList) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{10} + return fileDescriptor_77a6da22d6a3feb1, []int{11} } + func (m *PeerList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PeerList.Unmarshal(m, b) } func (m *PeerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PeerList.Marshal(b, m, deterministic) } -func (dst *PeerList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PeerList.Merge(dst, src) +func (m *PeerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerList.Merge(m, src) } func (m *PeerList) XXX_Size() int { return xxx_messageInfo_PeerList.Size(m) @@ -728,16 +790,17 @@ func (m *ListParams) Reset() { *m = ListParams{} } func (m *ListParams) String() string { return proto.CompactTextString(m) } func (*ListParams) ProtoMessage() {} func (*ListParams) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{11} + return fileDescriptor_77a6da22d6a3feb1, []int{12} } + func (m *ListParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListParams.Unmarshal(m, b) } func (m *ListParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ListParams.Marshal(b, m, deterministic) } -func (dst *ListParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListParams.Merge(dst, src) +func (m *ListParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListParams.Merge(m, src) } func (m *ListParams) XXX_Size() int { return xxx_messageInfo_ListParams.Size(m) @@ -795,16 +858,17 @@ func (m *PageParams) Reset() { *m = PageParams{} } func (m *PageParams) String() string { return proto.CompactTextString(m) } func (*PageParams) ProtoMessage() {} func (*PageParams) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{12} + return fileDescriptor_77a6da22d6a3feb1, []int{13} } + func (m *PageParams) 
XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PageParams.Unmarshal(m, b) } func (m *PageParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PageParams.Marshal(b, m, deterministic) } -func (dst *PageParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_PageParams.Merge(dst, src) +func (m *PageParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_PageParams.Merge(m, src) } func (m *PageParams) XXX_Size() int { return xxx_messageInfo_PageParams.Size(m) @@ -843,16 +907,17 @@ func (m *BlockBodyPaged) Reset() { *m = BlockBodyPaged{} } func (m *BlockBodyPaged) String() string { return proto.CompactTextString(m) } func (*BlockBodyPaged) ProtoMessage() {} func (*BlockBodyPaged) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{13} + return fileDescriptor_77a6da22d6a3feb1, []int{14} } + func (m *BlockBodyPaged) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockBodyPaged.Unmarshal(m, b) } func (m *BlockBodyPaged) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockBodyPaged.Marshal(b, m, deterministic) } -func (dst *BlockBodyPaged) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockBodyPaged.Merge(dst, src) +func (m *BlockBodyPaged) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockBodyPaged.Merge(m, src) } func (m *BlockBodyPaged) XXX_Size() int { return xxx_messageInfo_BlockBodyPaged.Size(m) @@ -903,16 +968,17 @@ func (m *BlockBodyParams) Reset() { *m = BlockBodyParams{} } func (m *BlockBodyParams) String() string { return proto.CompactTextString(m) } func (*BlockBodyParams) ProtoMessage() {} func (*BlockBodyParams) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{14} + return fileDescriptor_77a6da22d6a3feb1, []int{15} } + func (m *BlockBodyParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockBodyParams.Unmarshal(m, b) } func (m *BlockBodyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockBodyParams.Marshal(b, m, deterministic) } -func (dst *BlockBodyParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockBodyParams.Merge(dst, src) +func (m *BlockBodyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockBodyParams.Merge(m, src) } func (m *BlockBodyParams) XXX_Size() int { return xxx_messageInfo_BlockBodyParams.Size(m) @@ -948,16 +1014,17 @@ func (m *BlockHeaderList) Reset() { *m = BlockHeaderList{} } func (m *BlockHeaderList) String() string { return proto.CompactTextString(m) } func (*BlockHeaderList) ProtoMessage() {} func (*BlockHeaderList) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{15} + return fileDescriptor_77a6da22d6a3feb1, []int{16} } + func (m *BlockHeaderList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockHeaderList.Unmarshal(m, b) } func (m *BlockHeaderList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockHeaderList.Marshal(b, m, deterministic) } -func (dst *BlockHeaderList) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockHeaderList.Merge(dst, src) +func (m *BlockHeaderList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockHeaderList.Merge(m, src) } func (m *BlockHeaderList) XXX_Size() int { return xxx_messageInfo_BlockHeaderList.Size(m) @@ -979,6 +1046,7 @@ type BlockMetadata struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` Header *BlockHeader `protobuf:"bytes,2,opt,name=header,proto3" 
json:"header,omitempty"` Txcount int32 `protobuf:"varint,3,opt,name=txcount,proto3" json:"txcount,omitempty"` + Size int64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -988,16 +1056,17 @@ func (m *BlockMetadata) Reset() { *m = BlockMetadata{} } func (m *BlockMetadata) String() string { return proto.CompactTextString(m) } func (*BlockMetadata) ProtoMessage() {} func (*BlockMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{16} + return fileDescriptor_77a6da22d6a3feb1, []int{17} } + func (m *BlockMetadata) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockMetadata.Unmarshal(m, b) } func (m *BlockMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockMetadata.Marshal(b, m, deterministic) } -func (dst *BlockMetadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockMetadata.Merge(dst, src) +func (m *BlockMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMetadata.Merge(m, src) } func (m *BlockMetadata) XXX_Size() int { return xxx_messageInfo_BlockMetadata.Size(m) @@ -1029,6 +1098,13 @@ func (m *BlockMetadata) GetTxcount() int32 { return 0 } +func (m *BlockMetadata) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + type BlockMetadataList struct { Blocks []*BlockMetadata `protobuf:"bytes,1,rep,name=blocks,proto3" json:"blocks,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1040,16 +1116,17 @@ func (m *BlockMetadataList) Reset() { *m = BlockMetadataList{} } func (m *BlockMetadataList) String() string { return proto.CompactTextString(m) } func (*BlockMetadataList) ProtoMessage() {} func (*BlockMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{17} + return fileDescriptor_77a6da22d6a3feb1, []int{18} } + func (m *BlockMetadataList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BlockMetadataList.Unmarshal(m, b) } func (m *BlockMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_BlockMetadataList.Marshal(b, m, deterministic) } -func (dst *BlockMetadataList) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockMetadataList.Merge(dst, src) +func (m *BlockMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockMetadataList.Merge(m, src) } func (m *BlockMetadataList) XXX_Size() int { return xxx_messageInfo_BlockMetadataList.Size(m) @@ -1080,16 +1157,17 @@ func (m *CommitResult) Reset() { *m = CommitResult{} } func (m *CommitResult) String() string { return proto.CompactTextString(m) } func (*CommitResult) ProtoMessage() {} func (*CommitResult) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{18} + return fileDescriptor_77a6da22d6a3feb1, []int{19} } + func (m *CommitResult) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CommitResult.Unmarshal(m, b) } func (m *CommitResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CommitResult.Marshal(b, m, deterministic) } -func (dst *CommitResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResult.Merge(dst, src) +func (m *CommitResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResult.Merge(m, src) } func (m *CommitResult) XXX_Size() int { return xxx_messageInfo_CommitResult.Size(m) @@ -1132,16 +1210,17 @@ func (m *CommitResultList) Reset() { *m = CommitResultList{} } func (m 
*CommitResultList) String() string { return proto.CompactTextString(m) } func (*CommitResultList) ProtoMessage() {} func (*CommitResultList) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{19} + return fileDescriptor_77a6da22d6a3feb1, []int{20} } + func (m *CommitResultList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CommitResultList.Unmarshal(m, b) } func (m *CommitResultList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_CommitResultList.Marshal(b, m, deterministic) } -func (dst *CommitResultList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResultList.Merge(dst, src) +func (m *CommitResultList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResultList.Merge(m, src) } func (m *CommitResultList) XXX_Size() int { return xxx_messageInfo_CommitResultList.Size(m) @@ -1171,16 +1250,17 @@ func (m *VerifyResult) Reset() { *m = VerifyResult{} } func (m *VerifyResult) String() string { return proto.CompactTextString(m) } func (*VerifyResult) ProtoMessage() {} func (*VerifyResult) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{20} + return fileDescriptor_77a6da22d6a3feb1, []int{21} } + func (m *VerifyResult) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VerifyResult.Unmarshal(m, b) } func (m *VerifyResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VerifyResult.Marshal(b, m, deterministic) } -func (dst *VerifyResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyResult.Merge(dst, src) +func (m *VerifyResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyResult.Merge(m, src) } func (m *VerifyResult) XXX_Size() int { return xxx_messageInfo_VerifyResult.Size(m) @@ -1217,16 +1297,17 @@ func (m *Personal) Reset() { *m = Personal{} } func (m *Personal) String() string { return proto.CompactTextString(m) } func (*Personal) ProtoMessage() {} func (*Personal) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{21} + return fileDescriptor_77a6da22d6a3feb1, []int{22} } + func (m *Personal) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Personal.Unmarshal(m, b) } func (m *Personal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Personal.Marshal(b, m, deterministic) } -func (dst *Personal) XXX_Merge(src proto.Message) { - xxx_messageInfo_Personal.Merge(dst, src) +func (m *Personal) XXX_Merge(src proto.Message) { + xxx_messageInfo_Personal.Merge(m, src) } func (m *Personal) XXX_Size() int { return xxx_messageInfo_Personal.Size(m) @@ -1264,16 +1345,17 @@ func (m *ImportFormat) Reset() { *m = ImportFormat{} } func (m *ImportFormat) String() string { return proto.CompactTextString(m) } func (*ImportFormat) ProtoMessage() {} func (*ImportFormat) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{22} + return fileDescriptor_77a6da22d6a3feb1, []int{23} } + func (m *ImportFormat) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ImportFormat.Unmarshal(m, b) } func (m *ImportFormat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ImportFormat.Marshal(b, m, deterministic) } -func (dst *ImportFormat) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImportFormat.Merge(dst, src) +func (m *ImportFormat) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportFormat.Merge(m, src) } func (m *ImportFormat) XXX_Size() int { return xxx_messageInfo_ImportFormat.Size(m) @@ -1317,16 
+1399,17 @@ func (m *Staking) Reset() { *m = Staking{} } func (m *Staking) String() string { return proto.CompactTextString(m) } func (*Staking) ProtoMessage() {} func (*Staking) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{23} + return fileDescriptor_77a6da22d6a3feb1, []int{24} } + func (m *Staking) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Staking.Unmarshal(m, b) } func (m *Staking) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Staking.Marshal(b, m, deterministic) } -func (dst *Staking) XXX_Merge(src proto.Message) { - xxx_messageInfo_Staking.Merge(dst, src) +func (m *Staking) XXX_Merge(src proto.Message) { + xxx_messageInfo_Staking.Merge(m, src) } func (m *Staking) XXX_Size() int { return xxx_messageInfo_Staking.Size(m) @@ -1363,16 +1446,17 @@ func (m *Vote) Reset() { *m = Vote{} } func (m *Vote) String() string { return proto.CompactTextString(m) } func (*Vote) ProtoMessage() {} func (*Vote) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{24} + return fileDescriptor_77a6da22d6a3feb1, []int{25} } + func (m *Vote) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Vote.Unmarshal(m, b) } func (m *Vote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Vote.Marshal(b, m, deterministic) } -func (dst *Vote) XXX_Merge(src proto.Message) { - xxx_messageInfo_Vote.Merge(dst, src) +func (m *Vote) XXX_Merge(src proto.Message) { + xxx_messageInfo_Vote.Merge(m, src) } func (m *Vote) XXX_Size() int { return xxx_messageInfo_Vote.Size(m) @@ -1409,16 +1493,17 @@ func (m *VoteParams) Reset() { *m = VoteParams{} } func (m *VoteParams) String() string { return proto.CompactTextString(m) } func (*VoteParams) ProtoMessage() {} func (*VoteParams) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{25} + return fileDescriptor_77a6da22d6a3feb1, []int{26} } + func (m *VoteParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VoteParams.Unmarshal(m, b) } func (m *VoteParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VoteParams.Marshal(b, m, deterministic) } -func (dst *VoteParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoteParams.Merge(dst, src) +func (m *VoteParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteParams.Merge(m, src) } func (m *VoteParams) XXX_Size() int { return xxx_messageInfo_VoteParams.Size(m) @@ -1455,16 +1540,17 @@ func (m *AccountVoteInfo) Reset() { *m = AccountVoteInfo{} } func (m *AccountVoteInfo) String() string { return proto.CompactTextString(m) } func (*AccountVoteInfo) ProtoMessage() {} func (*AccountVoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{26} + return fileDescriptor_77a6da22d6a3feb1, []int{27} } + func (m *AccountVoteInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AccountVoteInfo.Unmarshal(m, b) } func (m *AccountVoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AccountVoteInfo.Marshal(b, m, deterministic) } -func (dst *AccountVoteInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccountVoteInfo.Merge(dst, src) +func (m *AccountVoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccountVoteInfo.Merge(m, src) } func (m *AccountVoteInfo) XXX_Size() int { return xxx_messageInfo_AccountVoteInfo.Size(m) @@ -1501,16 +1587,17 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return 
proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{27} + return fileDescriptor_77a6da22d6a3feb1, []int{28} } + func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VoteInfo.Unmarshal(m, b) } func (m *VoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VoteInfo.Marshal(b, m, deterministic) } -func (dst *VoteInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoteInfo.Merge(dst, src) +func (m *VoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteInfo.Merge(m, src) } func (m *VoteInfo) XXX_Size() int { return xxx_messageInfo_VoteInfo.Size(m) @@ -1547,16 +1634,17 @@ func (m *VoteList) Reset() { *m = VoteList{} } func (m *VoteList) String() string { return proto.CompactTextString(m) } func (*VoteList) ProtoMessage() {} func (*VoteList) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{28} + return fileDescriptor_77a6da22d6a3feb1, []int{29} } + func (m *VoteList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VoteList.Unmarshal(m, b) } func (m *VoteList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_VoteList.Marshal(b, m, deterministic) } -func (dst *VoteList) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoteList.Merge(dst, src) +func (m *VoteList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteList.Merge(m, src) } func (m *VoteList) XXX_Size() int { return xxx_messageInfo_VoteList.Size(m) @@ -1593,16 +1681,17 @@ func (m *NodeReq) Reset() { *m = NodeReq{} } func (m *NodeReq) String() string { return proto.CompactTextString(m) } func (*NodeReq) ProtoMessage() {} func (*NodeReq) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{29} + return fileDescriptor_77a6da22d6a3feb1, []int{30} } + func (m *NodeReq) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NodeReq.Unmarshal(m, b) } func (m *NodeReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NodeReq.Marshal(b, m, deterministic) } -func (dst *NodeReq) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeReq.Merge(dst, src) +func (m *NodeReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeReq.Merge(m, src) } func (m *NodeReq) XXX_Size() int { return xxx_messageInfo_NodeReq.Size(m) @@ -1629,6 +1718,7 @@ func (m *NodeReq) GetComponent() []byte { type Name struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + BlockNo uint64 `protobuf:"varint,2,opt,name=blockNo,proto3" json:"blockNo,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1638,16 +1728,17 @@ func (m *Name) Reset() { *m = Name{} } func (m *Name) String() string { return proto.CompactTextString(m) } func (*Name) ProtoMessage() {} func (*Name) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{30} + return fileDescriptor_77a6da22d6a3feb1, []int{31} } + func (m *Name) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Name.Unmarshal(m, b) } func (m *Name) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Name.Marshal(b, m, deterministic) } -func (dst *Name) XXX_Merge(src proto.Message) { - xxx_messageInfo_Name.Merge(dst, src) +func (m *Name) XXX_Merge(src proto.Message) { + xxx_messageInfo_Name.Merge(m, src) } func (m *Name) XXX_Size() int { return 
xxx_messageInfo_Name.Size(m) @@ -1665,6 +1756,13 @@ func (m *Name) GetName() string { return "" } +func (m *Name) GetBlockNo() uint64 { + if m != nil { + return m.BlockNo + } + return 0 +} + type NameInfo struct { Name *Name `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Owner []byte `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` @@ -1678,16 +1776,17 @@ func (m *NameInfo) Reset() { *m = NameInfo{} } func (m *NameInfo) String() string { return proto.CompactTextString(m) } func (*NameInfo) ProtoMessage() {} func (*NameInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{31} + return fileDescriptor_77a6da22d6a3feb1, []int{32} } + func (m *NameInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_NameInfo.Unmarshal(m, b) } func (m *NameInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_NameInfo.Marshal(b, m, deterministic) } -func (dst *NameInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_NameInfo.Merge(dst, src) +func (m *NameInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_NameInfo.Merge(m, src) } func (m *NameInfo) XXX_Size() int { return xxx_messageInfo_NameInfo.Size(m) @@ -1731,16 +1830,17 @@ func (m *PeersParams) Reset() { *m = PeersParams{} } func (m *PeersParams) String() string { return proto.CompactTextString(m) } func (*PeersParams) ProtoMessage() {} func (*PeersParams) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{32} + return fileDescriptor_77a6da22d6a3feb1, []int{33} } + func (m *PeersParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_PeersParams.Unmarshal(m, b) } func (m *PeersParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_PeersParams.Marshal(b, m, deterministic) } -func (dst *PeersParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_PeersParams.Merge(dst, src) +func (m *PeersParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeersParams.Merge(m, src) } func (m *PeersParams) XXX_Size() int { return xxx_messageInfo_PeersParams.Size(m) @@ -1776,16 +1876,17 @@ func (m *KeyParams) Reset() { *m = KeyParams{} } func (m *KeyParams) String() string { return proto.CompactTextString(m) } func (*KeyParams) ProtoMessage() {} func (*KeyParams) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{33} + return fileDescriptor_77a6da22d6a3feb1, []int{34} } + func (m *KeyParams) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_KeyParams.Unmarshal(m, b) } func (m *KeyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_KeyParams.Marshal(b, m, deterministic) } -func (dst *KeyParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyParams.Merge(dst, src) +func (m *KeyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyParams.Merge(m, src) } func (m *KeyParams) XXX_Size() int { return xxx_messageInfo_KeyParams.Size(m) @@ -1815,16 +1916,17 @@ func (m *ServerInfo) Reset() { *m = ServerInfo{} } func (m *ServerInfo) String() string { return proto.CompactTextString(m) } func (*ServerInfo) ProtoMessage() {} func (*ServerInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{34} + return fileDescriptor_77a6da22d6a3feb1, []int{35} } + func (m *ServerInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerInfo.Unmarshal(m, b) } func (m *ServerInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerInfo.Marshal(b, m, 
deterministic) } -func (dst *ServerInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerInfo.Merge(dst, src) +func (m *ServerInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerInfo.Merge(m, src) } func (m *ServerInfo) XXX_Size() int { return xxx_messageInfo_ServerInfo.Size(m) @@ -1860,16 +1962,17 @@ func (m *ConfigItem) Reset() { *m = ConfigItem{} } func (m *ConfigItem) String() string { return proto.CompactTextString(m) } func (*ConfigItem) ProtoMessage() {} func (*ConfigItem) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{35} + return fileDescriptor_77a6da22d6a3feb1, []int{36} } + func (m *ConfigItem) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ConfigItem.Unmarshal(m, b) } func (m *ConfigItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ConfigItem.Marshal(b, m, deterministic) } -func (dst *ConfigItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigItem.Merge(dst, src) +func (m *ConfigItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigItem.Merge(m, src) } func (m *ConfigItem) XXX_Size() int { return xxx_messageInfo_ConfigItem.Size(m) @@ -1898,16 +2001,17 @@ func (m *EventList) Reset() { *m = EventList{} } func (m *EventList) String() string { return proto.CompactTextString(m) } func (*EventList) ProtoMessage() {} func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{36} + return fileDescriptor_77a6da22d6a3feb1, []int{37} } + func (m *EventList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_EventList.Unmarshal(m, b) } func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_EventList.Marshal(b, m, deterministic) } -func (dst *EventList) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventList.Merge(dst, src) +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) } func (m *EventList) XXX_Size() int { return xxx_messageInfo_EventList.Size(m) @@ -1939,16 +2043,17 @@ func (m *ConsensusInfo) Reset() { *m = ConsensusInfo{} } func (m *ConsensusInfo) String() string { return proto.CompactTextString(m) } func (*ConsensusInfo) ProtoMessage() {} func (*ConsensusInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_rpc_d4110ef4f672b82e, []int{37} + return fileDescriptor_77a6da22d6a3feb1, []int{38} } + func (m *ConsensusInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ConsensusInfo.Unmarshal(m, b) } func (m *ConsensusInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ConsensusInfo.Marshal(b, m, deterministic) } -func (dst *ConsensusInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConsensusInfo.Merge(dst, src) +func (m *ConsensusInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusInfo.Merge(m, src) } func (m *ConsensusInfo) XXX_Size() int { return xxx_messageInfo_ConsensusInfo.Size(m) @@ -1981,9 +2086,12 @@ func (m *ConsensusInfo) GetBps() []string { } func init() { + proto.RegisterEnum("types.CommitStatus", CommitStatus_name, CommitStatus_value) + proto.RegisterEnum("types.VerifyStatus", VerifyStatus_name, VerifyStatus_value) proto.RegisterType((*BlockchainStatus)(nil), "types.BlockchainStatus") proto.RegisterType((*ChainId)(nil), "types.ChainId") proto.RegisterType((*ChainInfo)(nil), "types.ChainInfo") + proto.RegisterType((*ChainStats)(nil), "types.ChainStats") proto.RegisterType((*Input)(nil), "types.Input") proto.RegisterType((*Output)(nil), "types.Output") 
proto.RegisterType((*Empty)(nil), "types.Empty") @@ -2022,8 +2130,164 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "types.ConfigItem.PropsEntry") proto.RegisterType((*EventList)(nil), "types.EventList") proto.RegisterType((*ConsensusInfo)(nil), "types.ConsensusInfo") - proto.RegisterEnum("types.CommitStatus", CommitStatus_name, CommitStatus_value) - proto.RegisterEnum("types.VerifyStatus", VerifyStatus_name, VerifyStatus_value) +} + +func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } + +var fileDescriptor_77a6da22d6a3feb1 = []byte{ + // 2423 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x39, 0xeb, 0x76, 0x22, 0xc7, + 0xd1, 0x80, 0x04, 0x82, 0x02, 0xa4, 0x51, 0x5b, 0xde, 0xd5, 0xc7, 0xb7, 0x5e, 0x2b, 0x1d, 0xc7, + 0x96, 0x1d, 0x5b, 0xf6, 0x6a, 0x6d, 0xc7, 0xf1, 0x49, 0xe2, 0x20, 0x8c, 0x56, 0x1c, 0x4b, 0x48, + 0x69, 0xf0, 0x46, 0xce, 0x8f, 0x90, 0x11, 0xd3, 0xc0, 0x1c, 0x31, 0x17, 0xcf, 0x34, 0xba, 0xf8, + 0x9c, 0xfc, 0xca, 0x03, 0x24, 0x6f, 0x92, 0x77, 0xc9, 0x6b, 0xe4, 0x25, 0x72, 0xba, 0xba, 0x7b, + 0x2e, 0x08, 0xe5, 0x9c, 0xcd, 0x2f, 0x4d, 0x55, 0xd7, 0xbd, 0xaa, 0xab, 0xaa, 0x11, 0xd4, 0xa2, + 0x70, 0x7c, 0x10, 0x46, 0x81, 0x08, 0x48, 0x59, 0xdc, 0x87, 0x3c, 0x6e, 0x59, 0x57, 0xf3, 0x60, + 0x7c, 0x3d, 0x9e, 0xd9, 0xae, 0xaf, 0x0e, 0x5a, 0x4d, 0x7b, 0x3c, 0x0e, 0x16, 0xbe, 0xd0, 0x20, + 0xf8, 0x81, 0xc3, 0xf5, 0x77, 0x2d, 0x3c, 0x0c, 0xf5, 0x67, 0xc3, 0xe3, 0x22, 0x72, 0xc7, 0x86, + 0x28, 0xb2, 0x27, 0x9a, 0x81, 0xfe, 0xb3, 0x08, 0xd6, 0x51, 0x22, 0x74, 0x20, 0x6c, 0xb1, 0x88, + 0xc9, 0xfb, 0xb0, 0x75, 0xc5, 0x63, 0x31, 0x42, 0x6d, 0xa3, 0x99, 0x1d, 0xcf, 0x76, 0x8b, 0x7b, + 0xc5, 0xfd, 0x06, 0x6b, 0x4a, 0x34, 0x92, 0x9f, 0xd8, 0xf1, 0x8c, 0xbc, 0x0b, 0x75, 0xa4, 0x9b, + 0x71, 0x77, 0x3a, 0x13, 0xbb, 0xa5, 0xbd, 0xe2, 0xfe, 0x3a, 0x03, 0x89, 0x3a, 0x41, 0x0c, 0xf9, + 0x05, 0x6c, 0x8e, 0x03, 0x3f, 0xe6, 0x7e, 0xbc, 0x88, 0x47, 0xae, 0x3f, 0x09, 0x76, 0xd7, 0xf6, + 0x8a, 0xfb, 0x35, 0xd6, 0x4c, 0xb0, 0x3d, 0x7f, 0x12, 0x90, 0x5f, 0x02, 0x41, 0x39, 0x68, 0xc3, + 0xc8, 0x75, 0x94, 0xca, 0x75, 0x54, 0x89, 0x96, 0x74, 0xe4, 0x41, 0xcf, 0x91, 0x4a, 0x69, 0x00, + 0x1b, 0x1a, 0x24, 0x3b, 0x50, 0xf6, 0xec, 0xa9, 0x3b, 0x46, 0xeb, 0x6a, 0x4c, 0x01, 0xe4, 0x09, + 0x54, 0xc2, 0xc5, 0xd5, 0xdc, 0x1d, 0xa3, 0x41, 0x55, 0xa6, 0x21, 0xb2, 0x0b, 0x1b, 0x9e, 0xed, + 0xfa, 0x3e, 0x17, 0x68, 0x45, 0x95, 0x19, 0x90, 0x3c, 0x83, 0x5a, 0x62, 0x10, 0xaa, 0xad, 0xb1, + 0x14, 0x41, 0xff, 0x5e, 0x82, 0x9a, 0xd2, 0x28, 0x6d, 0x7d, 0x0e, 0x25, 0xd7, 0x41, 0x85, 0xf5, + 0xc3, 0xcd, 0x03, 0x4c, 0xcb, 0x81, 0xb6, 0x87, 0x95, 0x5c, 0x87, 0xb4, 0xa0, 0x7a, 0x15, 0xf6, + 0x17, 0xde, 0x15, 0x8f, 0x50, 0x7f, 0x93, 0x25, 0x30, 0xa1, 0xd0, 0xf0, 0xec, 0x3b, 0x8c, 0x6a, + 0xec, 0xfe, 0xc4, 0xd1, 0x8c, 0x75, 0x96, 0xc3, 0x49, 0x5b, 0x3c, 0xfb, 0x4e, 0x04, 0xd7, 0xdc, + 0x8f, 0x75, 0x08, 0x52, 0x04, 0x79, 0x1f, 0x36, 0x63, 0x61, 0x5f, 0xbb, 0xfe, 0xd4, 0x73, 0x7d, + 0xd7, 0x5b, 0x78, 0xbb, 0x65, 0x24, 0x59, 0xc2, 0x4a, 0x4d, 0x22, 0x10, 0xf6, 0x5c, 0xa3, 0x77, + 0x2b, 0x48, 0x95, 0xc3, 0x49, 0x4b, 0xa7, 0x76, 0x1c, 0x46, 0xee, 0x98, 0xef, 0x6e, 0xe0, 0x79, + 0x02, 0x4b, 0x2b, 0x7c, 0xdb, 0xe3, 0xea, 0xb0, 0xaa, 0xac, 0x48, 0x10, 0xf4, 0x3d, 0x80, 0x8e, + 0x29, 0x97, 0x58, 0xc6, 0x3b, 0xe2, 0x61, 0x10, 0x09, 0x9d, 0x06, 0x0d, 0xd1, 0x31, 0x94, 0x7b, + 0x7e, 0xb8, 0x10, 0x84, 0xc0, 0x7a, 0xa6, 0x86, 0xf0, 0x5b, 0x26, 0xc3, 0x76, 0x9c, 0x88, 0xc7, + 0xf1, 0x6e, 0x69, 0x6f, 0x6d, 0xbf, 0xc1, 0x0c, 0x28, 0x93, 0x7a, 0x63, 
0xcf, 0x17, 0x2a, 0x3a, + 0x0d, 0xa6, 0x00, 0xa9, 0x24, 0x1e, 0x47, 0x6e, 0x28, 0x74, 0x4c, 0x34, 0x44, 0x27, 0x50, 0x39, + 0x5f, 0x08, 0xa9, 0x65, 0x07, 0xca, 0xae, 0xef, 0xf0, 0x3b, 0x54, 0xd3, 0x64, 0x0a, 0xc8, 0xeb, + 0x29, 0xfe, 0xef, 0x7a, 0x36, 0xa0, 0xdc, 0xf5, 0x42, 0x71, 0x4f, 0x7f, 0x0e, 0xf5, 0x81, 0xeb, + 0x4f, 0xe7, 0xfc, 0xe8, 0x5e, 0xf0, 0x8c, 0x94, 0x62, 0x46, 0x0a, 0x7d, 0x1f, 0x36, 0xdb, 0xea, + 0x5e, 0xb6, 0x97, 0xb5, 0xe5, 0xe8, 0xfe, 0x9c, 0xd2, 0xf9, 0x0e, 0x0b, 0x02, 0x21, 0xed, 0xd5, + 0x18, 0x4d, 0x69, 0x40, 0x19, 0x45, 0x49, 0xa1, 0xdd, 0xc0, 0x6f, 0xf2, 0x1c, 0xa0, 0x13, 0x78, + 0xa1, 0xd4, 0xc0, 0x1d, 0x5d, 0xd5, 0x19, 0x0c, 0xfd, 0x77, 0x11, 0xd6, 0x2f, 0x38, 0x8f, 0xc8, + 0xc7, 0x69, 0x18, 0x54, 0xe9, 0x12, 0x5d, 0xba, 0xf2, 0x54, 0xdb, 0x98, 0x86, 0xe6, 0x25, 0xd4, + 0xe4, 0xad, 0xc3, 0xa2, 0x44, 0x7d, 0xf5, 0xc3, 0xb7, 0x35, 0x7d, 0x9f, 0xdf, 0xe2, 0xfd, 0xef, + 0x07, 0xc2, 0x1d, 0x73, 0x96, 0xd2, 0x49, 0x0f, 0x63, 0x61, 0x0b, 0x15, 0xcf, 0x32, 0x53, 0x80, + 0x8c, 0xe7, 0xcc, 0x75, 0x1c, 0xee, 0x63, 0x3c, 0xab, 0x4c, 0x43, 0xb2, 0xc0, 0xe6, 0x76, 0x3c, + 0xeb, 0xcc, 0xf8, 0xf8, 0x1a, 0x6b, 0x78, 0x8d, 0xa5, 0x08, 0x59, 0x9a, 0x31, 0x9f, 0x4f, 0x42, + 0xce, 0x23, 0x2c, 0xdd, 0x2a, 0x4b, 0x60, 0x19, 0xa1, 0x1b, 0x1e, 0xc5, 0x6e, 0xe0, 0x63, 0xd5, + 0xd6, 0x98, 0x01, 0xe9, 0x27, 0x50, 0x95, 0xee, 0x9c, 0xba, 0xb1, 0x20, 0x3f, 0x83, 0xb2, 0xa4, + 0x96, 0xee, 0xae, 0xed, 0xd7, 0x0f, 0xeb, 0x19, 0x77, 0x99, 0x3a, 0xa1, 0x37, 0x00, 0x92, 0xf4, + 0xc2, 0x8e, 0x6c, 0x2f, 0x5e, 0x59, 0xa4, 0xd2, 0xf8, 0x6c, 0x6b, 0xd3, 0x90, 0xa4, 0x4d, 0xee, + 0x6f, 0x93, 0xe1, 0xb7, 0xa4, 0x0d, 0x26, 0x93, 0x98, 0xab, 0xc2, 0x69, 0x32, 0x0d, 0x11, 0x0b, + 0xd6, 0xec, 0x78, 0x8c, 0x2e, 0x56, 0x99, 0xfc, 0xa4, 0x5f, 0x01, 0x5c, 0xd8, 0x53, 0xae, 0xf5, + 0xa6, 0x7c, 0xc5, 0x1c, 0x9f, 0xd1, 0x51, 0x4a, 0x75, 0xd0, 0x3b, 0xd8, 0xc4, 0xe0, 0x1f, 0x05, + 0xce, 0xbd, 0x14, 0x81, 0x1d, 0x10, 0xef, 0xb4, 0x29, 0x7a, 0x04, 0x32, 0x32, 0x4b, 0x2b, 0x65, + 0x66, 0xed, 0x7e, 0x0f, 0xd6, 0xaf, 0x02, 0xe7, 0x1e, 0xad, 0xae, 0x1f, 0x5a, 0x3a, 0x4e, 0x89, + 0x1a, 0x86, 0xa7, 0xf4, 0x2f, 0xb0, 0x95, 0xd1, 0x8c, 0x86, 0x53, 0x68, 0xc8, 0x20, 0x05, 0x91, + 0xaf, 0x9a, 0x9d, 0x0a, 0x5c, 0x0e, 0x47, 0x3e, 0x84, 0x4a, 0x68, 0x4f, 0x65, 0x03, 0x52, 0x55, + 0xb4, 0x6d, 0xd2, 0x90, 0xf8, 0xcf, 0x34, 0x01, 0xfd, 0x95, 0xd6, 0x70, 0xc2, 0x6d, 0x47, 0xe7, + 0xf0, 0x3d, 0xa8, 0xa8, 0xbe, 0xa8, 0x93, 0xd8, 0xc8, 0x1a, 0xc7, 0xf4, 0x19, 0xfd, 0x2b, 0x34, + 0x11, 0x71, 0xc6, 0x85, 0xed, 0xd8, 0xc2, 0x5e, 0x99, 0xc9, 0x8f, 0x64, 0x26, 0xa5, 0x60, 0x6d, + 0x08, 0xc9, 0x8a, 0x52, 0x2a, 0x99, 0xa6, 0x90, 0x05, 0x26, 0xee, 0xd4, 0x15, 0x54, 0xa5, 0x6c, + 0xc0, 0x24, 0x7e, 0xeb, 0x58, 0xaf, 0x2a, 0x27, 0x6d, 0xd8, 0xce, 0xa9, 0x47, 0xcb, 0x3f, 0x5e, + 0xb2, 0x7c, 0x27, 0xab, 0xce, 0x50, 0x26, 0x1e, 0x70, 0x68, 0x74, 0x02, 0xcf, 0x73, 0x05, 0xe3, + 0xf1, 0x62, 0xbe, 0xba, 0x5f, 0x7e, 0x08, 0x65, 0x1e, 0x45, 0x81, 0xb2, 0x7f, 0xf3, 0xf0, 0x2d, + 0x33, 0x79, 0x90, 0x4f, 0x8d, 0x6d, 0xa6, 0x28, 0x64, 0xf6, 0x1d, 0x2e, 0x6c, 0x77, 0xae, 0x87, + 0xad, 0x86, 0x68, 0x1b, 0xac, 0xac, 0x1a, 0x34, 0xf4, 0x13, 0xd8, 0x88, 0x10, 0x32, 0x96, 0xe6, + 0x05, 0x2b, 0x4a, 0x66, 0x68, 0xe8, 0x10, 0x1a, 0xaf, 0x79, 0xe4, 0x4e, 0xee, 0xb5, 0xa5, 0xff, + 0x07, 0x25, 0x71, 0xa7, 0x3b, 0x4a, 0x4d, 0x73, 0x0e, 0xef, 0x58, 0x49, 0xdc, 0x3d, 0x66, 0xb0, + 0x62, 0xcf, 0x19, 0x4c, 0x87, 0xf2, 0xde, 0x46, 0x71, 0xe0, 0xdb, 0x73, 0xd9, 0xd1, 0x42, 0x3b, + 0x8e, 0xc3, 0x59, 0x64, 0xc7, 0x5c, 0x0f, 0x94, 0x0c, 0x86, 0xec, 0xc3, 0x86, 0xde, 0x78, 0x74, + 
0x26, 0xcd, 0x0c, 0xd6, 0x6d, 0x92, 0x99, 0x63, 0x3a, 0x83, 0x46, 0xcf, 0x93, 0x83, 0xe8, 0x38, + 0x88, 0x3c, 0x5b, 0x56, 0xd3, 0xda, 0xad, 0x3b, 0x59, 0x6a, 0x7f, 0x99, 0x56, 0xce, 0xe4, 0xb1, + 0x4c, 0x7e, 0x30, 0x77, 0xa4, 0x42, 0x94, 0x5f, 0x63, 0x06, 0x94, 0x27, 0x3e, 0xbf, 0xc5, 0x13, + 0x15, 0x57, 0x03, 0xd2, 0x2f, 0x60, 0x63, 0xa0, 0x67, 0xea, 0x13, 0xa8, 0xd8, 0x5e, 0xa6, 0x7b, + 0x6b, 0x48, 0xa6, 0xf4, 0x76, 0xc6, 0x7d, 0xdd, 0x47, 0xf0, 0x9b, 0xfe, 0x06, 0xd6, 0x5f, 0x07, + 0x02, 0x67, 0xed, 0xd8, 0xf6, 0x1d, 0xd7, 0x91, 0xcd, 0x53, 0xb1, 0xa5, 0x88, 0x8c, 0xc4, 0x52, + 0x56, 0x22, 0x3d, 0x04, 0x90, 0xdc, 0xfa, 0x32, 0x6e, 0x26, 0x5b, 0x49, 0x0d, 0xb7, 0x90, 0x1d, + 0x28, 0xa7, 0x41, 0x6a, 0x32, 0x05, 0x50, 0x07, 0xb6, 0x74, 0x98, 0x24, 0x2b, 0xae, 0x33, 0xfb, + 0xb0, 0x61, 0x76, 0x84, 0xfc, 0x4e, 0xa3, 0x3d, 0x62, 0xe6, 0x98, 0x7c, 0x00, 0x95, 0x9b, 0x40, + 0xa8, 0xbb, 0x2c, 0x2b, 0x65, 0xcb, 0x64, 0x54, 0x8b, 0x62, 0xfa, 0x98, 0x7e, 0x0d, 0xd5, 0x44, + 0xbc, 0xb2, 0xab, 0x94, 0xd8, 0xf5, 0x1c, 0x20, 0x71, 0x4d, 0xc6, 0x71, 0x4d, 0xa6, 0x37, 0xc5, + 0xd0, 0xdf, 0x2a, 0x5e, 0xd3, 0xc2, 0x6f, 0x02, 0x49, 0x96, 0x6f, 0xe1, 0xf2, 0x9c, 0xa9, 0x93, + 0x65, 0xf1, 0xb4, 0x0d, 0x1b, 0xfd, 0xc0, 0xe1, 0x8c, 0xff, 0x88, 0xb7, 0xd8, 0xf5, 0x78, 0xb0, + 0x48, 0x06, 0xa9, 0x06, 0xd5, 0xb6, 0xe7, 0x85, 0x81, 0xcf, 0x93, 0xa0, 0xa6, 0x08, 0xfa, 0x39, + 0xac, 0xf7, 0x6d, 0x8f, 0xcb, 0x8c, 0xc9, 0x85, 0x47, 0xc7, 0x14, 0xbf, 0xa5, 0xcc, 0x2b, 0x35, + 0xfc, 0x74, 0x22, 0x0d, 0x48, 0xc7, 0x50, 0x95, 0x5c, 0xe8, 0xf3, 0xbb, 0x19, 0xce, 0xd4, 0x6c, + 0x79, 0xac, 0xc5, 0xec, 0x40, 0x39, 0xb8, 0xf5, 0x75, 0x2f, 0x6a, 0x30, 0x05, 0x90, 0x3d, 0xa8, + 0x3b, 0x3c, 0x16, 0xae, 0x6f, 0x0b, 0x39, 0xdb, 0xd4, 0x56, 0x92, 0x45, 0xd1, 0x2e, 0xd4, 0xe5, + 0xfc, 0x8a, 0x75, 0xce, 0x5b, 0x50, 0xf5, 0x83, 0x13, 0x35, 0x5c, 0x8b, 0x6a, 0x48, 0x1a, 0x18, + 0x07, 0xe8, 0x2c, 0xb8, 0x1d, 0xf0, 0xf9, 0x44, 0x6f, 0xc1, 0x09, 0x4c, 0xdf, 0x81, 0xda, 0x77, + 0xdc, 0x74, 0x71, 0x0b, 0xd6, 0xae, 0xf9, 0x3d, 0x86, 0xb8, 0xc6, 0xe4, 0x27, 0xfd, 0x5b, 0x09, + 0x60, 0xc0, 0xa3, 0x1b, 0x1e, 0xa1, 0x37, 0x5f, 0x40, 0x25, 0xc6, 0xdb, 0xaa, 0xd3, 0xf0, 0x8e, + 0xa9, 0x8f, 0x84, 0xe4, 0x40, 0xdd, 0xe6, 0xae, 0x2f, 0xa2, 0x7b, 0xa6, 0x89, 0x25, 0xdb, 0x38, + 0xf0, 0x27, 0xae, 0xa9, 0x96, 0x15, 0x6c, 0x1d, 0x3c, 0xd7, 0x6c, 0x8a, 0xb8, 0xf5, 0x6b, 0xa8, + 0x67, 0xa4, 0xa5, 0xd6, 0x15, 0xb5, 0x75, 0xe9, 0x1e, 0xa5, 0x92, 0xae, 0x80, 0xaf, 0x4b, 0x5f, + 0x15, 0x5b, 0xa7, 0x50, 0xcf, 0x48, 0x5c, 0xc1, 0xfa, 0x41, 0x96, 0x35, 0x9d, 0x45, 0x8a, 0xa9, + 0x27, 0xb8, 0x97, 0x91, 0x46, 0x7f, 0x92, 0x9b, 0x95, 0x39, 0x20, 0x87, 0x50, 0x0e, 0xa3, 0x20, + 0x8c, 0xb5, 0x33, 0xcf, 0x1e, 0xb0, 0x1e, 0x5c, 0xc8, 0x63, 0xe5, 0x8b, 0x22, 0x6d, 0xc9, 0x31, + 0x9f, 0x20, 0xdf, 0xc4, 0x13, 0xfa, 0x02, 0x6a, 0xdd, 0x1b, 0xee, 0x0b, 0x33, 0x04, 0xb9, 0x04, + 0x96, 0x87, 0x20, 0x52, 0x30, 0x7d, 0x46, 0x7b, 0xd0, 0xec, 0xe4, 0x9e, 0x54, 0x04, 0xd6, 0x25, + 0x9d, 0x29, 0x5f, 0xf9, 0x2d, 0x71, 0xf8, 0x06, 0x53, 0x0a, 0xf1, 0x5b, 0xda, 0x75, 0x15, 0x9a, + 0x9b, 0x28, 0x3f, 0x3f, 0xfa, 0x57, 0xd1, 0x8c, 0x23, 0xfd, 0x1a, 0xac, 0x41, 0x79, 0x78, 0x39, + 0x3a, 0xff, 0xce, 0x2a, 0x90, 0x1d, 0xb0, 0x86, 0x97, 0xa3, 0xfe, 0x79, 0xbf, 0xd3, 0x1d, 0x0d, + 0xcf, 0xcf, 0x47, 0xa7, 0xe7, 0x7f, 0xb4, 0x8a, 0xe4, 0x6d, 0xd8, 0x1e, 0x5e, 0x8e, 0xda, 0xa7, + 0xac, 0xdb, 0xfe, 0xf6, 0x87, 0x51, 0xf7, 0xb2, 0x37, 0x18, 0x0e, 0xac, 0x12, 0x79, 0x0b, 0xb6, + 0x86, 0x97, 0xa3, 0x5e, 0xff, 0x75, 0xfb, 0xb4, 0xf7, 0xed, 0xe8, 0xa4, 0x3d, 0x38, 0xb1, 0xd6, + 0x96, 0x90, 0x83, 0xde, 
0xab, 0xbe, 0xb5, 0xae, 0x05, 0x18, 0xe4, 0xf1, 0x39, 0x3b, 0x6b, 0x0f, + 0xad, 0x32, 0xf9, 0x7f, 0x78, 0x8a, 0xe8, 0xc1, 0xf7, 0xc7, 0xc7, 0xbd, 0x4e, 0xaf, 0xdb, 0x1f, + 0x8e, 0x8e, 0xda, 0xa7, 0xed, 0x7e, 0xa7, 0x6b, 0x55, 0x34, 0xcf, 0x49, 0x7b, 0x30, 0x1a, 0xb4, + 0xcf, 0xba, 0xca, 0x26, 0x6b, 0x23, 0x11, 0x35, 0xec, 0xb2, 0x7e, 0xfb, 0x74, 0xd4, 0x65, 0xec, + 0x9c, 0x59, 0xb5, 0x8f, 0x26, 0x66, 0x70, 0x69, 0x9f, 0x76, 0xc0, 0x7a, 0xdd, 0x65, 0xbd, 0xe3, + 0x1f, 0x46, 0x83, 0x61, 0x7b, 0xf8, 0xfd, 0x40, 0xb9, 0xb7, 0x07, 0xcf, 0xf2, 0x58, 0x69, 0xdf, + 0xa8, 0x7f, 0x3e, 0x1c, 0x9d, 0xb5, 0x87, 0x9d, 0x13, 0xab, 0x48, 0x9e, 0x43, 0x2b, 0x4f, 0x91, + 0x73, 0xaf, 0x74, 0xf8, 0x8f, 0x6d, 0xd8, 0x6a, 0xf3, 0x68, 0x1a, 0xb0, 0x8b, 0x8e, 0x2c, 0x75, + 0xf9, 0x96, 0x7a, 0x01, 0x35, 0xd9, 0x94, 0x06, 0xb8, 0x0f, 0x9b, 0xf6, 0xaa, 0xdb, 0x54, 0x6b, + 0xc5, 0x20, 0xa2, 0x05, 0xf2, 0x02, 0x2a, 0x67, 0xf8, 0x62, 0x27, 0x66, 0xef, 0x56, 0x60, 0xcc, + 0xf8, 0x8f, 0x0b, 0x1e, 0x8b, 0xd6, 0x66, 0x1e, 0x4d, 0x0b, 0xe4, 0x0b, 0x80, 0xf4, 0x1d, 0x4f, + 0x92, 0x2a, 0x91, 0x6f, 0x96, 0xd6, 0xd3, 0xec, 0xfa, 0x91, 0x79, 0xe8, 0xd3, 0x02, 0xf9, 0x0c, + 0x1a, 0xaf, 0xb8, 0x48, 0x9f, 0xb7, 0x79, 0x46, 0x2b, 0xf7, 0xc0, 0xf5, 0x27, 0x01, 0x2d, 0x90, + 0x03, 0xfd, 0x1a, 0x96, 0x22, 0x96, 0xc8, 0xb7, 0xb3, 0xe4, 0xf8, 0x38, 0xa4, 0x05, 0xf2, 0x0d, + 0x58, 0xb2, 0x90, 0x33, 0x9b, 0x56, 0x4c, 0x0c, 0x61, 0xba, 0x7f, 0xb7, 0x9e, 0x3c, 0xdc, 0xc8, + 0xe4, 0x29, 0x2d, 0x90, 0x23, 0xd8, 0x4e, 0x04, 0x24, 0x4b, 0xde, 0x0a, 0x09, 0xbb, 0xab, 0x96, + 0x2c, 0x2d, 0xe3, 0x05, 0x6c, 0x25, 0x32, 0x06, 0x22, 0xe2, 0xb6, 0xb7, 0x64, 0x7a, 0x6e, 0xb7, + 0xa4, 0x85, 0xcf, 0x8a, 0xa4, 0x0d, 0x4f, 0x1f, 0xa8, 0x5d, 0xc9, 0xba, 0x72, 0xb9, 0x43, 0x11, + 0x07, 0x50, 0x7d, 0xc5, 0x95, 0x04, 0xb2, 0x22, 0xd1, 0xcb, 0x4a, 0xc9, 0xef, 0xc0, 0x32, 0xf4, + 0xe9, 0x36, 0xbb, 0x82, 0xef, 0x11, 0x8d, 0xe4, 0x1b, 0x4c, 0x66, 0xb2, 0xa8, 0x93, 0x27, 0xcb, + 0xdb, 0xbc, 0x8e, 0xd4, 0xdb, 0x0f, 0xf1, 0x53, 0xee, 0xd0, 0x02, 0xd9, 0x87, 0xf2, 0x2b, 0x2e, + 0x86, 0x97, 0x2b, 0xb5, 0xa6, 0x0b, 0x1e, 0x2d, 0x90, 0xcf, 0x01, 0x8c, 0xaa, 0x47, 0xc8, 0xad, + 0x84, 0xbc, 0xe7, 0x1b, 0x07, 0x0f, 0x91, 0x8b, 0xf1, 0x31, 0x77, 0x43, 0xb1, 0x92, 0xcb, 0x14, + 0xb6, 0xa6, 0xa1, 0x05, 0xb9, 0xba, 0xbf, 0xe2, 0xa2, 0x7d, 0xd4, 0x5b, 0x49, 0x0f, 0x66, 0xfd, + 0x3b, 0xea, 0x29, 0xda, 0x01, 0xf7, 0x9d, 0xe1, 0x25, 0x49, 0x8d, 0x6d, 0xad, 0x5a, 0x69, 0xa9, + 0xbc, 0xec, 0x95, 0x81, 0x3b, 0xf5, 0xf3, 0xb4, 0x39, 0x1f, 0x3f, 0x86, 0xaa, 0x6a, 0x1a, 0xab, + 0xe5, 0x65, 0x37, 0x61, 0x8c, 0x48, 0x55, 0x69, 0x18, 0x5e, 0x92, 0x66, 0x42, 0x2d, 0x4b, 0x28, + 0xb9, 0x7f, 0xcb, 0xeb, 0x37, 0xde, 0x26, 0x59, 0x22, 0xaa, 0x37, 0xfc, 0xb7, 0x12, 0x41, 0x0a, + 0x5a, 0x20, 0xbf, 0xc7, 0x12, 0x41, 0xa8, 0xed, 0x3b, 0x17, 0x51, 0x10, 0x4c, 0x92, 0x1e, 0x91, + 0xff, 0x29, 0x21, 0xb1, 0x53, 0xa3, 0x91, 0x16, 0x73, 0xd0, 0xec, 0x44, 0x5c, 0xf2, 0xeb, 0x1f, + 0x16, 0xb6, 0x92, 0xb7, 0xb1, 0xda, 0xc1, 0x5b, 0x4b, 0x2b, 0x35, 0x5e, 0x9f, 0xba, 0xcc, 0x81, + 0x82, 0xe3, 0xa5, 0xfa, 0x27, 0x79, 0x72, 0xed, 0xd8, 0x67, 0x50, 0x3f, 0x0d, 0xc6, 0xd7, 0x6f, + 0xa0, 0xe4, 0x10, 0x9a, 0xdf, 0xfb, 0xf3, 0x37, 0xe3, 0xf9, 0x12, 0x9a, 0x6a, 0xc9, 0x37, 0x3c, + 0xc6, 0xe9, 0xec, 0xea, 0xbf, 0x9a, 0xaf, 0x7b, 0x97, 0xe5, 0x7b, 0xa0, 0x6b, 0x75, 0x63, 0x7e, + 0x09, 0xcd, 0x3f, 0x2c, 0x78, 0x74, 0xdf, 0x09, 0x7c, 0x11, 0xd9, 0xe3, 0xb4, 0x01, 0x22, 0xf6, + 0x11, 0xa6, 0x36, 0x90, 0x1c, 0x93, 0xca, 0xf6, 0x76, 0x36, 0xb3, 0x8a, 0xfd, 0xc9, 0x03, 0x94, + 0x49, 0xda, 0x0b, 0x2c, 0x13, 0xdc, 0xfe, 0x48, 
0xf6, 0xa7, 0x1b, 0xbd, 0x0b, 0xb6, 0xb6, 0x32, + 0xb8, 0x24, 0x01, 0x92, 0xe5, 0x35, 0xee, 0xc9, 0xdb, 0x99, 0xdd, 0x79, 0x89, 0xc3, 0xac, 0xdb, + 0xd8, 0x68, 0xb7, 0xd2, 0x2c, 0x2b, 0xc6, 0xe5, 0xd2, 0x52, 0x3f, 0x10, 0x25, 0x86, 0x2e, 0xbd, + 0x26, 0xd4, 0x18, 0x52, 0xf5, 0x89, 0x6f, 0x86, 0x47, 0xd8, 0x97, 0xde, 0x18, 0xb4, 0x40, 0x3e, + 0xc1, 0x02, 0x4b, 0x56, 0xe8, 0xec, 0xd2, 0x9c, 0x58, 0x6a, 0x4e, 0x31, 0x7d, 0xd8, 0xce, 0x71, + 0x07, 0xd2, 0x3d, 0xd9, 0xb8, 0x78, 0xec, 0xce, 0x85, 0x5a, 0x30, 0x5b, 0xb9, 0x55, 0x09, 0x1b, + 0xf2, 0x4b, 0xf5, 0x93, 0x0f, 0x22, 0xe2, 0x55, 0x2c, 0x56, 0x96, 0x45, 0x87, 0xe5, 0x4b, 0x68, + 0x4a, 0x97, 0xd2, 0x95, 0xd8, 0x10, 0x25, 0x5b, 0x74, 0x32, 0xf8, 0x52, 0x22, 0x5a, 0x20, 0x5f, + 0xe1, 0x55, 0xcd, 0xaf, 0x65, 0xab, 0x27, 0x47, 0x8e, 0x86, 0x16, 0xc8, 0x77, 0x60, 0x75, 0x66, + 0xb6, 0x3f, 0xe5, 0x67, 0xdc, 0xbb, 0xe2, 0x51, 0x3c, 0x73, 0x43, 0xf2, 0x34, 0x99, 0xf8, 0x06, + 0xa5, 0x48, 0x5a, 0xcf, 0x1e, 0x39, 0x60, 0x3c, 0x9c, 0xdf, 0xd3, 0xc2, 0xd1, 0xde, 0x9f, 0x9e, + 0x4f, 0x5d, 0x31, 0x5b, 0x5c, 0x1d, 0x8c, 0x03, 0xef, 0x53, 0x5b, 0x2e, 0x27, 0x6e, 0xa0, 0xfe, + 0x7e, 0x8a, 0x9c, 0x57, 0x15, 0xfc, 0x57, 0xc0, 0xcb, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x7b, + 0xe4, 0xc4, 0xfc, 0x70, 0x18, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2046,6 +2310,8 @@ type AergoRPCServiceClient interface { Blockchain(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*BlockchainStatus, error) // Returns current blockchain's basic information GetChainInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ChainInfo, error) + // Returns current chain statistics + ChainStat(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ChainStats, error) // Returns list of Blocks without body according to request ListBlockHeaders(ctx context.Context, in *ListParams, opts ...grpc.CallOption) (*BlockHeaderList, error) // Returns list of block metadata (hash, header, and number of transactions) according to request @@ -2114,6 +2380,8 @@ type AergoRPCServiceClient interface { GetServerInfo(ctx context.Context, in *KeyParams, opts ...grpc.CallOption) (*ServerInfo, error) // Returns status of consensus and bps GetConsensusInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ConsensusInfo, error) + // Add & remove member of raft cluster + ChangeMembership(ctx context.Context, in *MembershipChange, opts ...grpc.CallOption) (*MembershipChangeReply, error) } type aergoRPCServiceClient struct { @@ -2160,6 +2428,15 @@ func (c *aergoRPCServiceClient) GetChainInfo(ctx context.Context, in *Empty, opt return out, nil } +func (c *aergoRPCServiceClient) ChainStat(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ChainStats, error) { + out := new(ChainStats) + err := c.cc.Invoke(ctx, "/types.AergoRPCService/ChainStat", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *aergoRPCServiceClient) ListBlockHeaders(ctx context.Context, in *ListParams, opts ...grpc.CallOption) (*BlockHeaderList, error) { out := new(BlockHeaderList) err := c.cc.Invoke(ctx, "/types.AergoRPCService/ListBlockHeaders", in, out, opts...) 
@@ -2535,6 +2812,15 @@ func (c *aergoRPCServiceClient) GetConsensusInfo(ctx context.Context, in *Empty, return out, nil } +func (c *aergoRPCServiceClient) ChangeMembership(ctx context.Context, in *MembershipChange, opts ...grpc.CallOption) (*MembershipChangeReply, error) { + out := new(MembershipChangeReply) + err := c.cc.Invoke(ctx, "/types.AergoRPCService/ChangeMembership", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // AergoRPCServiceServer is the server API for AergoRPCService service. type AergoRPCServiceServer interface { // Returns the current state of this node @@ -2545,6 +2831,8 @@ type AergoRPCServiceServer interface { Blockchain(context.Context, *Empty) (*BlockchainStatus, error) // Returns current blockchain's basic information GetChainInfo(context.Context, *Empty) (*ChainInfo, error) + // Returns current chain statistics + ChainStat(context.Context, *Empty) (*ChainStats, error) // Returns list of Blocks without body according to request ListBlockHeaders(context.Context, *ListParams) (*BlockHeaderList, error) // Returns list of block metadata (hash, header, and number of transactions) according to request @@ -2613,6 +2901,8 @@ type AergoRPCServiceServer interface { GetServerInfo(context.Context, *KeyParams) (*ServerInfo, error) // Returns status of consensus and bps GetConsensusInfo(context.Context, *Empty) (*ConsensusInfo, error) + // Add & remove member of raft cluster + ChangeMembership(context.Context, *MembershipChange) (*MembershipChangeReply, error) } func RegisterAergoRPCServiceServer(s *grpc.Server, srv AergoRPCServiceServer) { @@ -2691,6 +2981,24 @@ func _AergoRPCService_GetChainInfo_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _AergoRPCService_ChainStat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AergoRPCServiceServer).ChainStat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.AergoRPCService/ChainStat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AergoRPCServiceServer).ChainStat(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + func _AergoRPCService_ListBlockHeaders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListParams) if err := dec(in); err != nil { @@ -3312,6 +3620,24 @@ func _AergoRPCService_GetConsensusInfo_Handler(srv interface{}, ctx context.Cont return interceptor(ctx, in, info, handler) } +func _AergoRPCService_ChangeMembership_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MembershipChange) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AergoRPCServiceServer).ChangeMembership(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/types.AergoRPCService/ChangeMembership", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AergoRPCServiceServer).ChangeMembership(ctx, req.(*MembershipChange)) + } + return interceptor(ctx, in, info, handler) +} + var _AergoRPCService_serviceDesc = grpc.ServiceDesc{ ServiceName: "types.AergoRPCService", 
HandlerType: (*AergoRPCServiceServer)(nil), @@ -3332,6 +3658,10 @@ var _AergoRPCService_serviceDesc = grpc.ServiceDesc{ MethodName: "GetChainInfo", Handler: _AergoRPCService_GetChainInfo_Handler, }, + { + MethodName: "ChainStat", + Handler: _AergoRPCService_ChainStat_Handler, + }, { MethodName: "ListBlockHeaders", Handler: _AergoRPCService_ListBlockHeaders_Handler, @@ -3456,6 +3786,10 @@ var _AergoRPCService_serviceDesc = grpc.ServiceDesc{ MethodName: "GetConsensusInfo", Handler: _AergoRPCService_GetConsensusInfo_Handler, }, + { + MethodName: "ChangeMembership", + Handler: _AergoRPCService_ChangeMembership_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -3476,155 +3810,3 @@ var _AergoRPCService_serviceDesc = grpc.ServiceDesc{ }, Metadata: "rpc.proto", } - -func init() { proto.RegisterFile("rpc.proto", fileDescriptor_rpc_d4110ef4f672b82e) } - -var fileDescriptor_rpc_d4110ef4f672b82e = []byte{ - // 2325 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0xdd, 0x76, 0xdb, 0xc6, - 0xf1, 0x27, 0x29, 0x91, 0x22, 0x87, 0xa4, 0x04, 0x6d, 0x64, 0x5b, 0x7f, 0xfe, 0x1d, 0x47, 0xdd, - 0xa6, 0x8e, 0xe2, 0xda, 0x8a, 0x2d, 0xd7, 0xa9, 0x9b, 0xd3, 0x36, 0x85, 0x18, 0xca, 0xe2, 0x89, - 0x4c, 0xa9, 0x4b, 0xda, 0x55, 0x7a, 0x51, 0x16, 0x22, 0x96, 0x22, 0x8e, 0x08, 0x2c, 0x02, 0x2c, - 0xf5, 0x91, 0xdb, 0x3e, 0x40, 0x2f, 0xfa, 0x20, 0x7d, 0x96, 0xf6, 0x89, 0x7a, 0xf6, 0x0b, 0x1f, - 0x14, 0xdc, 0x73, 0xdc, 0x2b, 0x62, 0x66, 0xe7, 0x6b, 0x67, 0x66, 0x67, 0x7f, 0x4b, 0x68, 0x44, - 0xe1, 0x64, 0x2f, 0x8c, 0x18, 0x67, 0xa8, 0xca, 0x6f, 0x43, 0x1a, 0x77, 0xac, 0xf3, 0x39, 0x9b, - 0x5c, 0x4e, 0x66, 0x8e, 0x17, 0xa8, 0x85, 0x4e, 0xdb, 0x99, 0x4c, 0xd8, 0x22, 0xe0, 0x9a, 0x84, - 0x80, 0xb9, 0x54, 0x7f, 0x37, 0xc2, 0xfd, 0x50, 0x7f, 0xb6, 0x7c, 0xca, 0x23, 0x4f, 0x1b, 0xc3, - 0xff, 0x2c, 0x83, 0x75, 0x90, 0x18, 0x1a, 0x72, 0x87, 0x2f, 0x62, 0xf4, 0x18, 0x36, 0xce, 0x69, - 0xcc, 0xc7, 0xd2, 0xc3, 0x78, 0xe6, 0xc4, 0xb3, 0xed, 0xf2, 0x4e, 0x79, 0xb7, 0x45, 0xda, 0x82, - 0x2d, 0xc5, 0x8f, 0x9c, 0x78, 0x86, 0x3e, 0x83, 0xa6, 0x94, 0x9b, 0x51, 0xef, 0x62, 0xc6, 0xb7, - 0x2b, 0x3b, 0xe5, 0xdd, 0x55, 0x02, 0x82, 0x75, 0x24, 0x39, 0xe8, 0x17, 0xb0, 0x3e, 0x61, 0x41, - 0x4c, 0x83, 0x78, 0x11, 0x8f, 0xbd, 0x60, 0xca, 0xb6, 0x57, 0x76, 0xca, 0xbb, 0x0d, 0xd2, 0x4e, - 0xb8, 0xfd, 0x60, 0xca, 0xd0, 0x2f, 0x01, 0x49, 0x3b, 0x32, 0x86, 0xb1, 0xe7, 0x2a, 0x97, 0xab, - 0xd2, 0xa5, 0x8c, 0xa4, 0x2b, 0x16, 0xfa, 0xae, 0x70, 0x8a, 0x19, 0xac, 0x69, 0x12, 0x6d, 0x41, - 0xd5, 0x77, 0x2e, 0xbc, 0x89, 0x8c, 0xae, 0x41, 0x14, 0x81, 0xee, 0x43, 0x2d, 0x5c, 0x9c, 0xcf, - 0xbd, 0x89, 0x0c, 0xa8, 0x4e, 0x34, 0x85, 0xb6, 0x61, 0xcd, 0x77, 0xbc, 0x20, 0xa0, 0x5c, 0x46, - 0x51, 0x27, 0x86, 0x44, 0x0f, 0xa1, 0x91, 0x04, 0x24, 0xdd, 0x36, 0x48, 0xca, 0xc0, 0x7f, 0xaf, - 0x40, 0x43, 0x79, 0x14, 0xb1, 0x3e, 0x82, 0x8a, 0xe7, 0x4a, 0x87, 0xcd, 0xfd, 0xf5, 0x3d, 0x59, - 0x8a, 0x3d, 0x1d, 0x0f, 0xa9, 0x78, 0x2e, 0xea, 0x40, 0xfd, 0x3c, 0x1c, 0x2c, 0xfc, 0x73, 0x1a, - 0x49, 0xff, 0x6d, 0x92, 0xd0, 0x08, 0x43, 0xcb, 0x77, 0x6e, 0x64, 0x56, 0x63, 0xef, 0x27, 0x2a, - 0xc3, 0x58, 0x25, 0x39, 0x9e, 0x88, 0xc5, 0x77, 0x6e, 0x38, 0xbb, 0xa4, 0x41, 0xac, 0x53, 0x90, - 0x32, 0xd0, 0x63, 0x58, 0x8f, 0xb9, 0x73, 0xe9, 0x05, 0x17, 0xbe, 0x17, 0x78, 0xfe, 0xc2, 0xdf, - 0xae, 0x4a, 0x91, 0x25, 0xae, 0xf0, 0xc4, 0x19, 0x77, 0xe6, 0x9a, 0xbd, 0x5d, 0x93, 0x52, 0x39, - 0x9e, 0x88, 0xf4, 0xc2, 0x89, 0xc3, 0xc8, 0x9b, 0xd0, 0xed, 0x35, 0xb9, 0x9e, 0xd0, 0x22, 0x8a, - 0xc0, 0xf1, 0xa9, 0x5a, 0xac, 0xab, 0x28, 0x12, 
0x06, 0x9e, 0x40, 0xb5, 0x1f, 0x84, 0x0b, 0x8e, - 0x10, 0xac, 0x66, 0xba, 0x43, 0x7e, 0x8b, 0x34, 0x3b, 0xae, 0x1b, 0xd1, 0x38, 0xde, 0xae, 0xec, - 0xac, 0xec, 0xb6, 0x88, 0x21, 0x45, 0xb9, 0xae, 0x9c, 0xf9, 0x42, 0xed, 0xbb, 0x45, 0x14, 0x21, - 0xca, 0x15, 0x4f, 0x22, 0x2f, 0xe4, 0x7a, 0xb7, 0x9a, 0xc2, 0x53, 0xa8, 0x9d, 0x2c, 0xb8, 0xf0, - 0xb2, 0x05, 0x55, 0x2f, 0x70, 0xe9, 0x8d, 0x74, 0xd3, 0x26, 0x8a, 0xc8, 0xfb, 0x29, 0xff, 0xef, - 0x7e, 0xd6, 0xa0, 0xda, 0xf3, 0x43, 0x7e, 0x8b, 0x7f, 0x0e, 0xcd, 0xa1, 0x17, 0x5c, 0xcc, 0xe9, - 0xc1, 0x2d, 0xa7, 0x19, 0x2b, 0xe5, 0x8c, 0x15, 0xfc, 0x18, 0xd6, 0x6d, 0x75, 0xca, 0xec, 0x65, - 0x6f, 0x39, 0xb9, 0xbf, 0xa4, 0x72, 0x81, 0x4b, 0x18, 0xe3, 0x22, 0x5e, 0xcd, 0xd1, 0x92, 0x86, - 0x14, 0x59, 0x14, 0x12, 0x7a, 0x1b, 0xf2, 0x1b, 0x3d, 0x02, 0xe8, 0x32, 0x3f, 0x14, 0x1e, 0xa8, - 0xab, 0xfb, 0x35, 0xc3, 0xc1, 0xff, 0x2a, 0xc3, 0xea, 0x29, 0xa5, 0x11, 0x7a, 0x9a, 0xa6, 0x41, - 0x35, 0x25, 0xd2, 0x4d, 0x29, 0x56, 0x75, 0x8c, 0x69, 0x6a, 0x5e, 0x42, 0x43, 0x9c, 0x27, 0xd9, - 0x6e, 0xd2, 0x5f, 0x73, 0xff, 0x9e, 0x96, 0x1f, 0xd0, 0x6b, 0x79, 0xb2, 0x07, 0x8c, 0x7b, 0x13, - 0x4a, 0x52, 0x39, 0xb1, 0xc3, 0x98, 0x3b, 0x5c, 0xe5, 0xb3, 0x4a, 0x14, 0x21, 0xf2, 0x39, 0xf3, - 0x5c, 0x97, 0x06, 0x32, 0x9f, 0x75, 0xa2, 0x29, 0xd1, 0x3a, 0x73, 0x27, 0x9e, 0x75, 0x67, 0x74, - 0x72, 0x29, 0xbb, 0x73, 0x85, 0xa4, 0x0c, 0xd1, 0x74, 0x31, 0x9d, 0x4f, 0x43, 0x4a, 0x23, 0xd9, - 0x94, 0x75, 0x92, 0xd0, 0xf8, 0x19, 0xd4, 0x45, 0xd0, 0xc7, 0x5e, 0xcc, 0xd1, 0xcf, 0xa0, 0x2a, - 0x78, 0x62, 0x53, 0x2b, 0xbb, 0xcd, 0xfd, 0x66, 0x66, 0x53, 0x44, 0xad, 0xe0, 0x2b, 0x00, 0x21, - 0x7a, 0xea, 0x44, 0x8e, 0x1f, 0x17, 0xb6, 0xa2, 0x08, 0x31, 0x3b, 0x9a, 0x34, 0x25, 0x64, 0x93, - 0xf3, 0xd7, 0x26, 0xf2, 0x5b, 0xc8, 0xb2, 0xe9, 0x34, 0xa6, 0xaa, 0x3d, 0xda, 0x44, 0x53, 0xc8, - 0x82, 0x15, 0x27, 0x9e, 0xc8, 0x8d, 0xd4, 0x89, 0xf8, 0xc4, 0xaf, 0x01, 0x4e, 0x9d, 0x0b, 0xaa, - 0xfd, 0xa6, 0x7a, 0xe5, 0x9c, 0x9e, 0xf1, 0x51, 0x49, 0x7d, 0xe0, 0x1b, 0x58, 0x97, 0x29, 0x3e, - 0x60, 0xee, 0xad, 0x30, 0x21, 0x27, 0x98, 0x3c, 0x93, 0xa6, 0xb5, 0x25, 0x91, 0xb1, 0x59, 0x29, - 0xb4, 0x99, 0x8d, 0xfb, 0x73, 0x58, 0x3d, 0x67, 0xee, 0xad, 0x8c, 0xba, 0xb9, 0x6f, 0xe9, 0x3c, - 0x25, 0x6e, 0x88, 0x5c, 0xc5, 0x7f, 0x85, 0x8d, 0x8c, 0x67, 0x19, 0x38, 0x86, 0x96, 0x48, 0x12, - 0x8b, 0x02, 0x35, 0xac, 0x54, 0xe2, 0x72, 0x3c, 0xf4, 0x25, 0xd4, 0x42, 0xe7, 0x42, 0x0c, 0x10, - 0xd5, 0x2b, 0x9b, 0xa6, 0x0c, 0xc9, 0xfe, 0x89, 0x16, 0xc0, 0xbf, 0xd6, 0x1e, 0x8e, 0xa8, 0xe3, - 0xea, 0x1a, 0x7e, 0x0e, 0x35, 0x35, 0xd7, 0x74, 0x11, 0x5b, 0xd9, 0xe0, 0x88, 0x5e, 0xc3, 0x1e, - 0xb4, 0x25, 0xe3, 0x2d, 0xe5, 0x8e, 0xeb, 0x70, 0xa7, 0xb0, 0x92, 0x4f, 0x44, 0x25, 0x85, 0x61, - 0x1d, 0x08, 0xca, 0x9a, 0x52, 0x2e, 0x89, 0x96, 0x10, 0x07, 0x8d, 0xdf, 0xa8, 0x83, 0xa6, 0x1a, - 0xd6, 0x90, 0xd8, 0x86, 0xcd, 0x9c, 0x2b, 0x19, 0xe5, 0xd3, 0xa5, 0x28, 0xb7, 0xb2, 0xa6, 0x8d, - 0x64, 0x12, 0x2d, 0x85, 0x56, 0x97, 0xf9, 0xbe, 0xc7, 0x09, 0x8d, 0x17, 0xf3, 0xe2, 0x09, 0xf8, - 0x25, 0x54, 0x69, 0x14, 0x31, 0x15, 0xeb, 0xfa, 0xfe, 0x27, 0xe6, 0x96, 0x90, 0x7a, 0xea, 0x8a, - 0x25, 0x4a, 0x42, 0x54, 0xda, 0xa5, 0xdc, 0xf1, 0xe6, 0xfa, 0x62, 0xd4, 0x14, 0xb6, 0xc1, 0xca, - 0xba, 0x91, 0x81, 0x3e, 0x83, 0xb5, 0x48, 0x52, 0x26, 0xd2, 0xbc, 0x61, 0x25, 0x49, 0x8c, 0x0c, - 0x1e, 0x41, 0xeb, 0x3d, 0x8d, 0xbc, 0xe9, 0xad, 0x8e, 0xf4, 0xff, 0xa0, 0xc2, 0x6f, 0xf4, 0x8c, - 0x68, 0x68, 0xcd, 0xd1, 0x0d, 0xa9, 0xf0, 0x9b, 0x0f, 0x05, 0xac, 0xd4, 0x73, 0x01, 0xe3, 0x91, - 0x38, 0xa3, 0x51, 0xcc, 0x02, 0x67, 0x2e, 0x66, 0x54, 0xe8, 0xc4, 0x71, 
0x38, 0x8b, 0x9c, 0x98, - 0xea, 0x3b, 0x38, 0xc3, 0x41, 0xbb, 0xb0, 0xa6, 0x11, 0x89, 0xae, 0x9a, 0xb9, 0x2f, 0xf5, 0xe0, - 0x23, 0x66, 0x19, 0xcf, 0xa0, 0xd5, 0xf7, 0x43, 0x16, 0xf1, 0x43, 0x16, 0xf9, 0x8e, 0xe8, 0x9c, - 0x95, 0x6b, 0x6f, 0xba, 0x34, 0xd0, 0x32, 0xc3, 0x99, 0x88, 0x65, 0x51, 0x68, 0x36, 0x77, 0x85, - 0x43, 0x69, 0xbf, 0x41, 0x0c, 0x29, 0x56, 0x02, 0x7a, 0x2d, 0x57, 0x54, 0x5e, 0x0d, 0x89, 0x5f, - 0xc1, 0xda, 0x50, 0xdf, 0x7f, 0xf7, 0xa1, 0xe6, 0xf8, 0x99, 0x79, 0xac, 0x29, 0x51, 0xd2, 0xeb, - 0x19, 0x0d, 0xf4, 0xcc, 0x90, 0xdf, 0xf8, 0xb7, 0xb0, 0xfa, 0x9e, 0x71, 0x79, 0x2f, 0x4e, 0x9c, - 0xc0, 0xf5, 0x5c, 0x31, 0x0e, 0x95, 0x5a, 0xca, 0xc8, 0x58, 0xac, 0x64, 0x2d, 0xe2, 0x7d, 0x00, - 0xa1, 0xad, 0x0f, 0xde, 0x7a, 0x82, 0x20, 0x1a, 0x12, 0x31, 0x6c, 0x41, 0x35, 0x4d, 0x52, 0x9b, - 0x28, 0x02, 0xbb, 0xb0, 0xa1, 0xd3, 0x24, 0x54, 0x25, 0xf4, 0xd8, 0x85, 0x35, 0x73, 0x9f, 0xe7, - 0xf1, 0x87, 0xde, 0x11, 0x31, 0xcb, 0xe8, 0x0b, 0xa8, 0x5d, 0x31, 0xae, 0xce, 0xad, 0xe8, 0x94, - 0x0d, 0x53, 0x51, 0x6d, 0x8a, 0xe8, 0x65, 0xfc, 0x0d, 0xd4, 0x13, 0xf3, 0x2a, 0xae, 0x4a, 0x12, - 0xd7, 0x23, 0x80, 0x64, 0x6b, 0x22, 0x8f, 0x2b, 0xa2, 0xbc, 0x29, 0x07, 0xff, 0x4e, 0xe9, 0x9a, - 0x71, 0x7d, 0xc5, 0x84, 0x58, 0x7e, 0x5c, 0x8b, 0x75, 0xa2, 0x56, 0x96, 0xcd, 0x63, 0x1b, 0xd6, - 0x06, 0xcc, 0xa5, 0x84, 0xfe, 0x28, 0x4f, 0xac, 0xe7, 0x53, 0xb6, 0x48, 0xae, 0x46, 0x4d, 0x2a, - 0x64, 0xe6, 0x87, 0x2c, 0xa0, 0x49, 0x52, 0x53, 0x06, 0xee, 0xc0, 0xea, 0xc0, 0xf1, 0xa9, 0xa8, - 0x98, 0x00, 0x27, 0x3a, 0xa7, 0xf2, 0x1b, 0x4f, 0xa0, 0x2e, 0xd6, 0xe4, 0xce, 0x3e, 0xcb, 0xac, - 0xa7, 0xc1, 0x89, 0x65, 0x25, 0x2c, 0x4a, 0xc0, 0xae, 0x03, 0x3d, 0x5d, 0x5a, 0x44, 0x11, 0x68, - 0x07, 0x9a, 0x2e, 0x8d, 0xb9, 0x17, 0x38, 0xdc, 0x63, 0x81, 0x46, 0x13, 0x59, 0x16, 0xee, 0x41, - 0x53, 0xdc, 0x48, 0xb1, 0xae, 0x6c, 0x07, 0xea, 0x01, 0x3b, 0x52, 0x97, 0x62, 0x59, 0x5d, 0x6e, - 0x86, 0x96, 0x17, 0xdf, 0x8c, 0x5d, 0x0f, 0xe9, 0x7c, 0xaa, 0x71, 0x69, 0x42, 0xe3, 0x4f, 0xa1, - 0xf1, 0x3d, 0x35, 0x73, 0xd9, 0x82, 0x95, 0x4b, 0x7a, 0x2b, 0x13, 0xd9, 0x20, 0xe2, 0x13, 0xff, - 0xad, 0x02, 0x30, 0xa4, 0xd1, 0x15, 0x8d, 0xe4, 0x6e, 0x5e, 0x41, 0x2d, 0x96, 0x67, 0x52, 0x27, - 0xfb, 0x53, 0xd3, 0x05, 0x89, 0xc8, 0x9e, 0x3a, 0xb3, 0xbd, 0x80, 0x47, 0xb7, 0x44, 0x0b, 0x0b, - 0xb5, 0x09, 0x0b, 0xa6, 0x9e, 0xe9, 0x89, 0x02, 0xb5, 0xae, 0x5c, 0xd7, 0x6a, 0x4a, 0xb8, 0xf3, - 0x1b, 0x68, 0x66, 0xac, 0xa5, 0xd1, 0x95, 0x75, 0x74, 0x29, 0xfe, 0x51, 0xa5, 0x55, 0xc4, 0x37, - 0x95, 0xd7, 0xe5, 0xce, 0x31, 0x34, 0x33, 0x16, 0x0b, 0x54, 0xbf, 0xc8, 0xaa, 0xa6, 0xb7, 0x8b, - 0x52, 0xea, 0x73, 0xea, 0x67, 0xac, 0xe1, 0x9f, 0x04, 0x22, 0x32, 0x0b, 0x68, 0x1f, 0xaa, 0x61, - 0xc4, 0xc2, 0x58, 0x6f, 0xe6, 0xe1, 0x1d, 0xd5, 0xbd, 0x53, 0xb1, 0xac, 0xf6, 0xa2, 0x44, 0x3b, - 0xe2, 0xe2, 0x4e, 0x98, 0x1f, 0xb3, 0x13, 0xfc, 0x02, 0x1a, 0xbd, 0x2b, 0x1a, 0x70, 0x73, 0xad, - 0x51, 0x41, 0x2c, 0x5f, 0x6b, 0x52, 0x82, 0xe8, 0x35, 0xdc, 0x87, 0x76, 0x37, 0xf7, 0xc8, 0x41, - 0xb0, 0x2a, 0xe4, 0x4c, 0x93, 0x8a, 0x6f, 0xc1, 0x93, 0xaf, 0x22, 0xe5, 0x50, 0x7e, 0x8b, 0xb8, - 0xce, 0x43, 0x73, 0xde, 0xc4, 0xe7, 0x93, 0x7f, 0x97, 0xcd, 0xa5, 0xa3, 0xdf, 0x67, 0x0d, 0xa8, - 0x8e, 0xce, 0xc6, 0x27, 0xdf, 0x5b, 0x25, 0xb4, 0x05, 0xd6, 0xe8, 0x6c, 0x3c, 0x38, 0x19, 0x74, - 0x7b, 0xe3, 0xd1, 0xc9, 0xc9, 0xf8, 0xf8, 0xe4, 0x4f, 0x56, 0x19, 0xdd, 0x83, 0xcd, 0xd1, 0xd9, - 0xd8, 0x3e, 0x26, 0x3d, 0xfb, 0xbb, 0x1f, 0xc6, 0xbd, 0xb3, 0xfe, 0x70, 0x34, 0xb4, 0x2a, 0xe8, - 0x13, 0xd8, 0x18, 0x9d, 0x8d, 0xfb, 0x83, 0xf7, 0xf6, 0x71, 0xff, 0xbb, 0xf1, 0x91, 0x3d, 0x3c, - 
0xb2, 0x56, 0x96, 0x98, 0xc3, 0xfe, 0x9b, 0x81, 0xb5, 0xaa, 0x0d, 0x18, 0xe6, 0xe1, 0x09, 0x79, - 0x6b, 0x8f, 0xac, 0x2a, 0xfa, 0x7f, 0x78, 0x20, 0xd9, 0xc3, 0x77, 0x87, 0x87, 0xfd, 0x6e, 0xbf, - 0x37, 0x18, 0x8d, 0x0f, 0xec, 0x63, 0x7b, 0xd0, 0xed, 0x59, 0x35, 0xad, 0x73, 0x64, 0x0f, 0xc7, - 0x43, 0xfb, 0x6d, 0x4f, 0xc5, 0x64, 0xad, 0x25, 0xa6, 0x46, 0x3d, 0x32, 0xb0, 0x8f, 0xc7, 0x3d, - 0x42, 0x4e, 0x88, 0xd5, 0x78, 0x32, 0x35, 0xd7, 0x93, 0xde, 0xd3, 0x16, 0x58, 0xef, 0x7b, 0xa4, - 0x7f, 0xf8, 0xc3, 0x78, 0x38, 0xb2, 0x47, 0xef, 0x86, 0x6a, 0x7b, 0x3b, 0xf0, 0x30, 0xcf, 0x15, - 0xf1, 0x8d, 0x07, 0x27, 0xa3, 0xf1, 0x5b, 0x7b, 0xd4, 0x3d, 0xb2, 0xca, 0xe8, 0x11, 0x74, 0xf2, - 0x12, 0xb9, 0xed, 0x55, 0xf6, 0xff, 0x61, 0xc1, 0x86, 0x4d, 0xa3, 0x0b, 0x46, 0x4e, 0xbb, 0xa2, - 0xd5, 0xc5, 0xeb, 0xe6, 0x05, 0x34, 0xc4, 0xe8, 0x19, 0x4a, 0x1c, 0x6b, 0x86, 0xa8, 0x1e, 0x46, - 0x9d, 0x82, 0xeb, 0x06, 0x97, 0xd0, 0x0b, 0xa8, 0xbd, 0x95, 0xef, 0x66, 0x64, 0xf0, 0xb2, 0x22, - 0x63, 0x42, 0x7f, 0x5c, 0xd0, 0x98, 0x77, 0xd6, 0xf3, 0x6c, 0x5c, 0x42, 0xaf, 0x00, 0xd2, 0x97, - 0x35, 0x4a, 0xba, 0x44, 0xbc, 0x35, 0x3a, 0x0f, 0xb2, 0x20, 0x23, 0xf3, 0xf4, 0xc6, 0x25, 0xf4, - 0x1c, 0x5a, 0x6f, 0x28, 0x4f, 0x1f, 0x9c, 0x79, 0x45, 0x2b, 0xf7, 0xe4, 0x0c, 0xa6, 0x0c, 0x97, - 0xd0, 0xb7, 0x60, 0x89, 0xc6, 0xcc, 0x60, 0xa1, 0x18, 0x99, 0xb3, 0x94, 0x22, 0xe4, 0xce, 0xfd, - 0xbb, 0x98, 0x49, 0xac, 0xe2, 0x12, 0x3a, 0x80, 0xcd, 0xc4, 0x40, 0x02, 0xc3, 0x0a, 0x2c, 0x6c, - 0x17, 0x41, 0x23, 0x6d, 0xe3, 0x05, 0x6c, 0x24, 0x36, 0x86, 0x3c, 0xa2, 0x8e, 0xbf, 0x14, 0x79, - 0x0e, 0xfd, 0xe1, 0xd2, 0xf3, 0x32, 0xb2, 0xe1, 0xc1, 0x1d, 0xb7, 0x85, 0xaa, 0x85, 0x90, 0x4c, - 0x9a, 0xd8, 0x83, 0xfa, 0x1b, 0xaa, 0x2c, 0xa0, 0x82, 0xc2, 0x2d, 0x3b, 0x45, 0xbf, 0x07, 0xcb, - 0xc8, 0xa7, 0x78, 0xb3, 0x40, 0xef, 0x03, 0x1e, 0xd1, 0xb7, 0xb2, 0x38, 0x09, 0x94, 0x46, 0xf7, - 0x97, 0xf1, 0xb6, 0xce, 0xd4, 0xbd, 0xbb, 0xfc, 0x0b, 0xea, 0xe2, 0x12, 0xda, 0x85, 0xea, 0x1b, - 0xca, 0x47, 0x67, 0x85, 0x5e, 0x53, 0x58, 0x86, 0x4b, 0xe8, 0x57, 0x00, 0xc6, 0xd5, 0x07, 0xc4, - 0xad, 0x44, 0xbc, 0x1f, 0x98, 0x0d, 0xee, 0x4b, 0x2d, 0x42, 0x27, 0xd4, 0x0b, 0x79, 0xa1, 0x96, - 0x69, 0x54, 0x2d, 0x83, 0x4b, 0x02, 0x5c, 0xbf, 0xa1, 0xdc, 0x3e, 0xe8, 0x17, 0xca, 0x83, 0x01, - 0x6d, 0x07, 0x7d, 0x25, 0x3b, 0xa4, 0x81, 0x3b, 0x3a, 0x43, 0x69, 0xb0, 0x9d, 0x22, 0x20, 0x8a, - 0xc5, 0xe1, 0xad, 0x0d, 0xbd, 0x8b, 0x20, 0x2f, 0x9b, 0xdb, 0xe3, 0x53, 0xa8, 0xab, 0x21, 0x50, - 0x6c, 0x2f, 0x8b, 0x5f, 0x65, 0x46, 0xea, 0xca, 0xc3, 0xe8, 0x0c, 0xb5, 0x13, 0x69, 0xd1, 0x42, - 0xc9, 0x79, 0x5a, 0x06, 0xcd, 0xb8, 0xa4, 0x5b, 0x44, 0x9d, 0xf5, 0xff, 0xd6, 0x22, 0x52, 0x02, - 0x97, 0xd0, 0x1f, 0x64, 0x8b, 0x48, 0xca, 0x0e, 0xdc, 0xd3, 0x88, 0xb1, 0x69, 0x72, 0xe6, 0xf3, - 0x4f, 0xfa, 0x24, 0x4e, 0xcd, 0x96, 0xb2, 0xb2, 0x06, 0xed, 0x6e, 0x44, 0x85, 0xbe, 0x7e, 0xe0, - 0x6f, 0x24, 0xaf, 0x57, 0x85, 0x9c, 0x3b, 0x4b, 0x40, 0x58, 0x1e, 0x9f, 0xa6, 0xa8, 0x81, 0xa2, - 0xe3, 0xa5, 0xfe, 0x47, 0x79, 0x71, 0xbd, 0xb1, 0xe7, 0xd0, 0x3c, 0x66, 0x93, 0xcb, 0x8f, 0x70, - 0xb2, 0x0f, 0xed, 0x77, 0xc1, 0xfc, 0xe3, 0x74, 0xbe, 0x86, 0xb6, 0x82, 0xe6, 0x46, 0xc7, 0x6c, - 0x3a, 0x0b, 0xd8, 0x8b, 0xf5, 0x7a, 0x37, 0x59, 0xbd, 0x3b, 0xbe, 0x8a, 0x07, 0xed, 0x4b, 0x68, - 0xff, 0x71, 0x41, 0xa3, 0xdb, 0x2e, 0x0b, 0x78, 0xe4, 0x4c, 0x78, 0x92, 0x0a, 0xc9, 0xfd, 0x80, - 0x92, 0x0d, 0x28, 0xa7, 0xa4, 0xaa, 0xbd, 0x99, 0xad, 0xac, 0x52, 0xbf, 0x7f, 0x87, 0x65, 0x8a, - 0xf6, 0x42, 0xb6, 0x89, 0x44, 0x73, 0x28, 0xfb, 0x17, 0x8a, 0xc6, 0x76, 0x9d, 0x8d, 0x0c, 0x2f, - 0x29, 0x80, 0x50, 0x79, 
0x2f, 0xd1, 0xed, 0x66, 0x06, 0xf1, 0x2e, 0x69, 0x18, 0x90, 0x2c, 0x07, - 0xed, 0x46, 0x5a, 0x65, 0xa5, 0xb8, 0xdc, 0x5a, 0xea, 0x8f, 0x9a, 0x24, 0xd0, 0xa5, 0x37, 0x80, - 0xba, 0x56, 0x54, 0x7f, 0x4a, 0xa4, 0xff, 0x01, 0xf5, 0xa5, 0x97, 0x01, 0x2e, 0xa1, 0x67, 0xb2, - 0xc1, 0x12, 0x48, 0x9c, 0x05, 0xc1, 0x49, 0xa4, 0x66, 0x55, 0x96, 0x4f, 0x8e, 0x73, 0x89, 0x69, - 0xf4, 0x4c, 0x36, 0x5b, 0x3c, 0xf4, 0xe6, 0x5c, 0x01, 0xc6, 0x4e, 0x0e, 0xfa, 0xc8, 0x81, 0xfc, - 0x52, 0xfd, 0x29, 0x23, 0x19, 0x71, 0x91, 0x8a, 0x95, 0x55, 0xd1, 0x69, 0xf9, 0x1a, 0xda, 0x62, - 0x4b, 0x29, 0xc4, 0x35, 0x42, 0x09, 0x2a, 0xee, 0x6c, 0xde, 0x41, 0xab, 0xb8, 0x84, 0x5e, 0xcb, - 0xa3, 0x9a, 0x87, 0x59, 0xc5, 0x37, 0x47, 0x4e, 0x06, 0x97, 0x0e, 0x76, 0xfe, 0xfc, 0xe8, 0xc2, - 0xe3, 0xb3, 0xc5, 0xf9, 0xde, 0x84, 0xf9, 0x5f, 0x39, 0x02, 0x1f, 0x78, 0x4c, 0xfd, 0x7e, 0x25, - 0x35, 0xce, 0x6b, 0xf2, 0xff, 0xf1, 0x97, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x7c, 0xba, - 0xc9, 0x79, 0x17, 0x00, 0x00, -}