diff --git a/Makefile b/Makefile index 294b5316ba..2aebee1fbd 100644 --- a/Makefile +++ b/Makefile @@ -39,4 +39,4 @@ debug: go-quai: $(GORUN) build/ci.go build ./cmd/go-quai @echo "Done building." - @echo "Run \"$(GOBIN)/go-quai\" to launch go-quai." \ No newline at end of file + @echo "Run \"$(GOBIN)/go-quai\" to launch go-quai." diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index ed65f3e154..e5a6fb1a9a 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -117,7 +117,7 @@ func defaultNodeConfig() node.Config { } // makeFullNode loads quai configuration and creates the Quai backend. -func makeFullNode(p2p quai.NetworkingAPI, nodeLocation common.Location, slicesRunning []common.Location, currentExpansionNumber uint8, genesisBlock *types.Block, logger *log.Logger) (*node.Node, quaiapi.Backend) { +func makeFullNode(p2p quai.NetworkingAPI, nodeLocation common.Location, slicesRunning []common.Location, currentExpansionNumber uint8, genesisBlock *types.WorkObject, logger *log.Logger) (*node.Node, quaiapi.Backend) { stack, cfg := makeConfigNode(slicesRunning, nodeLocation, currentExpansionNumber, logger) startingExpansionNumber := viper.GetUint64(StartingExpansionNumberFlag.Name) backend, _ := RegisterQuaiService(stack, p2p, cfg.Quai, cfg.Node.NodeLocation.Context(), currentExpansionNumber, startingExpansionNumber, genesisBlock, logger) @@ -132,7 +132,7 @@ func makeFullNode(p2p quai.NetworkingAPI, nodeLocation common.Location, slicesRu // RegisterQuaiService adds a Quai client to the stack. // The second return value is the full node instance, which may be nil if the // node is running as a light client. -func RegisterQuaiService(stack *node.Node, p2p quai.NetworkingAPI, cfg quaiconfig.Config, nodeCtx int, currentExpansionNumber uint8, startingExpansionNumber uint64, genesisBlock *types.Block, logger *log.Logger) (quaiapi.Backend, error) { +func RegisterQuaiService(stack *node.Node, p2p quai.NetworkingAPI, cfg quaiconfig.Config, nodeCtx int, currentExpansionNumber uint8, startingExpansionNumber uint64, genesisBlock *types.WorkObject, logger *log.Logger) (quaiapi.Backend, error) { backend, err := quai.New(stack, p2p, &cfg, nodeCtx, currentExpansionNumber, startingExpansionNumber, genesisBlock, logger) if err != nil { Fatalf("Failed to register the Quai service: %v", err) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 10898aa7f1..7dcf9b9fcb 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1459,7 +1459,7 @@ func MakeChainDatabase(stack *node.Node, readonly bool) ethdb.Database { chainDb ethdb.Database ) name := "chaindata" - chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, viper.GetString(AncientDirFlag.Name), "", readonly) + chainDb, err = stack.OpenDatabaseWithFreezer(name, cache, handles, viper.GetString(AncientDirFlag.Name), "", readonly, stack.Config().NodeLocation) if err != nil { Fatalf("Could not open database: %v", err) } diff --git a/cmd/utils/hierarchical_coordinator.go b/cmd/utils/hierarchical_coordinator.go index ecf82dea4d..9ac23fe9e1 100644 --- a/cmd/utils/hierarchical_coordinator.go +++ b/cmd/utils/hierarchical_coordinator.go @@ -125,7 +125,7 @@ func (hc *HierarchicalCoordinator) StartQuaiBackend() (*quai.QuaiBackend, error) return quaiBackend, nil } -func (hc *HierarchicalCoordinator) startNode(logPath string, quaiBackend quai.ConsensusAPI, location common.Location, genesisBlock *types.Block) { +func (hc *HierarchicalCoordinator) startNode(logPath string, quaiBackend quai.ConsensusAPI, location common.Location, genesisBlock 
*types.WorkObject) { hc.wg.Add(1) logger := log.NewLogger(logPath, hc.logLevel) logger.Info("Starting Node at location", "location", location) @@ -133,7 +133,7 @@ func (hc *HierarchicalCoordinator) startNode(logPath string, quaiBackend quai.Co quaiBackend.SetApiBackend(&apiBackend, location) // Subscribe to the new topics after setting the api backend - hc.p2p.Subscribe(location, &types.Block{}) + hc.p2p.Subscribe(location, &types.WorkObject{}) hc.p2p.Subscribe(location, common.Hash{}) hc.p2p.Subscribe(location, &types.Transaction{}) @@ -195,7 +195,7 @@ func (hc *HierarchicalCoordinator) expansionEventLoop() { } } -func (hc *HierarchicalCoordinator) TriggerTreeExpansion(block *types.Block) error { +func (hc *HierarchicalCoordinator) TriggerTreeExpansion(block *types.WorkObject) error { // set the current expansion on all the backends currentRegions, currentZones := common.GetHierarchySizeForExpansionNumber(hc.currentExpansionNumber) newRegions, newZones := common.GetHierarchySizeForExpansionNumber(hc.currentExpansionNumber + 1) diff --git a/common/proto_common.pb.go b/common/proto_common.pb.go index 30b8fc2958..bb1508d011 100644 --- a/common/proto_common.pb.go +++ b/common/proto_common.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.30.0 +// protoc v4.25.1 // source: common/proto_common.proto package common diff --git a/consensus/blake3pow/api.go b/consensus/blake3pow/api.go deleted file mode 100644 index cd532072d7..0000000000 --- a/consensus/blake3pow/api.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package blake3pow - -import ( - "errors" - - "github.com/dominant-strategies/go-quai/common" - "github.com/dominant-strategies/go-quai/core/types" -) - -var errBlake3powStopped = errors.New("blake3pow stopped") - -// API exposes blake3pow related methods for the RPC interface. -type API struct { - blake3pow *Blake3pow -} - -// GetWork returns a work package for external miner. 
-// -// The work package consists of 3 strings: -// -// result[0] - 32 bytes hex encoded current block header pow-hash -// result[1] - 32 bytes hex encoded seed hash used for DAG -// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -// result[3] - hex encoded block number -func (api *API) GetWork() ([4]string, error) { - if api.blake3pow.remote == nil { - return [4]string{}, errors.New("not supported") - } - - var ( - workCh = make(chan [4]string, 1) - errc = make(chan error, 1) - ) - select { - case api.blake3pow.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: - case <-api.blake3pow.remote.exitCh: - return [4]string{}, errBlake3powStopped - } - select { - case work := <-workCh: - return work, nil - case err := <-errc: - return [4]string{}, err - } -} - -// SubmitWork can be used by external miner to submit their POW solution. -// It returns an indication if the work was accepted. -// Note either an invalid solution, a stale work a non-existent work will return false. -func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool { - if api.blake3pow.remote == nil { - return false - } - - var errc = make(chan error, 1) - select { - case api.blake3pow.remote.submitWorkCh <- &mineResult{ - nonce: nonce, - hash: hash, - errc: errc, - }: - case <-api.blake3pow.remote.exitCh: - return false - } - err := <-errc - return err == nil -} diff --git a/consensus/blake3pow/blake3pow.go b/consensus/blake3pow/blake3pow.go index b5738d5925..39ae9a730a 100644 --- a/consensus/blake3pow/blake3pow.go +++ b/consensus/blake3pow/blake3pow.go @@ -7,9 +7,7 @@ import ( "time" "github.com/dominant-strategies/go-quai/common" - "github.com/dominant-strategies/go-quai/consensus" "github.com/dominant-strategies/go-quai/log" - "github.com/dominant-strategies/go-quai/rpc" ) var ( @@ -55,7 +53,6 @@ type Blake3pow struct { rand *rand.Rand // Properly seeded random source for nonces threads int // Number of threads to mine on if mining update chan struct{} // Notification channel to update mining parameters - remote *remoteSealer // The fields below are hooks for testing shared *Blake3pow // Shared PoW verifier to avoid cache regeneration @@ -80,7 +77,6 @@ func New(config Config, notify []string, noverify bool, logger *log.Logger) *Bla if config.PowMode == ModeShared { blake3pow.shared = sharedBlake3pow } - blake3pow.remote = startRemoteSealer(blake3pow, notify, noverify) return blake3pow } @@ -141,19 +137,6 @@ func NewShared() *Blake3pow { return &Blake3pow{shared: sharedBlake3pow} } -// Close closes the exit channel to notify all backend threads exiting. -func (blake3pow *Blake3pow) Close() error { - blake3pow.closeOnce.Do(func() { - // Short circuit if the exit channel is not allocated. - if blake3pow.remote == nil { - return - } - close(blake3pow.remote.requestExit) - <-blake3pow.remote.exitCh - }) - return nil -} - // Threads returns the number of mining threads currently enabled. This doesn't // necessarily mean that mining is running! func (blake3pow *Blake3pow) Threads() int { @@ -184,23 +167,3 @@ func (blake3pow *Blake3pow) SetThreads(threads int) { } } } - -// APIs implements consensus.Engine, returning the user facing RPC APIs. -func (blake3pow *Blake3pow) APIs(chain consensus.ChainHeaderReader) []rpc.API { - // In order to ensure backward compatibility, we exposes blake3pow RPC APIs - // to both eth and blake3pow namespaces. 
- return []rpc.API{ - { - Namespace: "eth", - Version: "1.0", - Service: &API{blake3pow}, - Public: true, - }, - { - Namespace: "blake3pow", - Version: "1.0", - Service: &API{blake3pow}, - Public: true, - }, - } -} diff --git a/consensus/blake3pow/consensus.go b/consensus/blake3pow/consensus.go index 3cfc962dae..98ab1484db 100644 --- a/consensus/blake3pow/consensus.go +++ b/consensus/blake3pow/consensus.go @@ -64,13 +64,13 @@ var ( // Author implements consensus.Engine, returning the header's coinbase as the // proof-of-work verified author of the block. -func (blake3pow *Blake3pow) Author(header *types.Header) (common.Address, error) { +func (blake3pow *Blake3pow) Author(header *types.WorkObject) (common.Address, error) { return header.Coinbase(), nil } // VerifyHeader checks whether a header conforms to the consensus rules of the // stock Quai blake3pow engine. -func (blake3pow *Blake3pow) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { +func (blake3pow *Blake3pow) VerifyHeader(chain consensus.ChainHeaderReader, header *types.WorkObject) error { // If we're running a full engine faking, accept any input as valid if blake3pow.config.PowMode == ModeFullFake { return nil @@ -92,7 +92,7 @@ func (blake3pow *Blake3pow) VerifyHeader(chain consensus.ChainHeaderReader, head // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers // concurrently. The method returns a quit channel to abort the operations and // a results channel to retrieve the async verifications. -func (blake3pow *Blake3pow) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) { +func (blake3pow *Blake3pow) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.WorkObject) (chan<- struct{}, <-chan error) { // If we're running a full engine faking, accept any input as valid if blake3pow.config.PowMode == ModeFullFake || len(headers) == 0 { abort, results := make(chan struct{}), make(chan error, len(headers)) @@ -155,9 +155,9 @@ func (blake3pow *Blake3pow) VerifyHeaders(chain consensus.ChainHeaderReader, hea return abort, errorsOut } -func (blake3pow *Blake3pow) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, index int, unixNow int64) error { +func (blake3pow *Blake3pow) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.WorkObject, index int, unixNow int64) error { nodeCtx := blake3pow.config.NodeLocation.Context() - var parent *types.Header + var parent *types.WorkObject if index == 0 { parent = chain.GetHeader(headers[0].ParentHash(nodeCtx), headers[0].NumberU64(nodeCtx)-1) } else if headers[index-1].Hash() == headers[index].ParentHash(nodeCtx) { @@ -171,7 +171,7 @@ func (blake3pow *Blake3pow) verifyHeaderWorker(chain consensus.ChainHeaderReader // VerifyUncles verifies that the given block's uncles conform to the consensus // rules of the stock Quai blake3pow engine. 
-func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { +func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *types.WorkObject) error { nodeCtx := blake3pow.config.NodeLocation.Context() // If we're running a full engine faking, accept any input as valid if blake3pow.config.PowMode == ModeFullFake { @@ -185,7 +185,7 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ return nil } // Gather the set of past uncles and ancestors - uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.Header) + uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.WorkObject) number, parent := block.NumberU64(nodeCtx)-1, block.ParentHash(nodeCtx) for i := 0; i < 7; i++ { @@ -197,7 +197,7 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ // If the ancestor doesn't have any uncles, we don't have to iterate them if ancestorHeader.UncleHash() != types.EmptyUncleHash { // Need to add those uncles to the banned list too - ancestor := chain.GetBlock(parent, number) + ancestor := chain.GetWorkObject(parent) if ancestor == nil { break } @@ -207,7 +207,7 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ } parent, number = ancestorHeader.ParentHash(nodeCtx), number-1 } - ancestors[block.Hash()] = block.Header() + ancestors[block.Hash()] = block uncles.Add(block.Hash()) // Verify each of the uncles that it's recent, but not an ancestor @@ -223,23 +223,40 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ if ancestors[hash] != nil { return errUncleIsAncestor } - if ancestors[uncle.ParentHash(nodeCtx)] == nil || uncle.ParentHash(nodeCtx) == block.ParentHash(nodeCtx) { + if ancestors[uncle.ParentHash()] == nil || uncle.ParentHash() == block.ParentHash(nodeCtx) { return errDanglingUncle } - if err := blake3pow.verifyHeader(chain, uncle, ancestors[uncle.ParentHash(nodeCtx)], true, time.Now().Unix()); err != nil { + // Verify the seal and get the powHash for the given header + err := blake3pow.verifySeal(uncle) + if err != nil { return err } + + // Verify the block's difficulty based on its timestamp and parent's difficulty + // difficulty adjustment can only be checked in zone + if nodeCtx == common.ZONE_CTX { + parent := chain.GetHeaderByHash(uncle.ParentHash()) + expected := blake3pow.CalcDifficulty(chain, parent.WorkObjectHeader()) + if expected.Cmp(uncle.Difficulty()) != 0 { + return fmt.Errorf("uncle has invalid difficulty: have %v, want %v", uncle.Difficulty(), expected) + } + } } return nil } // verifyHeader checks whether a header conforms to the consensus rules -func (blake3pow *Blake3pow) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, unixNow int64) error { +func (blake3pow *Blake3pow) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.WorkObject, uncle bool, unixNow int64) error { nodeCtx := blake3pow.config.NodeLocation.Context() // Ensure that the header's extra-data section is of a reasonable size if uint64(len(header.Extra())) > params.MaximumExtraDataSize { return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra()), params.MaximumExtraDataSize) } + // verify that the hash of the header in the Body matches the header hash specified in the work object header + expectedHeaderHash := header.Body().Header().Hash() + if header.HeaderHash() != expectedHeaderHash { + return fmt.Errorf("invalid header hash: have %v, want %v", 
header.HeaderHash(), expectedHeaderHash) + } // Verify the header's timestamp if !uncle { if header.Time() > uint64(unixNow+allowedFutureBlockTimeSeconds) { @@ -252,7 +269,7 @@ func (blake3pow *Blake3pow) verifyHeader(chain consensus.ChainHeaderReader, head // Verify the block's difficulty based on its timestamp and parent's difficulty // difficulty adjustment can only be checked in zone if nodeCtx == common.ZONE_CTX { - expected := blake3pow.CalcDifficulty(chain, parent) + expected := blake3pow.CalcDifficulty(chain, parent.WorkObjectHeader()) if expected.Cmp(header.Difficulty()) != 0 { return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty(), expected) } @@ -379,7 +396,7 @@ func (blake3pow *Blake3pow) verifyHeader(chain consensus.ChainHeaderReader, head // check if the header coinbase is in scope _, err := header.Coinbase().InternalAddress() if err != nil { - return fmt.Errorf("out-of-scope coinbase in the header") + return fmt.Errorf("out-of-scope coinbase in the header: %v location: %v nodeLocation: %v", header.Coinbase(), header.Location(), blake3pow.config.NodeLocation) } // Verify that the gas limit is <= 2^63-1 cap := uint64(0x7fffffffffffffff) @@ -436,7 +453,7 @@ func (blake3pow *Blake3pow) verifyHeader(chain consensus.ChainHeaderReader, head // CalcDifficulty is the difficulty adjustment algorithm. It returns // the difficulty that a new block should have when created at time // given the parent block's time and difficulty. -func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.Header) *big.Int { +func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.WorkObjectHeader) *big.Int { nodeCtx := blake3pow.config.NodeLocation.Context() if nodeCtx != common.ZONE_CTX { @@ -454,7 +471,7 @@ func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, pa return new(big.Int).Div(parent.Difficulty(), big.NewInt(int64((blake3pow.NodeLocation().Region()+1)*(blake3pow.NodeLocation().Zone()+1)))) } - parentOfParent := chain.GetHeaderByHash(parent.ParentHash(nodeCtx)) + parentOfParent := chain.GetHeaderByHash(parent.ParentHash()) if parentOfParent == nil || chain.IsGenesisHash(parentOfParent.Hash()) { return parent.Difficulty() } @@ -482,7 +499,7 @@ func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, pa return x } -func (blake3pow *Blake3pow) IsDomCoincident(chain consensus.ChainHeaderReader, header *types.Header) bool { +func (blake3pow *Blake3pow) IsDomCoincident(chain consensus.ChainHeaderReader, header *types.WorkObject) bool { _, order, err := blake3pow.CalcOrder(header) if err != nil { return false @@ -491,19 +508,18 @@ func (blake3pow *Blake3pow) IsDomCoincident(chain consensus.ChainHeaderReader, h } // VerifySeal returns the PowHash and the verifySeal output -func (blake3pow *Blake3pow) VerifySeal(header *types.Header) (common.Hash, error) { +func (blake3pow *Blake3pow) VerifySeal(header *types.WorkObjectHeader) (common.Hash, error) { return header.Hash(), blake3pow.verifySeal(header) } // verifySeal checks whether a block satisfies the PoW difficulty requirements, // either using the usual blake3pow cache for it, or alternatively using a full DAG // to make remote mining fast. 
-func (blake3pow *Blake3pow) verifySeal(header *types.Header) error { - nodeCtx := blake3pow.config.NodeLocation.Context() +func (blake3pow *Blake3pow) verifySeal(header *types.WorkObjectHeader) error { // If we're running a fake PoW, accept any seal as valid if blake3pow.config.PowMode == ModeFake || blake3pow.config.PowMode == ModeFullFake { time.Sleep(blake3pow.fakeDelay) - if blake3pow.fakeFail == header.NumberU64(nodeCtx) { + if blake3pow.fakeFail == header.NumberU64() { return errInvalidPoW } return nil @@ -522,18 +538,18 @@ func (blake3pow *Blake3pow) verifySeal(header *types.Header) error { // Prepare implements consensus.Engine, initializing the difficulty field of a // header to conform to the blake3pow protocol. The changes are done inline. -func (blake3pow *Blake3pow) Prepare(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header) error { - header.SetDifficulty(blake3pow.CalcDifficulty(chain, parent)) +func (blake3pow *Blake3pow) Prepare(chain consensus.ChainHeaderReader, header *types.WorkObject, parent *types.WorkObject) error { + header.WorkObjectHeader().SetDifficulty(blake3pow.CalcDifficulty(chain, parent.WorkObjectHeader())) return nil } // Finalize implements consensus.Engine, accumulating the block and uncle rewards, // setting the final state on the header -func (blake3pow *Blake3pow) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { +func (blake3pow *Blake3pow) Finalize(chain consensus.ChainHeaderReader, header *types.WorkObject, state *state.StateDB) { nodeLocation := blake3pow.config.NodeLocation nodeCtx := blake3pow.config.NodeLocation.Context() // Accumulate any block and uncle rewards and commit the final state root - accumulateRewards(chain.Config(), state, header, uncles, blake3pow.logger) + accumulateRewards(chain.Config(), state, header, blake3pow.logger) if nodeCtx == common.ZONE_CTX && chain.IsGenesisHash(header.ParentHash(nodeCtx)) { alloc := core.ReadGenesisAlloc("genallocs/gen_alloc_"+nodeLocation.Name()+".json", blake3pow.logger) @@ -560,21 +576,22 @@ func (blake3pow *Blake3pow) Finalize(chain consensus.ChainHeaderReader, header * core.AddGenesisUtxos(state, nodeLocation, blake3pow.logger) } - header.SetUTXORoot(state.UTXORoot()) - header.SetEVMRoot(state.IntermediateRoot(true)) + header.Header().SetUTXORoot(state.UTXORoot()) + header.Header().SetEVMRoot(state.IntermediateRoot(true)) } // FinalizeAndAssemble implements consensus.Engine, accumulating the block and // uncle rewards, setting the final state and assembling the block. 
-func (blake3pow *Blake3pow) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.Block, error) { +func (blake3pow *Blake3pow) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.WorkObject, state *state.StateDB, txs []*types.Transaction, uncles []*types.WorkObjectHeader, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.WorkObject, error) { nodeCtx := blake3pow.config.NodeLocation.Context() if nodeCtx == common.ZONE_CTX && chain.ProcessingState() { // Finalize block - blake3pow.Finalize(chain, header, state, txs, uncles) + blake3pow.Finalize(chain, header, state) } + woBody := types.NewWorkObjectBody(header.Header(), txs, etxs, uncles, subManifest, receipts, trie.NewStackTrie(nil), nodeCtx) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, etxs, subManifest, receipts, trie.NewStackTrie(nil), nodeCtx), nil + return types.NewWorkObject(header.WorkObjectHeader(), woBody, nil, types.BlockObject), nil } // NodeLocation returns the location of the node @@ -582,15 +599,14 @@ func (blake3pow *Blake3pow) NodeLocation() common.Location { return blake3pow.config.NodeLocation } -func (blake3pow *Blake3pow) ComputePowLight(header *types.Header) (common.Hash, common.Hash) { +func (blake3pow *Blake3pow) ComputePowLight(header *types.WorkObjectHeader) (common.Hash, common.Hash) { panic("compute pow light doesnt exist for blake3") } // AccumulateRewards credits the coinbase of the given block with the mining // reward. The total reward consists of the static block reward and rewards for // included uncles. The coinbase of each uncle block is also rewarded. 
-func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header, logger *log.Logger) { - nodeCtx := config.Location.Context() +func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.WorkObject, logger *log.Logger) { // Select the correct block reward based on chain progression blockReward := misc.CalculateReward(header) @@ -610,26 +626,5 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header return } - // Accumulate the rewards for the miner and any included uncles - reward := new(big.Int).Set(blockReward) - r := new(big.Int) - for _, uncle := range uncles { - coinbase, err := uncle.Coinbase().InternalAddress() - if err != nil { - logger.WithFields(log.Fields{ - "Address": uncle.Coinbase().String(), - "Hash": uncle.Hash().String(), - }).Error("Found uncle with out of scope coinbase, skipping reward") - continue - } - r.Add(uncle.Number(nodeCtx), big8) - r.Sub(r, header.Number(nodeCtx)) - r.Mul(r, blockReward) - r.Div(r, big8) - state.AddBalance(coinbase, r) - - r.Div(blockReward, big32) - reward.Add(reward, r) - } - state.AddBalance(coinbase, reward) + state.AddBalance(coinbase, blockReward) } diff --git a/consensus/blake3pow/poem.go b/consensus/blake3pow/poem.go index bf3f149cc2..9d6fc48ebe 100644 --- a/consensus/blake3pow/poem.go +++ b/consensus/blake3pow/poem.go @@ -13,14 +13,14 @@ import ( ) // CalcOrder returns the order of the block within the hierarchy of chains -func (blake3pow *Blake3pow) CalcOrder(header *types.Header) (*big.Int, int, error) { +func (blake3pow *Blake3pow) CalcOrder(header *types.WorkObject) (*big.Int, int, error) { nodeCtx := blake3pow.config.NodeLocation.Context() if header.NumberU64(nodeCtx) == 0 { return big0, common.PRIME_CTX, nil } // Verify the seal and get the powHash for the given header - err := blake3pow.verifySeal(header) + err := blake3pow.verifySeal(header.WorkObjectHeader()) if err != nil { return big0, -1, err } @@ -68,7 +68,7 @@ func (blake3pow *Blake3pow) IntrinsicLogS(powHash common.Hash) *big.Int { } // TotalLogS() returns the total entropy reduction if the chain since genesis to the given header -func (blake3pow *Blake3pow) TotalLogS(chain consensus.GenesisReader, header *types.Header) *big.Int { +func (blake3pow *Blake3pow) TotalLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int { // Treating the genesis block differntly if chain.IsGenesisHash(header.Hash()) { return big.NewInt(0) @@ -94,7 +94,7 @@ func (blake3pow *Blake3pow) TotalLogS(chain consensus.GenesisReader, header *typ return big.NewInt(0) } -func (blake3pow *Blake3pow) TotalLogPhS(header *types.Header) *big.Int { +func (blake3pow *Blake3pow) TotalLogPhS(header *types.WorkObject) *big.Int { switch blake3pow.config.NodeLocation.Context() { case common.PRIME_CTX: totalS := header.ParentEntropy(common.PRIME_CTX) @@ -110,7 +110,7 @@ func (blake3pow *Blake3pow) TotalLogPhS(header *types.Header) *big.Int { return big.NewInt(0) } -func (blake3pow *Blake3pow) DeltaLogS(chain consensus.GenesisReader, header *types.Header) *big.Int { +func (blake3pow *Blake3pow) DeltaLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int { // Treating the genesis block differntly if chain.IsGenesisHash(header.Hash()) { return big.NewInt(0) @@ -133,7 +133,7 @@ func (blake3pow *Blake3pow) DeltaLogS(chain consensus.GenesisReader, header *typ return big.NewInt(0) } -func (blake3pow *Blake3pow) UncledLogS(block *types.Block) *big.Int { +func (blake3pow *Blake3pow) 
UncledLogS(block *types.WorkObject) *big.Int { uncles := block.Uncles() totalUncledLogS := big.NewInt(0) for _, uncle := range uncles { @@ -149,7 +149,7 @@ func (blake3pow *Blake3pow) UncledLogS(block *types.Block) *big.Int { return totalUncledLogS } -func (blake3pow *Blake3pow) UncledSubDeltaLogS(chain consensus.GenesisReader, header *types.Header) *big.Int { +func (blake3pow *Blake3pow) UncledSubDeltaLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int { // Treating the genesis block differntly if chain.IsGenesisHash(header.Hash()) { return big.NewInt(0) @@ -175,7 +175,7 @@ func (blake3pow *Blake3pow) UncledSubDeltaLogS(chain consensus.GenesisReader, he // CalcRank returns the rank of the block within the hierarchy of chains, this // determines the level of the interlink -func (blake3pow *Blake3pow) CalcRank(chain consensus.GenesisReader, header *types.Header) (int, error) { +func (blake3pow *Blake3pow) CalcRank(chain consensus.GenesisReader, header *types.WorkObject) (int, error) { if chain.IsGenesisHash(header.Hash()) { return 0, nil } diff --git a/consensus/blake3pow/sealer.go b/consensus/blake3pow/sealer.go index b8acb9af00..fe52791791 100644 --- a/consensus/blake3pow/sealer.go +++ b/consensus/blake3pow/sealer.go @@ -1,21 +1,14 @@ package blake3pow import ( - "bytes" - "context" crand "crypto/rand" - "encoding/json" "errors" "math" "math/big" "math/rand" - "net/http" "runtime" "sync" - "time" - "github.com/dominant-strategies/go-quai/common" - "github.com/dominant-strategies/go-quai/common/hexutil" "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/log" ) @@ -33,10 +26,10 @@ var ( // Seal implements consensus.Engine, attempting to find a nonce that satisfies // the header's difficulty requirements. -func (blake3pow *Blake3pow) Seal(header *types.Header, results chan<- *types.Header, stop <-chan struct{}) error { +func (blake3pow *Blake3pow) Seal(header *types.WorkObject, results chan<- *types.WorkObject, stop <-chan struct{}) error { // If we're running a fake PoW, simply return a 0 nonce immediately if blake3pow.config.PowMode == ModeFake || blake3pow.config.PowMode == ModeFullFake { - header.SetNonce(types.BlockNonce{}) + header.WorkObjectHeader().SetNonce(types.BlockNonce{}) select { case results <- header: default: @@ -73,7 +66,7 @@ func (blake3pow *Blake3pow) Seal(header *types.Header, results chan<- *types.Hea } var ( pend sync.WaitGroup - locals = make(chan *types.Header) + locals = make(chan *types.WorkObject) ) for i := 0; i < threads; i++ { pend.Add(1) @@ -84,7 +77,7 @@ func (blake3pow *Blake3pow) Seal(header *types.Header, results chan<- *types.Hea } // Wait until sealing is terminated or a nonce is found go func() { - var result *types.Header + var result *types.WorkObject select { case <-stop: // Outside abort, stop all miner threads @@ -115,7 +108,7 @@ func (blake3pow *Blake3pow) Seal(header *types.Header, results chan<- *types.Hea // mine is the actual proof-of-work miner that searches for a nonce starting from // seed that results in correct final header difficulty. 
-func (blake3pow *Blake3pow) mine(header *types.Header, id int, seed uint64, abort chan struct{}, found chan *types.Header) { +func (blake3pow *Blake3pow) mine(header *types.WorkObject, id int, seed uint64, abort chan struct{}, found chan *types.WorkObject) { // Extract some data from the header var ( target = new(big.Int).Div(big2e256, header.Difficulty()) @@ -142,8 +135,8 @@ search: attempts = 0 } // Compute the PoW value of this nonce - header = types.CopyHeader(header) - header.SetNonce(types.EncodeNonce(nonce)) + header = types.CopyWorkObject(header) + header.WorkObjectHeader().SetNonce(types.EncodeNonce(nonce)) hash := header.Hash().Bytes() if powBuffer.SetBytes(hash).Cmp(target) <= 0 { // Correct nonce found, create a new header with it @@ -167,277 +160,3 @@ search: } } } - -// This is the timeout for HTTP requests to notify external miners. -const remoteSealerTimeout = 1 * time.Second - -type remoteSealer struct { - works map[common.Hash]*types.Header - rates map[common.Hash]hashrate - currentHeader *types.Header - currentWork [4]string - notifyCtx context.Context - cancelNotify context.CancelFunc // cancels all notification requests - reqWG sync.WaitGroup // tracks notification request goroutines - - blake3pow *Blake3pow - noverify bool - notifyURLs []string - results chan<- *types.Header - workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer - fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work - submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result - fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer. - submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate - requestExit chan struct{} - exitCh chan struct{} -} - -// sealTask wraps a seal header with relative result channel for remote sealer thread. -type sealTask struct { - header *types.Header - results chan<- *types.Header -} - -// mineResult wraps the pow solution parameters for the specified block. -type mineResult struct { - nonce types.BlockNonce - hash common.Hash - - errc chan error -} - -// hashrate wraps the hash rate submitted by the remote sealer. -type hashrate struct { - id common.Hash - ping time.Time - rate uint64 - - done chan struct{} -} - -// sealWork wraps a seal work package for remote sealer. -type sealWork struct { - errc chan error - res chan [4]string -} - -func startRemoteSealer(blake3pow *Blake3pow, urls []string, noverify bool) *remoteSealer { - ctx, cancel := context.WithCancel(context.Background()) - s := &remoteSealer{ - blake3pow: blake3pow, - noverify: noverify, - notifyURLs: urls, - notifyCtx: ctx, - cancelNotify: cancel, - works: make(map[common.Hash]*types.Header), - rates: make(map[common.Hash]hashrate), - workCh: make(chan *sealTask), - fetchWorkCh: make(chan *sealWork), - submitWorkCh: make(chan *mineResult), - fetchRateCh: make(chan chan uint64), - submitRateCh: make(chan *hashrate), - requestExit: make(chan struct{}), - exitCh: make(chan struct{}), - } - go s.loop() - return s -} - -func (s *remoteSealer) loop() { - defer func() { - s.blake3pow.logger.Trace("Blake3pow remote sealer is exiting") - s.cancelNotify() - s.reqWG.Wait() - close(s.exitCh) - }() - - nodeCtx := s.blake3pow.config.NodeLocation.Context() - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - - for { - select { - case work := <-s.workCh: - // Update current work with new received header. 
- // Note same work can be past twice, happens when changing CPU threads. - s.results = work.results - s.makeWork(work.header) - s.notifyWork() - - case work := <-s.fetchWorkCh: - // Return current mining work to remote miner. - if s.currentHeader == nil { - work.errc <- errNoMiningWork - } else { - work.res <- s.currentWork - } - - case result := <-s.submitWorkCh: - // Verify submitted PoW solution based on maintained mining blocks. - if s.submitWork(result.nonce, result.hash) { - result.errc <- nil - } else { - result.errc <- errInvalidSealResult - } - - case result := <-s.submitRateCh: - // Trace remote sealer's hash rate by submitted value. - s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()} - close(result.done) - - case req := <-s.fetchRateCh: - // Gather all hash rate submitted by remote sealer. - var total uint64 - for _, rate := range s.rates { - // this could overflow - total += rate.rate - } - req <- total - - case <-ticker.C: - // Clear stale submitted hash rate. - for id, rate := range s.rates { - if time.Since(rate.ping) > 10*time.Second { - delete(s.rates, id) - } - } - // Clear stale pending blocks - if s.currentHeader != nil { - for hash, header := range s.works { - if header.NumberU64(nodeCtx)+staleThreshold <= s.currentHeader.NumberU64(nodeCtx) { - delete(s.works, hash) - } - } - } - - case <-s.requestExit: - return - } - } -} - -// makeWork creates a work package for external miner. -// -// The work package consists of 3 strings: -// -// result[0], 32 bytes hex encoded current header pow-hash -// result[1], 32 bytes hex encoded seed hash used for DAG -// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -// result[3], hex encoded header number -func (s *remoteSealer) makeWork(header *types.Header) { - nodeCtx := s.blake3pow.config.NodeLocation.Context() - hash := header.SealHash() - s.currentWork[0] = hash.Hex() - s.currentWork[1] = hexutil.EncodeBig(header.Number(nodeCtx)) - s.currentWork[2] = common.BytesToHash(new(big.Int).Div(big2e256, header.Difficulty()).Bytes()).Hex() - - // Trace the seal work fetched by remote sealer. - s.currentHeader = header - s.works[hash] = header -} - -// notifyWork notifies all the specified mining endpoints of the availability of -// new work to be processed. -func (s *remoteSealer) notifyWork() { - work := s.currentWork - - // Encode the JSON payload of the notification. When NotifyFull is set, - // this is the complete block header, otherwise it is a JSON array. 
- var blob []byte - if s.blake3pow.config.NotifyFull { - blob, _ = json.Marshal(s.currentHeader) - } else { - blob, _ = json.Marshal(work) - } - - s.reqWG.Add(len(s.notifyURLs)) - for _, url := range s.notifyURLs { - go s.sendNotification(s.notifyCtx, url, blob, work) - } -} - -func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) { - defer s.reqWG.Done() - - req, err := http.NewRequest("POST", url, bytes.NewReader(json)) - if err != nil { - s.blake3pow.logger.WithField("err", err).Warn("Failed to create remote miner notification") - return - } - ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout) - defer cancel() - req = req.WithContext(ctx) - req.Header.Set("Content-Type", "application/json") - - resp, err := http.DefaultClient.Do(req) - if err != nil { - s.blake3pow.logger.WithField("err", err).Warn("Failed to notify remote miner") - } else { - s.blake3pow.logger.WithFields(log.Fields{ - "miner": url, - "hash": work[0], - "target": work[2], - }).Trace("Notified remote miner") - resp.Body.Close() - } -} - -// submitWork verifies the submitted pow solution, returning -// whether the solution was accepted or not (not can be both a bad pow as well as -// any other error, like no pending work or stale mining result). -func (s *remoteSealer) submitWork(nonce types.BlockNonce, sealhash common.Hash) bool { - if s.currentHeader == nil { - s.blake3pow.logger.WithField("sealhash", sealhash).Warn("Pending work without block") - return false - } - nodeCtx := s.blake3pow.config.NodeLocation.Context() - // Make sure the work submitted is present - header := s.works[sealhash] - if header == nil { - s.blake3pow.logger.WithFields(log.Fields{ - "sealhash": sealhash, - "curnumber": s.currentHeader.NumberU64(nodeCtx), - }).Warn("Work submitted but none pending") - return false - } - // Verify the correctness of submitted result. - header.SetNonce(nonce) - - start := time.Now() - // Make sure the result channel is assigned. - if s.results == nil { - s.blake3pow.logger.Warn("Blake3pow result channel is empty, submitted mining result is rejected") - return false - } - s.blake3pow.logger.WithFields(log.Fields{ - "sealhash": sealhash, - "elapsed": common.PrettyDuration(time.Since(start)), - }).Trace("Verified correct proof-of-work") - - // Solutions seems to be valid, return to the miner and notify acceptance. - solution := header - - // The submitted solution is within the scope of acceptance. - if solution.NumberU64(nodeCtx)+staleThreshold > s.currentHeader.NumberU64(nodeCtx) { - select { - case s.results <- solution: - s.blake3pow.logger.WithFields(log.Fields{ - "number": solution.NumberU64(nodeCtx), - "sealhash": sealhash, - "hash": solution.Hash(), - }).Trace("Work submitted is acceptable") - return true - default: - s.blake3pow.logger.WithField("sealhash", sealhash).Warn("Sealing result is not read by miner") - return false - } - } - // The submitted block is too old to accept, drop it. 
- s.blake3pow.logger.WithFields(log.Fields{ - "number": solution.NumberU64(nodeCtx), - "sealhash": sealhash, - "hash": solution.Hash(), - }).Warn("Work submitted is too old") - return false -} diff --git a/consensus/consensus.go b/consensus/consensus.go index 1234bf626b..5be12b2b17 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -24,7 +24,6 @@ import ( "github.com/dominant-strategies/go-quai/core/state" "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/params" - "github.com/dominant-strategies/go-quai/rpc" ) // ChainHeaderReader defines a small collection of methods needed to access the local @@ -34,16 +33,16 @@ type ChainHeaderReader interface { Config() *params.ChainConfig // CurrentHeader retrieves the current header from the local chain. - CurrentHeader() *types.Header + CurrentHeader() *types.WorkObject // GetHeader retrieves a block header from the database by hash and number. - GetHeader(hash common.Hash, number uint64) *types.Header + GetHeader(hash common.Hash, number uint64) *types.WorkObject // GetHeaderByNumber retrieves a block header from the database by number. - GetHeaderByNumber(number uint64) *types.Header + GetHeaderByNumber(number uint64) *types.WorkObject // GetHeaderByHash retrieves a block header from the database by its hash. - GetHeaderByHash(hash common.Hash) *types.Header + GetHeaderByHash(hash common.Hash) *types.WorkObject // GetTerminiByHash retrieves the termini for a given header hash GetTerminiByHash(hash common.Hash) *types.Termini @@ -52,13 +51,13 @@ type ChainHeaderReader interface { ProcessingState() bool // ComputeEfficiencyScore returns the efficiency score computed at each prime block - ComputeEfficiencyScore(header *types.Header) uint16 + ComputeEfficiencyScore(header *types.WorkObject) uint16 // IsGenesisHash returns true if the given hash is the genesis block hash. IsGenesisHash(hash common.Hash) bool // UpdateEtxEligibleSlices updates the etx eligible slice for the given zone location - UpdateEtxEligibleSlices(header *types.Header, location common.Location) common.Hash + UpdateEtxEligibleSlices(header *types.WorkObject, location common.Location) common.Hash } // ChainReader defines a small collection of methods needed to access the local @@ -67,7 +66,7 @@ type ChainReader interface { ChainHeaderReader // GetBlock retrieves a block from the database by hash and number. - GetBlock(hash common.Hash, number uint64) *types.Block + GetWorkObject(hash common.Hash) *types.WorkObject } type GenesisReader interface { @@ -80,78 +79,77 @@ type Engine interface { // Author retrieves the Quai address of the account that minted the given // block, which may be different from the header's coinbase if a consensus // engine is based on signatures. 
- Author(header *types.Header) (common.Address, error) + Author(header *types.WorkObject) (common.Address, error) // IntrinsicLogS returns the logarithm of the intrinsic entropy reduction of a PoW hash IntrinsicLogS(powHash common.Hash) *big.Int // CalcOrder returns the order of the block within the hierarchy of chains - CalcOrder(header *types.Header) (*big.Int, int, error) + CalcOrder(header *types.WorkObject) (*big.Int, int, error) // TotalLogS returns the log of the total entropy reduction if the chain since genesis to the given header - TotalLogS(chain GenesisReader, header *types.Header) *big.Int + TotalLogS(chain GenesisReader, header *types.WorkObject) *big.Int // TotalLogPhS returns the log of the total entropy reduction if the chain since genesis for a pending header - TotalLogPhS(header *types.Header) *big.Int + TotalLogPhS(header *types.WorkObject) *big.Int // DeltaLogS returns the log of the entropy delta for a chain since its prior coincidence - DeltaLogS(chain GenesisReader, header *types.Header) *big.Int + DeltaLogS(chain GenesisReader, header *types.WorkObject) *big.Int // UncledLogS returns the log of the entropy reduction by uncles referenced in the block - UncledLogS(block *types.Block) *big.Int + UncledLogS(block *types.WorkObject) *big.Int // UncledUncledSubDeltaLogS returns the log of the uncled entropy reduction since the past coincident - UncledSubDeltaLogS(chain GenesisReader, header *types.Header) *big.Int + UncledSubDeltaLogS(chain GenesisReader, header *types.WorkObject) *big.Int // CalcRank calculates the rank of the prime block - CalcRank(chain GenesisReader, header *types.Header) (int, error) + CalcRank(chain GenesisReader, header *types.WorkObject) (int, error) - ComputePowLight(header *types.Header) (mixHash, powHash common.Hash) + ComputePowLight(header *types.WorkObjectHeader) (mixHash, powHash common.Hash) // VerifyHeader checks whether a header conforms to the consensus rules of a // given engine. Verifying the seal may be done optionally here, or explicitly // via the VerifySeal method. - VerifyHeader(chain ChainHeaderReader, header *types.Header) error + VerifyHeader(chain ChainHeaderReader, header *types.WorkObject) error // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers // concurrently. The method returns a quit channel to abort the operations and // a results channel to retrieve the async verifications (the order is that of // the input slice). - VerifyHeaders(chain ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) + VerifyHeaders(chain ChainHeaderReader, headers []*types.WorkObject) (chan<- struct{}, <-chan error) // VerifyUncles verifies that the given block's uncles conform to the consensus // rules of a given engine. - VerifyUncles(chain ChainReader, block *types.Block) error + VerifyUncles(chain ChainReader, wo *types.WorkObject) error // Prepare initializes the consensus fields of a block header according to the // rules of a particular engine. The changes are executed inline. - Prepare(chain ChainHeaderReader, header *types.Header, parent *types.Header) error + Prepare(chain ChainHeaderReader, header *types.WorkObject, parent *types.WorkObject) error // Finalize runs any post-transaction state modifications (e.g. block rewards) // but does not assemble the block. // // Note: The block header and state database might be updated to reflect any // consensus rules that happen at finalization (e.g. block rewards). 
- Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, - uncles []*types.Header) + Finalize(chain ChainHeaderReader, header *types.WorkObject, state *state.StateDB) // FinalizeAndAssemble runs any post-transaction state modifications (e.g. block // rewards) and assembles the final block. // // Note: The block header and state database might be updated to reflect any // consensus rules that happen at finalization (e.g. block rewards). - FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, etxs []*types.Transaction, manifest types.BlockManifest, receipts []*types.Receipt) (*types.Block, error) + FinalizeAndAssemble(chain ChainHeaderReader, woHeader *types.WorkObject, state *state.StateDB, txs []*types.Transaction, uncles []*types.WorkObjectHeader, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.WorkObject, error) // Seal generates a new sealing request for the given input block and pushes // the result into the given channel. // // Note, the method returns immediately and will send the result async. More // than one result may also be returned depending on the consensus algorithm. - Seal(header *types.Header, results chan<- *types.Header, stop <-chan struct{}) error + Seal(header *types.WorkObject, results chan<- *types.WorkObject, stop <-chan struct{}) error // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty // that a new block should have. - CalcDifficulty(chain ChainHeaderReader, parent *types.Header) *big.Int + CalcDifficulty(chain ChainHeaderReader, parent *types.WorkObjectHeader) *big.Int // IsDomCoincident returns true if this block satisfies the difficulty order // of a dominant chain. If this node does not have a dominant chain (i.e. @@ -159,17 +157,11 @@ type Engine interface { // // Importantly, this check does NOT mean the block is canonical in the // dominant chain, or even that the claimed dominant difficulty is valid. - IsDomCoincident(chain ChainHeaderReader, header *types.Header) bool + IsDomCoincident(chain ChainHeaderReader, header *types.WorkObject) bool // VerifySeal computes the PowHash and checks if work meets the difficulty // requirement specified in header - VerifySeal(header *types.Header) (common.Hash, error) - - // APIs returns the RPC APIs this consensus engine provides. - APIs(chain ChainHeaderReader) []rpc.API - - // Close terminates any background threads maintained by the consensus engine. - Close() error + VerifySeal(header *types.WorkObjectHeader) (common.Hash, error) } func TargetToDifficulty(target *big.Int) *big.Int { diff --git a/consensus/misc/basefee.go b/consensus/misc/basefee.go index 795cf30482..038def1541 100644 --- a/consensus/misc/basefee.go +++ b/consensus/misc/basefee.go @@ -25,7 +25,7 @@ import ( ) // CalcBaseFee calculates the basefee of the header taking into account the basefee ceiling -func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int { +func CalcBaseFee(config *params.ChainConfig, parent *types.WorkObject) *big.Int { calculatedBaseFee := calcBaseFee(config, parent) ceiling := big.NewInt(params.MaxBaseFee) if calculatedBaseFee.Cmp(ceiling) > 0 { @@ -35,7 +35,7 @@ func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int { } // calcBaseFee calculates the basefee of the header. 
-func calcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int { +func calcBaseFee(config *params.ChainConfig, parent *types.WorkObject) *big.Int { var ( parentGasTarget = parent.GasLimit() / params.ElasticityMultiplier parentGasTargetBig = new(big.Int).SetUint64(parentGasTarget) diff --git a/consensus/misc/rewards.go b/consensus/misc/rewards.go index aa2bcd35f1..58387a7bd3 100644 --- a/consensus/misc/rewards.go +++ b/consensus/misc/rewards.go @@ -6,7 +6,7 @@ import ( "github.com/dominant-strategies/go-quai/core/types" ) -func CalculateReward(header *types.Header) *big.Int { +func CalculateReward(header *types.WorkObject) *big.Int { if header.Coinbase().IsInQiLedgerScope() { return calculateQiReward(header) } else { @@ -15,24 +15,24 @@ func CalculateReward(header *types.Header) *big.Int { } // Calculate the amount of Quai that Qi can be converted to. Expect the current Header and the Qi amount in "qits", returns the quai amount in "its" -func QiToQuai(currentHeader *types.Header, qiAmt *big.Int) *big.Int { +func QiToQuai(currentHeader *types.WorkObject, qiAmt *big.Int) *big.Int { quaiPerQi := new(big.Int).Div(calculateQuaiReward(currentHeader), calculateQiReward(currentHeader)) return new(big.Int).Mul(qiAmt, quaiPerQi) } // Calculate the amount of Qi that Quai can be converted to. Expect the current Header and the Quai amount in "its", returns the Qi amount in "qits" -func QuaiToQi(currentHeader *types.Header, quaiAmt *big.Int) *big.Int { +func QuaiToQi(currentHeader *types.WorkObject, quaiAmt *big.Int) *big.Int { qiPerQuai := new(big.Int).Div(calculateQiReward(currentHeader), calculateQuaiReward(currentHeader)) return new(big.Int).Mul(quaiAmt, qiPerQuai) } // CalculateQuaiReward calculates the quai that can be recieved for mining a block and returns value in its -func calculateQuaiReward(header *types.Header) *big.Int { +func calculateQuaiReward(header *types.WorkObject) *big.Int { return big.NewInt(1000000000000000000) } // CalculateQiReward caculates the qi that can be received for mining a block and returns value in qits -func calculateQiReward(header *types.Header) *big.Int { +func calculateQiReward(header *types.WorkObject) *big.Int { return big.NewInt(1000) } diff --git a/consensus/progpow/api.go b/consensus/progpow/api.go deleted file mode 100644 index 471eee247a..0000000000 --- a/consensus/progpow/api.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package progpow - -import ( - "errors" - - "github.com/dominant-strategies/go-quai/common" - "github.com/dominant-strategies/go-quai/core/types" -) - -var errProgpowStopped = errors.New("progpow stopped") - -// API exposes progpow related methods for the RPC interface. -type API struct { - progpow *Progpow -} - -// GetWork returns a work package for external miner. 
-// -// The work package consists of 3 strings: -// -// result[0] - 32 bytes hex encoded current block header pow-hash -// result[1] - 32 bytes hex encoded seed hash used for DAG -// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -// result[3] - hex encoded block number -func (api *API) GetWork() ([4]string, error) { - if api.progpow.remote == nil { - return [4]string{}, errors.New("not supported") - } - - var ( - workCh = make(chan [4]string, 1) - errc = make(chan error, 1) - ) - select { - case api.progpow.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: - case <-api.progpow.remote.exitCh: - return [4]string{}, errProgpowStopped - } - select { - case work := <-workCh: - return work, nil - case err := <-errc: - return [4]string{}, err - } -} - -// SubmitWork can be used by external miner to submit their POW solution. -// It returns an indication if the work was accepted. -// Note either an invalid solution, a stale work a non-existent work will return false. -func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool { - if api.progpow.remote == nil { - return false - } - - var errc = make(chan error, 1) - select { - case api.progpow.remote.submitWorkCh <- &mineResult{ - nonce: nonce, - hash: hash, - errc: errc, - }: - case <-api.progpow.remote.exitCh: - return false - } - err := <-errc - return err == nil -} diff --git a/consensus/progpow/consensus.go b/consensus/progpow/consensus.go index eed8d59bb0..58d32c960c 100644 --- a/consensus/progpow/consensus.go +++ b/consensus/progpow/consensus.go @@ -66,13 +66,13 @@ var ( // Author implements consensus.Engine, returning the header's coinbase as the // proof-of-work verified author of the block. -func (progpow *Progpow) Author(header *types.Header) (common.Address, error) { +func (progpow *Progpow) Author(header *types.WorkObject) (common.Address, error) { return header.Coinbase(), nil } // VerifyHeader checks whether a header conforms to the consensus rules of the // stock Quai progpow engine. -func (progpow *Progpow) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { +func (progpow *Progpow) VerifyHeader(chain consensus.ChainHeaderReader, header *types.WorkObject) error { nodeCtx := progpow.NodeLocation().Context() // If we're running a full engine faking, accept any input as valid if progpow.config.PowMode == ModeFullFake { @@ -94,7 +94,7 @@ func (progpow *Progpow) VerifyHeader(chain consensus.ChainHeaderReader, header * // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers // concurrently. The method returns a quit channel to abort the operations and // a results channel to retrieve the async verifications. 
-func (progpow *Progpow) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) { +func (progpow *Progpow) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.WorkObject) (chan<- struct{}, <-chan error) { // If we're running a full engine faking, accept any input as valid if progpow.config.PowMode == ModeFullFake || len(headers) == 0 { abort, results := make(chan struct{}), make(chan error, len(headers)) @@ -157,9 +157,9 @@ func (progpow *Progpow) VerifyHeaders(chain consensus.ChainHeaderReader, headers return abort, errorsOut } -func (progpow *Progpow) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, index int, unixNow int64) error { +func (progpow *Progpow) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.WorkObject, index int, unixNow int64) error { nodeCtx := progpow.NodeLocation().Context() - var parent *types.Header + var parent *types.WorkObject if index == 0 { parent = chain.GetHeader(headers[0].ParentHash(nodeCtx), headers[0].NumberU64(nodeCtx)-1) } else if headers[index-1].Hash() == headers[index].ParentHash(nodeCtx) { @@ -173,7 +173,7 @@ func (progpow *Progpow) verifyHeaderWorker(chain consensus.ChainHeaderReader, he // VerifyUncles verifies that the given block's uncles conform to the consensus // rules of the stock Quai progpow engine. -func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { +func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.WorkObject) error { nodeCtx := progpow.NodeLocation().Context() // If we're running a full engine faking, accept any input as valid if progpow.config.PowMode == ModeFullFake { @@ -187,7 +187,7 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.B return nil } // Gather the set of past uncles and ancestors - uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.Header) + uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.WorkObject) number, parent := block.NumberU64(nodeCtx)-1, block.ParentHash(nodeCtx) for i := 0; i < 7; i++ { @@ -199,7 +199,7 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.B // If the ancestor doesn't have any uncles, we don't have to iterate them if ancestorHeader.UncleHash() != types.EmptyUncleHash { // Need to add those uncles to the banned list too - ancestor := chain.GetBlock(parent, number) + ancestor := chain.GetWorkObject(parent) if ancestor == nil { break } @@ -209,7 +209,7 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.B } parent, number = ancestorHeader.ParentHash(nodeCtx), number-1 } - ancestors[block.Hash()] = block.Header() + ancestors[block.Hash()] = block uncles.Add(block.Hash()) // Verify each of the uncles that it's recent, but not an ancestor @@ -225,23 +225,40 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.B if ancestors[hash] != nil { return errUncleIsAncestor } - if ancestors[uncle.ParentHash(nodeCtx)] == nil || uncle.ParentHash(nodeCtx) == block.ParentHash(nodeCtx) { + if ancestors[uncle.ParentHash()] == nil || uncle.ParentHash() == block.ParentHash(nodeCtx) { return errDanglingUncle } - if err := progpow.verifyHeader(chain, uncle, ancestors[uncle.ParentHash(nodeCtx)], true, time.Now().Unix()); err != nil { + // Verify the seal and get the powHash for the given header + _, err := progpow.verifySeal(uncle) + if err != nil { return err } + + // Verify the block's 
difficulty based on its timestamp and parent's difficulty + // difficulty adjustment can only be checked in zone + if nodeCtx == common.ZONE_CTX { + parent := chain.GetHeaderByHash(uncle.ParentHash()) + expected := progpow.CalcDifficulty(chain, parent.WorkObjectHeader()) + if expected.Cmp(uncle.Difficulty()) != 0 { + return fmt.Errorf("uncle has invalid difficulty: have %v, want %v", uncle.Difficulty(), expected) + } + } } return nil } // verifyHeader checks whether a header conforms to the consensus rules -func (progpow *Progpow) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, unixNow int64) error { +func (progpow *Progpow) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.WorkObject, uncle bool, unixNow int64) error { nodeCtx := progpow.NodeLocation().Context() // Ensure that the header's extra-data section is of a reasonable size if uint64(len(header.Extra())) > params.MaximumExtraDataSize { return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra()), params.MaximumExtraDataSize) } + // verify that the hash of the header in the Body matches the header hash specified in the work object header + expectedHeaderHash := header.Body().Header().Hash() + if header.HeaderHash() != expectedHeaderHash { + return fmt.Errorf("invalid header hash: have %v, want %v", header.HeaderHash(), expectedHeaderHash) + } // Verify the header's timestamp if !uncle { if header.Time() > uint64(unixNow+allowedFutureBlockTimeSeconds) { @@ -254,7 +271,7 @@ func (progpow *Progpow) verifyHeader(chain consensus.ChainHeaderReader, header, // Verify the block's difficulty based on its timestamp and parent's difficulty // difficulty adjustment can only be checked in zone if nodeCtx == common.ZONE_CTX { - expected := progpow.CalcDifficulty(chain, parent) + expected := progpow.CalcDifficulty(chain, parent.WorkObjectHeader()) if expected.Cmp(header.Difficulty()) != 0 { return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty(), expected) } @@ -435,7 +452,7 @@ func (progpow *Progpow) verifyHeader(chain consensus.ChainHeaderReader, header, // CalcDifficulty is the difficulty adjustment algorithm. It returns // the difficulty that a new block should have when created at time // given the parent block's time and difficulty. 
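One of the new checks in the verifyHeader hunk above is a consistency test between the hash committed in the outer work-object header (HeaderHash) and the hash of the full header carried in the body. Below is a minimal stand-alone sketch of that invariant; the types and field names are toy stand-ins, not the real types.WorkObject definitions.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
)

// header is a toy stand-in for the full block header carried in the body.
type header struct {
	parentHash string
	number     uint64
	time       uint64
}

// hash derives a digest over the header fields.
func (h *header) hash() string {
	sum := sha256.Sum256([]byte(fmt.Sprintf("%s|%d|%d", h.parentHash, h.number, h.time)))
	return hex.EncodeToString(sum[:])
}

// workObject pairs the hash the miner committed to with the full header
// carried alongside it in the body.
type workObject struct {
	headerHash string
	body       *header
}

var errHeaderHashMismatch = errors.New("invalid header hash: body header does not match committed hash")

// verifyHeaderHash mirrors the new check: the hash stored in the outer work
// object must equal the hash of the header actually present in the body.
func verifyHeaderHash(wo *workObject) error {
	if wo.headerHash != wo.body.hash() {
		return errHeaderHashMismatch
	}
	return nil
}

func main() {
	h := &header{parentHash: "abc", number: 42, time: 1700000000}
	wo := &workObject{headerHash: h.hash(), body: h}
	fmt.Println("consistent work object:", verifyHeaderHash(wo))

	// Tamper with the body after the hash was committed; verification fails.
	wo.body.time++
	fmt.Println("tampered work object:  ", verifyHeaderHash(wo))
}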
-func (progpow *Progpow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.Header) *big.Int { +func (progpow *Progpow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.WorkObjectHeader) *big.Int { nodeCtx := progpow.NodeLocation().Context() if nodeCtx != common.ZONE_CTX { @@ -452,7 +469,7 @@ func (progpow *Progpow) CalcDifficulty(chain consensus.ChainHeaderReader, parent // Genesis Difficulty is the difficulty in the Genesis Block divided by the number of total slices active return new(big.Int).Div(parent.Difficulty(), big.NewInt(int64((progpow.NodeLocation().Region()+1)*(progpow.NodeLocation().Zone()+1)))) } - parentOfParent := chain.GetHeaderByHash(parent.ParentHash(nodeCtx)) + parentOfParent := chain.GetHeaderByHash(parent.ParentHash()) if parentOfParent == nil || chain.IsGenesisHash(parentOfParent.Hash()) { return parent.Difficulty() } @@ -480,7 +497,7 @@ func (progpow *Progpow) CalcDifficulty(chain consensus.ChainHeaderReader, parent return x } -func (progpow *Progpow) IsDomCoincident(chain consensus.ChainHeaderReader, header *types.Header) bool { +func (progpow *Progpow) IsDomCoincident(chain consensus.ChainHeaderReader, header *types.WorkObject) bool { _, order, err := progpow.CalcOrder(header) if err != nil { return false @@ -488,8 +505,7 @@ func (progpow *Progpow) IsDomCoincident(chain consensus.ChainHeaderReader, heade return order < chain.Config().Location.Context() } -func (progpow *Progpow) ComputePowLight(header *types.Header) (mixHash, powHash common.Hash) { - nodeCtx := progpow.config.NodeLocation.Context() +func (progpow *Progpow) ComputePowLight(header *types.WorkObjectHeader) (mixHash, powHash common.Hash) { powLight := func(size uint64, cache []uint32, hash []byte, nonce uint64, blockNumber uint64) ([]byte, []byte) { ethashCache := progpow.cache(blockNumber) if ethashCache.cDag == nil { @@ -499,9 +515,9 @@ func (progpow *Progpow) ComputePowLight(header *types.Header) (mixHash, powHash } return progpowLight(size, cache, hash, nonce, blockNumber, ethashCache.cDag) } - cache := progpow.cache(header.NumberU64(nodeCtx)) - size := datasetSize(header.NumberU64(nodeCtx)) - digest, result := powLight(size, cache.cache, header.SealHash().Bytes(), header.NonceU64(), header.NumberU64(common.ZONE_CTX)) + cache := progpow.cache(header.NumberU64()) + size := datasetSize(header.NumberU64()) + digest, result := powLight(size, cache.cache, header.SealHash().Bytes(), header.NonceU64(), header.NumberU64()) mixHash = common.BytesToHash(digest) powHash = common.BytesToHash(result) header.PowDigest.Store(mixHash) @@ -515,19 +531,18 @@ func (progpow *Progpow) ComputePowLight(header *types.Header) (mixHash, powHash } // VerifySeal returns the PowHash and the verifySeal output -func (progpow *Progpow) VerifySeal(header *types.Header) (common.Hash, error) { +func (progpow *Progpow) VerifySeal(header *types.WorkObjectHeader) (common.Hash, error) { return progpow.verifySeal(header) } // verifySeal checks whether a block satisfies the PoW difficulty requirements, // either using the usual progpow cache for it, or alternatively using a full DAG // to make remote mining fast. 
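The seal verification that follows rests on the usual proof-of-work boundary condition: a candidate hash, read as a 256-bit integer, must not exceed 2^256 divided by the header's difficulty (the same target computation is visible in mine() further down). A stand-alone illustration of that relation, with a toy SHA-256 "pow" in place of the real progpow hash:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"math/big"
)

// big2e256 is 2^256, the size of the hash space.
var big2e256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)

// sealMeetsTarget reports whether a candidate pow hash satisfies the
// difficulty: the hash, as a 256-bit integer, must be <= 2^256/difficulty.
func sealMeetsTarget(powHash [32]byte, difficulty *big.Int) bool {
	target := new(big.Int).Div(big2e256, difficulty)
	return new(big.Int).SetBytes(powHash[:]).Cmp(target) <= 0
}

// toyPow is a placeholder for the real progpow hash: it just hashes the seal
// bytes together with the nonce.
func toyPow(sealHash [32]byte, nonce uint64) [32]byte {
	var buf [40]byte
	copy(buf[:32], sealHash[:])
	binary.BigEndian.PutUint64(buf[32:], nonce)
	return sha256.Sum256(buf[:])
}

func main() {
	sealHash := sha256.Sum256([]byte("example header seal hash"))
	difficulty := big.NewInt(1 << 16) // higher difficulty => smaller target

	// Scan nonces and report the first one whose hash meets the target.
	for nonce := uint64(0); nonce < 1_000_000; nonce++ {
		if sealMeetsTarget(toyPow(sealHash, nonce), difficulty) {
			fmt.Println("found qualifying nonce:", nonce)
			return
		}
	}
	fmt.Println("no qualifying nonce in range")
}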
-func (progpow *Progpow) verifySeal(header *types.Header) (common.Hash, error) { - nodeCtx := progpow.NodeLocation().Context() +func (progpow *Progpow) verifySeal(header *types.WorkObjectHeader) (common.Hash, error) { // If we're running a fake PoW, accept any seal as valid if progpow.config.PowMode == ModeFake || progpow.config.PowMode == ModeFullFake { time.Sleep(progpow.fakeDelay) - if progpow.fakeFail == header.NumberU64(nodeCtx) { + if progpow.fakeFail == header.NumberU64() { return common.Hash{}, errInvalidPoW } return common.Hash{}, nil @@ -559,18 +574,18 @@ func (progpow *Progpow) verifySeal(header *types.Header) (common.Hash, error) { // Prepare implements consensus.Engine, initializing the difficulty field of a // header to conform to the progpow protocol. The changes are done inline. -func (progpow *Progpow) Prepare(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header) error { - header.SetDifficulty(progpow.CalcDifficulty(chain, parent)) +func (progpow *Progpow) Prepare(chain consensus.ChainHeaderReader, header *types.WorkObject, parent *types.WorkObject) error { + header.WorkObjectHeader().SetDifficulty(progpow.CalcDifficulty(chain, parent.WorkObjectHeader())) return nil } // Finalize implements consensus.Engine, accumulating the block and uncle rewards, // setting the final state on the header -func (progpow *Progpow) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { +func (progpow *Progpow) Finalize(chain consensus.ChainHeaderReader, header *types.WorkObject, state *state.StateDB) { + nodeLocation := progpow.config.NodeLocation + nodeCtx := progpow.config.NodeLocation.Context() // Accumulate any block and uncle rewards and commit the final state root - accumulateRewards(chain.Config(), state, header, uncles, progpow.logger) - nodeLocation := progpow.NodeLocation() - nodeCtx := progpow.NodeLocation().Context() + accumulateRewards(chain.Config(), state, header, progpow.logger) if nodeCtx == common.ZONE_CTX && chain.IsGenesisHash(header.ParentHash(nodeCtx)) { alloc := core.ReadGenesisAlloc("genallocs/gen_alloc_"+nodeLocation.Name()+".json", progpow.logger) @@ -596,21 +611,22 @@ func (progpow *Progpow) Finalize(chain consensus.ChainHeaderReader, header *type } core.AddGenesisUtxos(state, nodeLocation, progpow.logger) } - header.SetUTXORoot(state.UTXORoot()) - header.SetEVMRoot(state.IntermediateRoot(true)) + header.Header().SetUTXORoot(state.UTXORoot()) + header.Header().SetEVMRoot(state.IntermediateRoot(true)) } // FinalizeAndAssemble implements consensus.Engine, accumulating the block and // uncle rewards, setting the final state and assembling the block. 
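In the Finalize hunk above, accumulateRewards loses its uncles argument, and the accumulateRewards hunk further down removes the per-uncle reward loop entirely: only the block's own coinbase is credited, and an out-of-scope coinbase skips the reward with a log. A toy sketch of that simplified flow, using stand-in address and state types rather than the real core/state API:

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// address is a toy stand-in; the real code resolves
// header.Coinbase().InternalAddress() and may fail for out-of-scope addresses.
type address struct {
	name    string
	inScope bool
}

func (a address) internal() (string, error) {
	if !a.inScope {
		return "", errors.New("address is out of scope")
	}
	return a.name, nil
}

type stateDB struct {
	balances map[string]*big.Int
}

func (s *stateDB) addBalance(addr string, amount *big.Int) {
	if s.balances[addr] == nil {
		s.balances[addr] = new(big.Int)
	}
	s.balances[addr].Add(s.balances[addr], amount)
}

// accumulateReward mirrors the simplified flow: resolve the coinbase, skip the
// reward if it is out of scope, otherwise credit the full block reward to the
// coinbase. No uncle rewards are accumulated any more.
func accumulateReward(state *stateDB, coinbase address, blockReward *big.Int) {
	internal, err := coinbase.internal()
	if err != nil {
		fmt.Println("block has out-of-scope coinbase, skipping block reward:", err)
		return
	}
	state.addBalance(internal, blockReward)
}

func main() {
	state := &stateDB{balances: make(map[string]*big.Int)}
	accumulateReward(state, address{name: "miner-1", inScope: true}, big.NewInt(5_000_000_000))
	accumulateReward(state, address{name: "foreign", inScope: false}, big.NewInt(5_000_000_000))
	fmt.Println("miner-1 balance:", state.balances["miner-1"])
}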
-func (progpow *Progpow) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.Block, error) { - nodeCtx := progpow.NodeLocation().Context() +func (progpow *Progpow) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.WorkObject, state *state.StateDB, txs []*types.Transaction, uncles []*types.WorkObjectHeader, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.WorkObject, error) { + nodeCtx := progpow.config.NodeLocation.Context() if nodeCtx == common.ZONE_CTX && chain.ProcessingState() { // Finalize block - progpow.Finalize(chain, header, state, txs, uncles) + progpow.Finalize(chain, header, state) } + woBody := types.NewWorkObjectBody(header.Header(), txs, etxs, uncles, subManifest, receipts, trie.NewStackTrie(nil), nodeCtx) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, etxs, subManifest, receipts, trie.NewStackTrie(nil), nodeCtx), nil + return types.NewWorkObject(header.WorkObjectHeader(), woBody, nil, types.BlockObject), nil } func (progpow *Progpow) NodeLocation() common.Location { @@ -620,14 +636,16 @@ func (progpow *Progpow) NodeLocation() common.Location { // AccumulateRewards credits the coinbase of the given block with the mining // reward. The total reward consists of the static block reward and rewards for // included uncles. The coinbase of each uncle block is also rewarded. -func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header, logger *log.Logger) { +func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.WorkObject, logger *log.Logger) { // Select the correct block reward based on chain progression blockReward := misc.CalculateReward(header) - nodeCtx := config.Location.Context() coinbase, err := header.Coinbase().InternalAddress() if err != nil { - logger.WithField("hash", header.Hash().String()).Error("Block has out-of-scope coinbase, skipping block reward") + logger.WithFields(log.Fields{ + "Address": header.Coinbase().String(), + "Hash": header.Hash().String(), + }).Error("Block has out of scope coinbase, skipping block reward") return } if !header.Coinbase().IsInQuaiLedgerScope() { @@ -637,24 +655,5 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header }).Debug("Block coinbase is in Qi ledger, skipping Quai block reward") // this log is largely unnecessary return } - - // Accumulate the rewards for the miner and any included uncles - reward := new(big.Int).Set(blockReward) - r := new(big.Int) - for _, uncle := range uncles { - coinbase, err := uncle.Coinbase().InternalAddress() - if err != nil { - logger.WithField("hash", uncle.Hash().String()).Error("Uncle has out-of-scope coinbase, skipping reward") - continue - } - r.Add(uncle.Number(nodeCtx), big8) - r.Sub(r, header.Number(nodeCtx)) - r.Mul(r, blockReward) - r.Div(r, big8) - state.AddBalance(coinbase, r) - - r.Div(blockReward, big32) - reward.Add(reward, r) - } - state.AddBalance(coinbase, reward) + state.AddBalance(coinbase, blockReward) } diff --git a/consensus/progpow/poem.go b/consensus/progpow/poem.go index 3b8f91c710..cb93439bdb 100644 --- a/consensus/progpow/poem.go +++ b/consensus/progpow/poem.go @@ -13,7 +13,7 @@ import ( ) // CalcOrder returns the order of the block within the 
hierarchy of chains -func (progpow *Progpow) CalcOrder(header *types.Header) (*big.Int, int, error) { +func (progpow *Progpow) CalcOrder(header *types.WorkObject) (*big.Int, int, error) { nodeCtx := progpow.config.NodeLocation.Context() // Except for the slice [0,0] have to check if the header hash is the genesis hash if header.NumberU64(nodeCtx) == 0 { @@ -21,7 +21,7 @@ func (progpow *Progpow) CalcOrder(header *types.Header) (*big.Int, int, error) { } // Verify the seal and get the powHash for the given header - powHash, err := progpow.verifySeal(header) + powHash, err := progpow.verifySeal(header.WorkObjectHeader()) if err != nil { return big0, -1, err } @@ -69,7 +69,7 @@ func (progpow *Progpow) IntrinsicLogS(powHash common.Hash) *big.Int { } // TotalLogS() returns the total entropy reduction if the chain since genesis to the given header -func (progpow *Progpow) TotalLogS(chain consensus.GenesisReader, header *types.Header) *big.Int { +func (progpow *Progpow) TotalLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int { if chain.IsGenesisHash(header.Hash()) { return big.NewInt(0) } @@ -94,7 +94,7 @@ func (progpow *Progpow) TotalLogS(chain consensus.GenesisReader, header *types.H return big.NewInt(0) } -func (progpow *Progpow) TotalLogPhS(header *types.Header) *big.Int { +func (progpow *Progpow) TotalLogPhS(header *types.WorkObject) *big.Int { switch progpow.config.NodeLocation.Context() { case common.PRIME_CTX: totalS := header.ParentEntropy(common.PRIME_CTX) @@ -110,7 +110,7 @@ func (progpow *Progpow) TotalLogPhS(header *types.Header) *big.Int { return big.NewInt(0) } -func (progpow *Progpow) DeltaLogS(chain consensus.GenesisReader, header *types.Header) *big.Int { +func (progpow *Progpow) DeltaLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int { intrinsicS, order, err := progpow.CalcOrder(header) if err != nil { return big.NewInt(0) @@ -129,7 +129,7 @@ func (progpow *Progpow) DeltaLogS(chain consensus.GenesisReader, header *types.H return big.NewInt(0) } -func (progpow *Progpow) UncledLogS(block *types.Block) *big.Int { +func (progpow *Progpow) UncledLogS(block *types.WorkObject) *big.Int { uncles := block.Uncles() totalUncledLogS := big.NewInt(0) for _, uncle := range uncles { @@ -145,7 +145,7 @@ func (progpow *Progpow) UncledLogS(block *types.Block) *big.Int { return totalUncledLogS } -func (progpow *Progpow) UncledSubDeltaLogS(chain consensus.GenesisReader, header *types.Header) *big.Int { +func (progpow *Progpow) UncledSubDeltaLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int { // Treating the genesis block differntly if chain.IsGenesisHash(header.Hash()) { return big.NewInt(0) @@ -171,7 +171,7 @@ func (progpow *Progpow) UncledSubDeltaLogS(chain consensus.GenesisReader, header // CalcRank returns the rank of the block within the hierarchy of chains, this // determines the level of the interlink -func (progpow *Progpow) CalcRank(chain consensus.GenesisReader, header *types.Header) (int, error) { +func (progpow *Progpow) CalcRank(chain consensus.GenesisReader, header *types.WorkObject) (int, error) { if chain.IsGenesisHash(header.Hash()) { return 0, nil } @@ -184,7 +184,7 @@ func (progpow *Progpow) CalcRank(chain consensus.GenesisReader, header *types.He } // Verify the seal and get the powHash for the given header - powHash, err := progpow.verifySeal(header) + powHash, err := progpow.verifySeal(header.WorkObjectHeader()) if err != nil { return 0, err } diff --git a/consensus/progpow/progpow.go b/consensus/progpow/progpow.go 
index ddff0a8e09..037499aa91 100644 --- a/consensus/progpow/progpow.go +++ b/consensus/progpow/progpow.go @@ -15,9 +15,7 @@ import ( "unsafe" "github.com/dominant-strategies/go-quai/common" - "github.com/dominant-strategies/go-quai/consensus" "github.com/dominant-strategies/go-quai/log" - "github.com/dominant-strategies/go-quai/rpc" mmap "github.com/edsrzf/mmap-go" "github.com/hashicorp/golang-lru/simplelru" ) @@ -170,7 +168,6 @@ type Progpow struct { rand *rand.Rand // Properly seeded random source for nonces threads int // Number of threads to mine on if mining update chan struct{} // Notification channel to update mining parameters - remote *remoteSealer // The fields below are hooks for testing shared *Progpow // Shared PoW verifier to avoid cache regeneration @@ -206,7 +203,6 @@ func New(config Config, notify []string, noverify bool, logger *log.Logger) *Pro if config.PowMode == ModeShared { progpow.shared = sharedProgpow } - progpow.remote = startRemoteSealer(progpow, notify, noverify) return progpow } @@ -267,19 +263,6 @@ func NewShared() *Progpow { return &Progpow{shared: sharedProgpow} } -// Close closes the exit channel to notify all backend threads exiting. -func (progpow *Progpow) Close() error { - progpow.closeOnce.Do(func() { - // Short circuit if the exit channel is not allocated. - if progpow.remote == nil { - return - } - close(progpow.remote.requestExit) - <-progpow.remote.exitCh - }) - return nil -} - // lru tracks caches or datasets by their last use time, keeping at most N of them. type lru struct { what string @@ -465,23 +448,3 @@ func (progpow *Progpow) SetThreads(threads int) { } } } - -// APIs implements consensus.Engine, returning the user facing RPC APIs. -func (progpow *Progpow) APIs(chain consensus.ChainHeaderReader) []rpc.API { - // In order to ensure backward compatibility, we exposes progpow RPC APIs - // to both eth and progpow namespaces. - return []rpc.API{ - { - Namespace: "eth", - Version: "1.0", - Service: &API{progpow}, - Public: true, - }, - { - Namespace: "progpow", - Version: "1.0", - Service: &API{progpow}, - Public: true, - }, - } -} diff --git a/consensus/progpow/sealer.go b/consensus/progpow/sealer.go index c63062776f..2644cffac4 100644 --- a/consensus/progpow/sealer.go +++ b/consensus/progpow/sealer.go @@ -1,21 +1,15 @@ package progpow import ( - "bytes" - "context" crand "crypto/rand" - "encoding/json" "errors" "math" "math/big" "math/rand" - "net/http" "runtime" "sync" - "time" "github.com/dominant-strategies/go-quai/common" - "github.com/dominant-strategies/go-quai/common/hexutil" "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/log" ) @@ -33,10 +27,10 @@ var ( // Seal implements consensus.Engine, attempting to find a nonce that satisfies // the header's difficulty requirements. 
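The Seal and mine hunks that follow keep the multi-threaded local sealing loop while the remote sealer is dropped: one miner goroutine is spawned per thread, each scanning nonces from a random seed, and the remaining miners are aborted once one of them finds a qualifying nonce. A compact, self-contained sketch of that fan-out pattern, with toy SHA-256 hashing standing in for progpow:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"math/big"
	"math/rand"
	"sync"
)

var big2e256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)

// mine searches nonces starting from seed until it finds one whose toy hash
// meets the target, or until abort is closed. The first hit is sent to found.
func mine(sealHash [32]byte, target *big.Int, seed uint64, abort <-chan struct{}, found chan<- uint64) {
	var buf [40]byte
	copy(buf[:32], sealHash[:])
	for nonce := seed; ; nonce++ {
		select {
		case <-abort:
			return
		default:
		}
		binary.BigEndian.PutUint64(buf[32:], nonce)
		sum := sha256.Sum256(buf[:])
		if new(big.Int).SetBytes(sum[:]).Cmp(target) <= 0 {
			select {
			case found <- nonce:
			case <-abort:
			}
			return
		}
	}
}

// seal fans out `threads` miners, returns the first qualifying nonce, and
// stops the rest -- the same shape as the Seal/mine pair in the sealer.
func seal(sealHash [32]byte, difficulty *big.Int, threads int) uint64 {
	target := new(big.Int).Div(big2e256, difficulty)
	abort := make(chan struct{})
	found := make(chan uint64)
	var pend sync.WaitGroup
	for i := 0; i < threads; i++ {
		pend.Add(1)
		go func(seed uint64) {
			defer pend.Done()
			mine(sealHash, target, seed, abort, found)
		}(rand.Uint64())
	}
	nonce := <-found // first result wins
	close(abort)     // tell the remaining miners to stop
	pend.Wait()
	return nonce
}

func main() {
	sealHash := sha256.Sum256([]byte("header seal hash"))
	nonce := seal(sealHash, big.NewInt(1<<16), 4)
	fmt.Println("sealed with nonce:", nonce)
}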
-func (progpow *Progpow) Seal(header *types.Header, results chan<- *types.Header, stop <-chan struct{}) error { +func (progpow *Progpow) Seal(header *types.WorkObject, results chan<- *types.WorkObject, stop <-chan struct{}) error { // If we're running a fake PoW, simply return a 0 nonce immediately if progpow.config.PowMode == ModeFake || progpow.config.PowMode == ModeFullFake { - header.SetNonce(types.BlockNonce{}) + header.WorkObjectHeader().SetNonce(types.BlockNonce{}) select { case results <- header: default: @@ -73,7 +67,7 @@ func (progpow *Progpow) Seal(header *types.Header, results chan<- *types.Header, } var ( pend sync.WaitGroup - locals = make(chan *types.Header) + locals = make(chan *types.WorkObject) ) for i := 0; i < threads; i++ { pend.Add(1) @@ -84,7 +78,7 @@ func (progpow *Progpow) Seal(header *types.Header, results chan<- *types.Header, } // Wait until sealing is terminated or a nonce is found go func() { - var result *types.Header + var result *types.WorkObject select { case <-stop: // Outside abort, stop all miner threads @@ -115,7 +109,7 @@ func (progpow *Progpow) Seal(header *types.Header, results chan<- *types.Header, // mine is the actual proof-of-work miner that searches for a nonce starting from // seed that results in correct final block difficulty. -func (progpow *Progpow) mine(header *types.Header, id int, seed uint64, abort chan struct{}, found chan *types.Header) { +func (progpow *Progpow) mine(header *types.WorkObject, id int, seed uint64, abort chan struct{}, found chan *types.WorkObject) { // Extract some data from the header var ( target = new(big.Int).Div(big2e256, header.Difficulty()) @@ -154,8 +148,8 @@ search: digest, result := powLight(size, cache.cache, header.SealHash().Bytes(), nonce, header.NumberU64(common.ZONE_CTX)) if new(big.Int).SetBytes(result).Cmp(target) <= 0 { // Correct nonce found, create a new header with it - header = types.CopyHeader(header) - header.SetNonce(types.EncodeNonce(nonce)) + header = types.CopyWorkObject(header) + header.WorkObjectHeader().SetNonce(types.EncodeNonce(nonce)) hashBytes := common.BytesToHash(digest) header.SetMixHash(hashBytes) found <- header @@ -165,284 +159,3 @@ search: } } } - -// This is the timeout for HTTP requests to notify external miners. -const remoteSealerTimeout = 1 * time.Second - -type remoteSealer struct { - works map[common.Hash]*types.Header - rates map[common.Hash]hashrate - currentHeader *types.Header - currentWork [4]string - notifyCtx context.Context - cancelNotify context.CancelFunc // cancels all notification requests - reqWG sync.WaitGroup // tracks notification request goroutines - - progpow *Progpow - noverify bool - notifyURLs []string - results chan<- *types.Header - workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer - fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work - submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result - fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer. - submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate - requestExit chan struct{} - exitCh chan struct{} -} - -// sealTask wraps a seal header with relative result channel for remote sealer thread. -type sealTask struct { - header *types.Header - results chan<- *types.Header -} - -// mineResult wraps the pow solution parameters for the specified block. 
-type mineResult struct { - nonce types.BlockNonce - hash common.Hash - - errc chan error -} - -// hashrate wraps the hash rate submitted by the remote sealer. -type hashrate struct { - id common.Hash - ping time.Time - rate uint64 - - done chan struct{} -} - -// sealWork wraps a seal work package for remote sealer. -type sealWork struct { - errc chan error - res chan [4]string -} - -func startRemoteSealer(progpow *Progpow, urls []string, noverify bool) *remoteSealer { - ctx, cancel := context.WithCancel(context.Background()) - s := &remoteSealer{ - progpow: progpow, - noverify: noverify, - notifyURLs: urls, - notifyCtx: ctx, - cancelNotify: cancel, - works: make(map[common.Hash]*types.Header), - rates: make(map[common.Hash]hashrate), - workCh: make(chan *sealTask), - fetchWorkCh: make(chan *sealWork), - submitWorkCh: make(chan *mineResult), - fetchRateCh: make(chan chan uint64), - submitRateCh: make(chan *hashrate), - requestExit: make(chan struct{}), - exitCh: make(chan struct{}), - } - go s.loop() - return s -} - -func (s *remoteSealer) loop() { - defer func() { - s.progpow.logger.Trace("Progpow remote sealer is exiting") - s.cancelNotify() - s.reqWG.Wait() - close(s.exitCh) - }() - - nodeCtx := s.progpow.config.NodeLocation.Context() - - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - - for { - select { - case work := <-s.workCh: - // Update current work with new received header. - // Note same work can be past twice, happens when changing CPU threads. - s.results = work.results - s.makeWork(work.header) - s.notifyWork() - - case work := <-s.fetchWorkCh: - // Return current mining work to remote miner. - if s.currentHeader == nil { - work.errc <- errNoMiningWork - } else { - work.res <- s.currentWork - } - - case result := <-s.submitWorkCh: - // Verify submitted PoW solution based on maintained mining blocks. - if s.submitWork(result.nonce, result.hash) { - result.errc <- nil - } else { - result.errc <- errInvalidSealResult - } - - case result := <-s.submitRateCh: - // Trace remote sealer's hash rate by submitted value. - s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()} - close(result.done) - - case req := <-s.fetchRateCh: - // Gather all hash rate submitted by remote sealer. - var total uint64 - for _, rate := range s.rates { - // this could overflow - total += rate.rate - } - req <- total - - case <-ticker.C: - // Clear stale submitted hash rate. - for id, rate := range s.rates { - if time.Since(rate.ping) > 10*time.Second { - delete(s.rates, id) - } - } - // Clear stale pending blocks - if s.currentHeader != nil { - for hash, header := range s.works { - if header.NumberU64(nodeCtx)+staleThreshold <= s.currentHeader.NumberU64(nodeCtx) { - delete(s.works, hash) - } - } - } - - case <-s.requestExit: - return - } - } -} - -// makeWork creates a work package for external miner. -// -// The work package consists of 3 strings: -// -// result[0], 32 bytes hex encoded current header pow-hash -// result[1], 32 bytes hex encoded seed hash used for DAG -// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty -// result[3], hex encoded header number -func (s *remoteSealer) makeWork(header *types.Header) { - nodeCtx := s.progpow.config.NodeLocation.Context() - hash := header.SealHash() - s.currentWork[0] = hash.Hex() - s.currentWork[1] = hexutil.EncodeBig(header.Number(nodeCtx)) - s.currentWork[2] = common.BytesToHash(new(big.Int).Div(big2e256, header.Difficulty()).Bytes()).Hex() - - // Trace the seal work fetched by remote sealer. 
- s.currentHeader = header - s.works[hash] = header -} - -// notifyWork notifies all the specified mining endpoints of the availability of -// new work to be processed. -func (s *remoteSealer) notifyWork() { - work := s.currentWork - - // Encode the JSON payload of the notification. When NotifyFull is set, - // this is the complete block header, otherwise it is a JSON array. - var blob []byte - if s.progpow.config.NotifyFull { - blob, _ = json.Marshal(s.currentHeader) - } else { - blob, _ = json.Marshal(work) - } - - s.reqWG.Add(len(s.notifyURLs)) - for _, url := range s.notifyURLs { - go s.sendNotification(s.notifyCtx, url, blob, work) - } -} - -func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) { - defer s.reqWG.Done() - - req, err := http.NewRequest("POST", url, bytes.NewReader(json)) - if err != nil { - s.progpow.logger.WithField("err", err).Warn("Failed to create remote miner notification") - return - } - ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout) - defer cancel() - req = req.WithContext(ctx) - req.Header.Set("Content-Type", "application/json") - - resp, err := http.DefaultClient.Do(req) - if err != nil { - s.progpow.logger.WithField("err", err).Warn("Failed to notify remote miner") - } else { - s.progpow.logger.WithFields(log.Fields{ - "miner": url, - "hash": work[0], - "target": work[2], - }).Trace("Notified remote miner") - resp.Body.Close() - } -} - -// submitWork verifies the submitted pow solution, returning -// whether the solution was accepted or not (not can be both a bad pow as well as -// any other error, like no pending work or stale mining result). -func (s *remoteSealer) submitWork(nonce types.BlockNonce, sealhash common.Hash) bool { - if s.currentHeader == nil { - s.progpow.logger.WithField("sealhash", sealhash).Warn("Pending work without block") - return false - } - nodeCtx := s.progpow.config.NodeLocation.Context() - // Make sure the work submitted is present - header := s.works[sealhash] - if header == nil { - s.progpow.logger.WithFields(log.Fields{ - "sealhash": sealhash, - "curnumber": s.currentHeader.NumberU64(nodeCtx), - }).Warn("Work submitted but none pending") - return false - } - // Verify the correctness of submitted result. - header.SetNonce(nonce) - - start := time.Now() - if !s.noverify { - panic("submit work with verification not supported") - } - // Make sure the result channel is assigned. - if s.results == nil { - s.progpow.logger.Warn("Progpow result channel is empty, submitted mining result is rejected") - return false - } - s.progpow.logger.WithFields(log.Fields{ - "sealhash": sealhash, - "elapsed": common.PrettyDuration(time.Since(start)), - }).Trace("Verified correct proof-of-work") - - // Solutions seems to be valid, return to the miner and notify acceptance. - solution := header - - // The submitted solution is within the scope of acceptance. - if solution.NumberU64(nodeCtx)+staleThreshold > s.currentHeader.NumberU64(nodeCtx) { - select { - case s.results <- solution: - s.progpow.logger.WithFields(log.Fields{ - "number": solution.NumberU64(nodeCtx), - "sealhash": sealhash, - "hash": solution.Hash(), - }).Trace("Work submitted is acceptable") - return true - default: - s.progpow.logger.WithFields(log.Fields{ - "mode": "remote", - "sealhash": sealhash, - }).Warn("Sealing result is not read by miner") - return false - } - } - // The submitted block is too old to accept, drop it. 
- s.progpow.logger.WithFields(log.Fields{ - "number": solution.NumberU64(nodeCtx), - "sealhash": sealhash, - "hash": solution.Hash(), - }).Trace("Work submitted is too old") - return false -} diff --git a/core/block_validator.go b/core/block_validator.go index 9bc7854531..56cdf5001c 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -52,7 +52,7 @@ func NewBlockValidator(config *params.ChainConfig, headerChain *HeaderChain, eng // ValidateBody validates the given block's uncles and verifies the block // header's transaction and uncle roots. The headers are assumed to be already // validated at this point. -func (v *BlockValidator) ValidateBody(block *types.Block) error { +func (v *BlockValidator) ValidateBody(block *types.WorkObject) error { nodeCtx := v.config.Location.Context() // Check whether the block's known, and if not, that it's linkable if nodeCtx == common.ZONE_CTX && v.hc.ProcessingState() { @@ -74,7 +74,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if len(block.Uncles()) != 0 { return fmt.Errorf("region body has non zero uncles") } - subManifestHash := types.DeriveSha(block.SubManifest(), trie.NewStackTrie(nil)) + subManifestHash := types.DeriveSha(block.Manifest(), trie.NewStackTrie(nil)) if subManifestHash == types.EmptyRootHash || subManifestHash != header.ManifestHash(nodeCtx+1) { // If we have a subordinate chain, it is impossible for the subordinate manifest to be empty return ErrBadSubManifest @@ -107,7 +107,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { // transition, such as amount of used gas, the receipt roots and the state root // itself. ValidateState returns a database batch if the validation was a success // otherwise nil and an error is returned. -func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, utxoEtxs []*types.Transaction, etxSet *types.EtxSet, usedGas uint64) error { +func (v *BlockValidator) ValidateState(block *types.WorkObject, statedb *state.StateDB, receipts types.Receipts, utxoEtxs []*types.Transaction, etxSet *types.EtxSet, usedGas uint64) error { start := time.Now() header := types.CopyHeader(block.Header()) time1 := common.PrettyDuration(time.Since(start)) @@ -179,7 +179,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD // CalcGasLimit computes the gas limit of the next block after parent. It aims // to keep the baseline gas close to the provided target, and increase it towards // the target if the baseline gas is lower. 
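The ValidateBody hunk above recomputes the root of block.Manifest() and requires it to be non-empty and equal to the ManifestHash committed in the header for the subordinate context. The sketch below shows the same commit-then-recompute pattern, but with a plain SHA-256 over the entries as a stand-in for the real DeriveSha/stack-trie derivation:

package main

import (
	"crypto/sha256"
	"errors"
	"fmt"
)

type hash = [32]byte

// manifestRoot is a toy stand-in for types.DeriveSha over a stack trie: it
// hashes the concatenation of the entries, which is enough to illustrate the
// commit-and-recompute pattern (not the real derivation).
func manifestRoot(manifest []hash) hash {
	h := sha256.New()
	for _, entry := range manifest {
		h.Write(entry[:])
	}
	var root hash
	copy(root[:], h.Sum(nil))
	return root
}

var errBadSubManifest = errors.New("bad subordinate manifest")

// validateManifest mirrors the body check: a block with a subordinate chain
// must carry a non-empty manifest whose recomputed root matches the hash
// committed in the header.
func validateManifest(manifest []hash, committed hash) error {
	if len(manifest) == 0 {
		return errBadSubManifest
	}
	if manifestRoot(manifest) != committed {
		return errBadSubManifest
	}
	return nil
}

func main() {
	manifest := []hash{sha256.Sum256([]byte("sub block 1")), sha256.Sum256([]byte("sub block 2"))}
	committed := manifestRoot(manifest)
	fmt.Println("valid manifest:   ", validateManifest(manifest, committed))

	// Drop an entry without updating the header commitment; validation fails.
	fmt.Println("tampered manifest:", validateManifest(manifest[:1], committed))
}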
-func CalcGasLimit(parent *types.Header, gasCeil uint64) uint64 { +func CalcGasLimit(parent *types.WorkObject, gasCeil uint64) uint64 { // No Gas for TimeToStartTx days worth of zone blocks, this gives enough time to // onboard new miners into the slice if parent.NumberU64(common.ZONE_CTX) < params.TimeToStartTx { diff --git a/core/bloom_indexer.go b/core/bloom_indexer.go index ef219c46a1..25d4da9b68 100644 --- a/core/bloom_indexer.go +++ b/core/bloom_indexer.go @@ -55,7 +55,7 @@ func NewBloomIndexer(db ethdb.Database, size, confirms uint64, nodeCtx int, logg size: size, logger: logger, } - table := rawdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix)) + table := rawdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix), db.Location()) return NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits", nodeCtx, logger) } @@ -70,7 +70,7 @@ func (b *BloomIndexer) Reset(ctx context.Context, section uint64, lastSectionHea // Process implements core.ChainIndexerBackend, adding a new header's bloom into // the index. -func (b *BloomIndexer) Process(ctx context.Context, header *types.Header, bloom types.Bloom) error { +func (b *BloomIndexer) Process(ctx context.Context, header *types.WorkObject, bloom types.Bloom) error { b.gen.AddBloom(uint(header.NumberU64(common.ZONE_CTX)-b.section*b.size), bloom) b.head = header.Hash() return nil diff --git a/core/bodydb.go b/core/bodydb.go index 3dc8e6744e..40262944c1 100644 --- a/core/bodydb.go +++ b/core/bodydb.go @@ -40,6 +40,7 @@ type BodyDb struct { blockCache *lru.Cache bodyCache *lru.Cache bodyProtoCache *lru.Cache + woCache *lru.Cache processor *StateProcessor slicesRunning []common.Location @@ -68,6 +69,7 @@ func NewBodyDb(db ethdb.Database, engine consensus.Engine, hc *HeaderChain, chai bc.blockCache = blockCache bc.bodyCache = bodyCache bc.bodyProtoCache = bodyRLPCache + bc.woCache, _ = lru.New(bodyCacheLimit) } else { blockCache, _ := lru.New(10) bodyCache, _ := lru.New(10) @@ -75,6 +77,7 @@ func NewBodyDb(db ethdb.Database, engine consensus.Engine, hc *HeaderChain, chai bc.blockCache = blockCache bc.bodyCache = bodyCache bc.bodyProtoCache = bodyRLPCache + bc.woCache, _ = lru.New(10) } // only start the state processor in zone @@ -87,7 +90,7 @@ func NewBodyDb(db ethdb.Database, engine consensus.Engine, hc *HeaderChain, chai } // Append -func (bc *BodyDb) Append(block *types.Block, newInboundEtxs types.Transactions) ([]*types.Log, error) { +func (bc *BodyDb) Append(block *types.WorkObject, newInboundEtxs types.Transactions) ([]*types.Log, error) { bc.chainmu.Lock() defer bc.chainmu.Unlock() @@ -131,10 +134,10 @@ func (bc *BodyDb) ProcessingState() bool { } // WriteBlock write the block to the bodydb database -func (bc *BodyDb) WriteBlock(block *types.Block) { +func (bc *BodyDb) WriteBlock(block *types.WorkObject, nodeCtx int) { // add the block to the cache as well bc.blockCache.Add(block.Hash(), block) - rawdb.WriteBlock(bc.db, block, bc.NodeCtx()) + rawdb.WriteWorkObject(bc.db, block.Hash(), block, types.BlockObject, nodeCtx) } // HasBlock checks if a block is fully present in the database or not. @@ -152,16 +155,16 @@ func (bc *BodyDb) Engine() consensus.Engine { // GetBlock retrieves a block from the database by hash and number, // caching it if found. 
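The bodydb hunks above add an LRU (woCache) in front of rawdb.ReadWorkObject, and the GetBlock/GetWorkObject hunks that follow read through it: check the cache, fall back to the database, then cache the result. A minimal map-backed sketch of that read-through pattern (the real code also pre-checks ReadTermini and uses a bounded LRU rather than a plain map):

package main

import "fmt"

// workObject is a toy stand-in for types.WorkObject, keyed by block hash.
type workObject struct {
	hash   string
	number uint64
}

type bodyDB struct {
	cache map[string]*workObject // stand-in for the bounded woCache LRU
	disk  map[string]*workObject // stand-in for the on-disk work objects
}

// getWorkObject is the read-through pattern: serve from cache when possible,
// otherwise read from the database and cache the result for next time.
func (db *bodyDB) getWorkObject(hash string) *workObject {
	if wo, ok := db.cache[hash]; ok {
		return wo
	}
	wo, ok := db.disk[hash]
	if !ok {
		return nil
	}
	db.cache[hash] = wo
	return wo
}

func main() {
	db := &bodyDB{
		cache: make(map[string]*workObject),
		disk:  map[string]*workObject{"0xabc": {hash: "0xabc", number: 7}},
	}
	fmt.Println(db.getWorkObject("0xabc")) // read from disk, then cached
	fmt.Println(db.getWorkObject("0xabc")) // served from cache
	fmt.Println(db.getWorkObject("0xdef")) // unknown hash -> nil
}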
-func (bc *BodyDb) GetBlock(hash common.Hash, number uint64) *types.Block { +func (bc *BodyDb) GetBlock(hash common.Hash, number uint64) *types.WorkObject { termini := rawdb.ReadTermini(bc.db, hash) if termini == nil { return nil } // Short circuit if the block's already in the cache, retrieve otherwise if block, ok := bc.blockCache.Get(hash); ok { - return block.(*types.Block) + return block.(*types.WorkObject) } - block := rawdb.ReadBlock(bc.db, hash, number, bc.NodeLocation()) + block := rawdb.ReadWorkObject(bc.db, hash, types.BlockObject) if block == nil { return nil } @@ -170,10 +173,30 @@ func (bc *BodyDb) GetBlock(hash common.Hash, number uint64) *types.Block { return block } +// GetWorkObject retrieves a workObject from the database by hash, +// caching it if found. +func (bc *BodyDb) GetWorkObject(hash common.Hash) *types.WorkObject { + termini := rawdb.ReadTermini(bc.db, hash) + if termini == nil { + return nil + } + // Short circuit if the block's already in the cache, retrieve otherwise + if wo, ok := bc.woCache.Get(hash); ok { + return wo.(*types.WorkObject) + } + wo := rawdb.ReadWorkObject(bc.db, hash, types.BlockObject) + if wo == nil { + return nil + } + // Cache the found block for next time and return + bc.woCache.Add(wo.Hash(), wo) + return wo +} + // GetBlockOrCandidate retrieves any known block from the database by hash and number, // caching it if found. -func (bc *BodyDb) GetBlockOrCandidate(hash common.Hash, number uint64) *types.Block { - block := rawdb.ReadBlock(bc.db, hash, number, bc.NodeLocation()) +func (bc *BodyDb) GetBlockOrCandidate(hash common.Hash, number uint64) *types.WorkObject { + block := rawdb.ReadWorkObject(bc.db, hash, types.BlockObject) if block == nil { return nil } diff --git a/core/chain_indexer.go b/core/chain_indexer.go index 06d44e1df3..f1c28bf5c4 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -42,7 +42,7 @@ type ChainIndexerBackend interface { // Process crunches through the next header in the chain segment. The caller // will ensure a sequential order of headers. - Process(ctx context.Context, header *types.Header, bloom types.Bloom) error + Process(ctx context.Context, header *types.WorkObject, bloom types.Bloom) error // Commit finalizes the section metadata and stores it into the database. Commit() error @@ -54,7 +54,7 @@ type ChainIndexerBackend interface { // ChainIndexerChain interface is used for connecting the indexer to a blockchain type ChainIndexerChain interface { // CurrentHeader retrieves the latest locally known header. - CurrentHeader() *types.Header + CurrentHeader() *types.WorkObject // GetBloom retrieves the bloom for the given block hash. GetBloom(blockhash common.Hash) (*types.Bloom, error) // SubscribeChainHeadEvent subscribes to new head header notifications. @@ -173,7 +173,7 @@ func (c *ChainIndexer) Close() error { // eventLoop is a secondary - optional - event loop of the indexer which is only // started for the outermost indexer to push chain head events into a processing // queue. 
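In the chain-indexer hunk below, the eventLoop now receives whole work objects as heads but still detects reorgs the same way: if an incoming head's parent hash does not match the previously processed hash, the indexer rewinds to the common ancestor before continuing. A toy event loop showing just that linkage check (header fields are illustrative stand-ins):

package main

import "fmt"

// header is a toy head event; the indexer only needs the hash linkage.
type header struct {
	hash       string
	parentHash string
	number     uint64
}

// processHeads mirrors the shape of the indexer's eventLoop: heads arrive in
// order, and whenever a head does not extend the previously seen hash a reorg
// has happened and the section index must rewind before continuing.
func processHeads(heads <-chan header, startHash string) {
	prevHash := startHash
	for h := range heads {
		if h.parentHash != prevHash {
			// In the real loop this triggers a search for the common ancestor
			// and reindexing from there; here we just report it.
			fmt.Printf("reorg detected at #%d: parent %s != previous head %s\n",
				h.number, h.parentHash, prevHash)
		}
		fmt.Printf("indexed head #%d (%s)\n", h.number, h.hash)
		prevHash = h.hash
	}
}

func main() {
	heads := make(chan header, 4)
	heads <- header{hash: "a1", parentHash: "a0", number: 1}
	heads <- header{hash: "a2", parentHash: "a1", number: 2}
	heads <- header{hash: "b2", parentHash: "b1", number: 2} // competing fork wins
	close(heads)
	processHeads(heads, "a0")
}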
-func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainHeadEvent, sub event.Subscription, nodeCtx int) { +func (c *ChainIndexer) eventLoop(currentHeader *types.WorkObject, events chan ChainHeadEvent, sub event.Subscription, nodeCtx int) { // Mark the chain indexer as active, requiring an additional teardown atomic.StoreUint32(&c.active, 1) @@ -200,7 +200,7 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, events chan ChainH errc <- nil return } - header := ev.Block.Header() + header := ev.Block if header.ParentHash(nodeCtx) != prevHash { // Reorg to the common ancestor if needed (might not exist in light sync mode, skip reorg then) // TODO: This seems a bit brittle, can we detect this case explicitly? diff --git a/core/chain_makers.go b/core/chain_makers.go deleted file mode 100644 index 0b230aa7d3..0000000000 --- a/core/chain_makers.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "fmt" - "math/big" - - "github.com/dominant-strategies/go-quai/common" - "github.com/dominant-strategies/go-quai/consensus" - "github.com/dominant-strategies/go-quai/consensus/misc" - "github.com/dominant-strategies/go-quai/core/state" - "github.com/dominant-strategies/go-quai/core/types" - "github.com/dominant-strategies/go-quai/core/vm" - "github.com/dominant-strategies/go-quai/ethdb" - "github.com/dominant-strategies/go-quai/params" -) - -// BlockGen creates blocks for testing. -// See GenerateChain for a detailed explanation. -type BlockGen struct { - i int - parent *types.Block - chain []*types.Block - header *types.Header - statedb *state.StateDB - - gasPool *GasPool - txs []*types.Transaction - receipts []*types.Receipt - uncles []*types.Header - etxs []*types.Transaction - subManifest types.BlockManifest - - config *params.ChainConfig - engine consensus.Engine -} - -// SetCoinbase sets the coinbase of the generated block. -// It can be called at most once. -func (b *BlockGen) SetCoinbase(addr common.Address) { - if b.gasPool != nil { - if len(b.txs) > 0 { - panic("coinbase must be set before adding transactions") - } - panic("coinbase can only be set once") - } - b.header.SetCoinbase(addr) - b.gasPool = new(GasPool).AddGas(b.header.GasLimit()) -} - -// SetExtra sets the extra data field of the generated block. -func (b *BlockGen) SetExtra(data []byte) { - b.header.SetExtra(data) -} - -// SetNonce sets the nonce field of the generated block. -func (b *BlockGen) SetNonce(nonce types.BlockNonce) { - b.header.SetNonce(nonce) -} - -// SetDifficulty sets the difficulty field of the generated block. This method is -// useful for Clique tests where the difficulty does not depend on time. For the -// progpow tests, please use OffsetTime, which implicitly recalculates the diff. 
-func (b *BlockGen) SetDifficulty(diff *big.Int) { - b.header.SetDifficulty(diff) -} - -// AddTx adds a transaction to the generated block. If no coinbase has -// been set, the block's coinbase is set to the zero address. -// -// AddTx panics if the transaction cannot be executed. In addition to -// the protocol-imposed limitations (gas limit, etc.), there are some -// further limitations on the content of transactions that can be -// added. Notably, contract code relying on the BLOCKHASH instruction -// will panic during execution. -func (b *BlockGen) AddTx(tx *types.Transaction, etxRLimit, etxPLimit *int) { - b.AddTxWithChain(nil, tx, etxRLimit, etxPLimit) -} - -// AddTxWithChain adds a transaction to the generated block. If no coinbase has -// been set, the block's coinbase is set to the zero address. -// -// AddTxWithChain panics if the transaction cannot be executed. In addition to -// the protocol-imposed limitations (gas limit, etc.), there are some -// further limitations on the content of transactions that can be -// added. If contract code relies on the BLOCKHASH instruction, -// the block in chain will be returned. -func (b *BlockGen) AddTxWithChain(hc *HeaderChain, tx *types.Transaction, etxRLimit, etxPLimit *int) { - if b.gasPool == nil { - b.SetCoinbase(common.ZeroAddress(hc.config.Location)) - } - b.statedb.Prepare(tx.Hash(), len(b.txs)) - coinbase := b.header.Coinbase() - gasUsed := b.header.GasUsed() - receipt, err := ApplyTransaction(b.config, nil, hc, &coinbase, b.gasPool, b.statedb, b.header, tx, &gasUsed, vm.Config{}, etxRLimit, etxPLimit, hc.logger) - if err != nil { - panic(err) - } - b.txs = append(b.txs, tx) - b.receipts = append(b.receipts, receipt) -} - -// GetBalance returns the balance of the given address at the generated block. -func (b *BlockGen) GetBalance(addr common.Address) *big.Int { - internal, err := addr.InternalAddress() - if err != nil { - panic(err.Error()) - } - return b.statedb.GetBalance(internal) -} - -// AddUncheckedTx forcefully adds a transaction to the block without any -// validation. -// -// AddUncheckedTx will cause consensus failures when used during real -// chain processing. This is best used in conjunction with raw block insertion. -func (b *BlockGen) AddUncheckedTx(tx *types.Transaction) { - b.txs = append(b.txs, tx) -} - -// Number returns the block number of the block being generated. -func (b *BlockGen) Number() *big.Int { - return new(big.Int).Set(b.header.Number(b.config.Location.Context())) -} - -// BaseFee returns the base fee of the block being generated. -func (b *BlockGen) BaseFee() *big.Int { - return new(big.Int).Set(b.header.BaseFee()) -} - -// AddUncheckedReceipt forcefully adds a receipts to the block without a -// backing transaction. -// -// AddUncheckedReceipt will cause consensus failures when used during real -// chain processing. This is best used in conjunction with raw block insertion. -func (b *BlockGen) AddUncheckedReceipt(receipt *types.Receipt) { - b.receipts = append(b.receipts, receipt) -} - -// TxNonce returns the next valid transaction nonce for the -// account at addr. It panics if the account does not exist. -func (b *BlockGen) TxNonce(addr common.Address) uint64 { - internal, err := addr.InternalAddress() - if err != nil { - panic(err.Error()) - } - if !b.statedb.Exist(internal) { - panic("account does not exist") - } - return b.statedb.GetNonce(internal) -} - -// AddUncle adds an uncle header to the generated block. 
-func (b *BlockGen) AddUncle(h *types.Header) { - b.uncles = append(b.uncles, h) -} - -// PrevBlock returns a previously generated block by number. It panics if -// num is greater or equal to the number of the block being generated. -// For index -1, PrevBlock returns the parent block given to GenerateChain. -func (b *BlockGen) PrevBlock(index int) *types.Block { - if index >= b.i { - panic(fmt.Errorf("block index %d out of range (%d,%d)", index, -1, b.i)) - } - if index == -1 { - return b.parent - } - return b.chain[index] -} - -// OffsetTime modifies the time instance of a block, implicitly changing its -// associated difficulty. It's useful to test scenarios where forking is not -// tied to chain length directly. -func (b *BlockGen) OffsetTime(seconds int64) { - b.header.SetTime(b.header.Time() + uint64(seconds)) - if b.header.Time() <= b.parent.Header().Time() { - panic("block time out of range") - } - chainreader := &fakeChainReader{config: b.config} - b.header.SetDifficulty(b.engine.CalcDifficulty(chainreader, b.parent.Header())) -} - -// GenerateChain creates a chain of n blocks. The first block's -// parent will be the provided parent. db is used to store -// intermediate states and should contain the parent's state trie. -// -// The generator function is called with a new block generator for -// every block. Any transactions and uncles added to the generator -// become part of the block. If gen is nil, the blocks will be empty -// and their coinbase will be the zero address. -// -// Blocks created by GenerateChain do not contain valid proof of work -// values. Inserting them into BlockChain requires use of FakePow or -// a similar non-validating proof of work implementation. -func GenerateChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) { - if config == nil { - config = params.TestChainConfig - } - blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) - chainreader := &fakeChainReader{config: config} - genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { - b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} - b.header = makeHeader(chainreader, parent, statedb, b.engine) - - // Execute any user modifications to the block - if gen != nil { - gen(i, b) - } - if b.engine != nil { - // Finalize and seal the block - block, _ := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.etxs, b.subManifest, b.receipts) - - // Write state changes to db - root, err := statedb.Commit(true) - if err != nil { - panic(fmt.Sprintf("state write error: %v", err)) - } - utxoRoot, err := statedb.CommitUTXOs() - if err != nil { - panic(fmt.Sprintf("state write error: %v", err)) - } - if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil { - panic(fmt.Sprintf("trie write error: %v", err)) - } - if err := statedb.UTXODatabase().TrieDB().Commit(utxoRoot, false, nil); err != nil { - panic(fmt.Sprintf("trie write error: %v", err)) - } - return block, b.receipts - } - return nil, nil - } - for i := 0; i < n; i++ { - statedb, err := state.New(parent.EVMRoot(), parent.UTXORoot(), state.NewDatabase(db), state.NewDatabase(db), nil, config.Location) - if err != nil { - panic(err) - } - block, receipt := genblock(i, parent, statedb) - blocks[i] = block - receipts[i] = receipt - parent = block - } - return blocks, receipts -} - -func 
makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { - var time uint64 - if parent.Time() == 0 { - time = 10 - } else { - time = parent.Time() + 10 // block time is fixed at 10 seconds - } - nodeCtx := chain.Config().Location.Context() - - // Temporary header values just to calc difficulty - diffheader := types.EmptyHeader() - diffheader.SetDifficulty(parent.Difficulty(nodeCtx)) - diffheader.SetNumber(parent.Number(nodeCtx), nodeCtx) - diffheader.SetTime(time - 10) - diffheader.SetUncleHash(parent.UncleHash()) - - // Make new header - header := types.EmptyHeader() - header.SetUTXORoot(state.UTXORoot()) - header.SetEVMRoot(state.IntermediateRoot(true)) - header.SetParentHash(parent.Hash(), nodeCtx) - header.SetCoinbase(parent.Coinbase()) - header.SetDifficulty(engine.CalcDifficulty(chain, diffheader)) - header.SetGasLimit(parent.GasLimit()) - header.SetNumber(new(big.Int).Add(parent.Number(nodeCtx), common.Big1), nodeCtx) - header.SetTime(time) - header.SetBaseFee(misc.CalcBaseFee(chain.Config(), parent.Header())) - return header -} - -// makeHeaderChain creates a deterministic chain of headers rooted at parent. -func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Header { - blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, engine, db, seed) - headers := make([]*types.Header, len(blocks)) - for i, block := range blocks { - headers[i] = block.Header() - } - return headers -} - -// makeBlockChain creates a deterministic chain of blocks rooted at parent. -func makeBlockChain(parent *types.Block, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Block { - blocks, _ := GenerateChain(params.TestChainConfig, parent, engine, db, n, func(i int, b *BlockGen) { - b.SetCoinbase(common.BytesToAddress(common.InternalAddress{0: byte(seed), 19: byte(i)}.Bytes(), params.TestChainConfig.Location)) - }) - return blocks -} - -type fakeChainReader struct { - config *params.ChainConfig -} - -// Config returns the chain configuration. 
-func (cr *fakeChainReader) Config() *params.ChainConfig { - return cr.config -} - -func (cr *fakeChainReader) CurrentHeader() *types.Header { return nil } -func (cr *fakeChainReader) GetHeaderByNumber(number uint64) *types.Header { return nil } -func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header { return nil } -func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil } -func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil } -func (cr *fakeChainReader) GetTerminiByHash(hash common.Hash) *types.Termini { return nil } -func (cr *fakeChainReader) ProcessingState() bool { return false } -func (cr *fakeChainReader) ComputeEfficiencyScore(header *types.Header) uint16 { return 0 } -func (cr *fakeChainReader) IsGenesisHash(hash common.Hash) bool { return false } -func (cr *fakeChainReader) UpdateEtxEligibleSlices(header *types.Header, location common.Location) common.Hash { - return common.Hash{} -} -func (cr *fakeChainReader) IsSliceSetToReceiveEtx(header *types.Header, location common.Location) bool { - return false -} diff --git a/core/core.go b/core/core.go index 530f1c327b..5d436f2aa3 100644 --- a/core/core.go +++ b/core/core.go @@ -77,7 +77,7 @@ type IndexerConfig struct { IndexAddressUtxos bool } -func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.Header) bool, txConfig *TxPoolConfig, txLookupLimit *uint64, chainConfig *params.ChainConfig, slicesRunning []common.Location, currentExpansionNumber uint8, genesisBlock *types.Block, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, indexerConfig *IndexerConfig, genesis *Genesis, logger *log.Logger) (*Core, error) { +func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.WorkObject) bool, txConfig *TxPoolConfig, txLookupLimit *uint64, chainConfig *params.ChainConfig, slicesRunning []common.Location, currentExpansionNumber uint8, genesisBlock *types.WorkObject, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, indexerConfig *IndexerConfig, genesis *Genesis, logger *log.Logger) (*Core, error) { slice, err := NewSlice(db, config, txConfig, txLookupLimit, isLocalBlock, chainConfig, slicesRunning, currentExpansionNumber, genesisBlock, domClientUrl, subClientUrls, engine, cacheConfig, indexerConfig, vmConfig, genesis, logger) if err != nil { return nil, err @@ -109,14 +109,14 @@ func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.H // caching any pending blocks which cannot yet be appended. InsertChain return // the number of blocks which were successfully consumed (either appended, or // cached), and an error. -func (c *Core) InsertChain(blocks types.Blocks) (int, error) { +func (c *Core) InsertChain(blocks types.WorkObjects) (int, error) { nodeLocation := c.NodeLocation() nodeCtx := c.NodeCtx() for idx, block := range blocks { // Only attempt to append a block, if it is not coincident with our dominant // chain. If it is dom coincident, then the dom chain node in our slice needs // to initiate the append. 
- _, order, err := c.CalcOrder(block.Header()) + _, order, err := c.CalcOrder(block) if err != nil { return idx, err } @@ -126,12 +126,12 @@ func (c *Core) InsertChain(blocks types.Blocks) (int, error) { c.processingCache.Add(block.Hash(), 1) } else { c.logger.WithFields(log.Fields{ - "Number": block.Header().NumberArray(), + "Number": block.NumberArray(), "Hash": block.Hash(), }).Info("Already processing block") return idx, errors.New("Already in process of appending this block") } - newPendingEtxs, _, _, err := c.sl.Append(block.Header(), types.EmptyHeader(), common.Hash{}, false, nil) + newPendingEtxs, _, _, err := c.sl.Append(block, types.EmptyHeader(c.NodeCtx()), common.Hash{}, false, nil) c.processingCache.Remove(block.Hash()) if err == nil { // If we have a dom, send the dom any pending ETXs which will become @@ -139,7 +139,7 @@ func (c *Core) InsertChain(blocks types.Blocks) (int, error) { // subordinate block manifest, then ETXs produced by this block and the rollup // of ETXs produced by subordinate chain(s) will become referencable. if nodeCtx > common.PRIME_CTX { - pendingEtx := types.PendingEtxs{block.Header(), newPendingEtxs} + pendingEtx := types.PendingEtxs{Header: block, Etxs: newPendingEtxs} // Only send the pending Etxs to dom if valid, because in the case of running a slice, for the zones that the node doesn't run, it cannot have the etxs generated if pendingEtx.IsValid(trie.NewStackTrie(nil)) { if err := c.SendPendingEtxsToDom(pendingEtx); err != nil { @@ -151,6 +151,8 @@ func (c *Core) InsertChain(blocks types.Blocks) (int, error) { } } c.removeFromAppendQueue(block) + } else if err.Error() == ErrKnownBlock.Error() { + c.removeFromAppendQueue(block) } else if err.Error() == consensus.ErrFutureBlock.Error() || err.Error() == ErrBodyNotFound.Error() || err.Error() == ErrPendingEtxNotFound.Error() || @@ -158,16 +160,16 @@ func (c *Core) InsertChain(blocks types.Blocks) (int, error) { err.Error() == consensus.ErrUnknownAncestor.Error() || err.Error() == ErrSubNotSyncedToDom.Error() || err.Error() == ErrDomClientNotUp.Error() { - if c.sl.CurrentInfo(block.Header()) { + if c.sl.CurrentInfo(block) { c.logger.WithFields(log.Fields{ - "Number": block.Header().NumberArray(), + "Number": block.NumberArray(), "Hash": block.Hash(), "err": err, }).Info("Cannot append yet.") } else { c.logger.WithFields(log.Fields{ "loc": c.NodeLocation().Name(), - "Number": block.Header().NumberArray(), + "Number": block.NumberArray(), "Hash": block.Hash(), "err": err, }).Debug("Cannot append yet.") @@ -175,7 +177,7 @@ func (c *Core) InsertChain(blocks types.Blocks) (int, error) { if err.Error() == ErrSubNotSyncedToDom.Error() || err.Error() == ErrPendingEtxNotFound.Error() { if nodeCtx != common.ZONE_CTX && c.sl.subClients[block.Location().SubIndex(nodeLocation)] != nil { - c.sl.subClients[block.Location().SubIndex(nodeLocation)].DownloadBlocksInManifest(context.Background(), block.Hash(), block.SubManifest(), block.ParentEntropy(nodeCtx)) + c.sl.subClients[block.Location().SubIndex(nodeLocation)].DownloadBlocksInManifest(context.Background(), block.Hash(), block.Manifest(), block.ParentEntropy(nodeCtx)) } } return idx, ErrPendingBlock @@ -279,11 +281,11 @@ func (c *Core) serviceBlocks(hashNumberList []types.HashAndNumber) { parentBlock := c.sl.hc.GetBlockOrCandidate(block.ParentHash(c.NodeCtx()), block.NumberU64(c.NodeCtx())-1) if parentBlock != nil { // If parent header is dom, send a signal to dom to request for the block if it doesnt have it - _, parentHeaderOrder, err := 
c.sl.engine.CalcOrder(parentBlock.Header()) + _, parentHeaderOrder, err := c.sl.engine.CalcOrder(parentBlock) if err != nil { c.logger.WithFields(log.Fields{ "Hash": parentBlock.Hash(), - "Number": parentBlock.Header().NumberArray(), + "Number": parentBlock.NumberArray(), }).Info("Error calculating the parent block order in serviceBlocks") continue } @@ -299,7 +301,7 @@ func (c *Core) serviceBlocks(hashNumberList []types.HashAndNumber) { } } c.addToQueueIfNotAppended(parentBlock) - _, err = c.InsertChain([]*types.Block{block}) + _, err = c.InsertChain([]*types.WorkObject{block}) if err != nil && err.Error() == ErrPendingBlock.Error() { // Best check here would be to check the first hash in each Fork, until we do that // checking the first item in the sorted hashNumberList will do @@ -361,7 +363,7 @@ func (c *Core) RequestDomToAppendOrFetch(hash common.Hash, entropy *big.Int, ord } // addToQueueIfNotAppended checks if block is appended and if its not adds the block to appendqueue -func (c *Core) addToQueueIfNotAppended(block *types.Block) { +func (c *Core) addToQueueIfNotAppended(block *types.WorkObject) { // Check if the hash is in the blockchain, otherwise add it to the append queue if c.GetHeaderByHash(block.Hash()) == nil { c.addToAppendQueue(block) @@ -369,9 +371,9 @@ func (c *Core) addToQueueIfNotAppended(block *types.Block) { } // addToAppendQueue adds a block to the append queue -func (c *Core) addToAppendQueue(block *types.Block) error { +func (c *Core) addToAppendQueue(block *types.WorkObject) error { nodeCtx := c.NodeLocation().Context() - _, order, err := c.engine.CalcOrder(block.Header()) + _, order, err := c.engine.CalcOrder(block) if err != nil { return err } @@ -382,7 +384,7 @@ func (c *Core) addToAppendQueue(block *types.Block) error { } // removeFromAppendQueue removes a block from the append queue -func (c *Core) removeFromAppendQueue(block *types.Block) { +func (c *Core) removeFromAppendQueue(block *types.WorkObject) { c.appendQueue.Remove(block.Hash()) } @@ -461,7 +463,7 @@ func (c *Core) SubscribeMissingBlockEvent(ch chan<- types.BlockRequest) event.Su // InsertChainWithoutSealVerification works exactly the same // except for seal verification, seal verification is omitted -func (c *Core) InsertChainWithoutSealVerification(block *types.Block) (int, error) { +func (c *Core) InsertChainWithoutSealVerification(block *types.WorkObject) (int, error) { return 0, nil } @@ -499,7 +501,7 @@ func (c *Core) Stop() { //---------------// // WriteBlock write the block to the bodydb database -func (c *Core) WriteBlock(block *types.Block) { +func (c *Core) WriteBlock(block *types.WorkObject) { c.writeBlockLock.Lock() defer c.writeBlockLock.Unlock() nodeCtx := c.NodeCtx() @@ -514,7 +516,7 @@ func (c *Core) WriteBlock(block *types.Block) { } if c.GetHeaderByHash(block.Hash()) == nil { // Only add non dom blocks to the append queue - _, order, err := c.CalcOrder(block.Header()) + _, order, err := c.CalcOrder(block) if err != nil { return } @@ -522,7 +524,7 @@ func (c *Core) WriteBlock(block *types.Block) { parentHeader := c.GetHeaderByHash(block.ParentHash(nodeCtx)) if parentHeader != nil { c.sl.WriteBlock(block) - c.InsertChain([]*types.Block{block}) + c.InsertChain([]*types.WorkObject{block}) } c.addToAppendQueue(block) // If a dom block comes in and we havent appended it yet @@ -538,7 +540,7 @@ func (c *Core) WriteBlock(block *types.Block) { } } -func (c *Core) Append(header *types.Header, manifest types.BlockManifest, domPendingHeader *types.Header, domTerminus common.Hash, domOrigin 
bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) { +func (c *Core) Append(header *types.WorkObject, manifest types.BlockManifest, domPendingHeader *types.WorkObject, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) { nodeCtx := c.NodeCtx() newPendingEtxs, subReorg, setHead, err := c.sl.Append(header, domPendingHeader, domTerminus, domOrigin, newInboundEtxs) if err != nil { @@ -584,15 +586,15 @@ func (c *Core) DownloadBlocksInManifest(blockHash common.Hash, manifest types.Bl if block != nil { // If a prime block comes in if c.sl.subClients[block.Location().SubIndex(c.NodeLocation())] != nil { - c.sl.subClients[block.Location().SubIndex(c.NodeLocation())].DownloadBlocksInManifest(context.Background(), block.Hash(), block.SubManifest(), block.ParentEntropy(c.NodeCtx())) + c.sl.subClients[block.Location().SubIndex(c.NodeLocation())].DownloadBlocksInManifest(context.Background(), block.Hash(), block.Manifest(), block.ParentEntropy(c.NodeCtx())) } } } } // ConstructLocalBlock takes a header and construct the Block locally -func (c *Core) ConstructLocalMinedBlock(header *types.Header) (*types.Block, error) { - return c.sl.ConstructLocalMinedBlock(header) +func (c *Core) ConstructLocalMinedBlock(woHeader *types.WorkObject) (*types.WorkObject, error) { + return c.sl.ConstructLocalMinedBlock(woHeader) } func (c *Core) SubRelayPendingHeader(slPendingHeader types.PendingHeader, newEntropy *big.Int, location common.Location, subReorg bool, order int) { @@ -603,7 +605,7 @@ func (c *Core) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingHea c.sl.UpdateDom(oldTerminus, pendingHeader, location) } -func (c *Core) NewGenesisPendigHeader(pendingHeader *types.Header, domTerminus common.Hash, genesisHash common.Hash) { +func (c *Core) NewGenesisPendigHeader(pendingHeader *types.WorkObject, domTerminus common.Hash, genesisHash common.Hash) { c.sl.NewGenesisPendingHeader(pendingHeader, domTerminus, genesisHash) } @@ -611,11 +613,11 @@ func (c *Core) SetCurrentExpansionNumber(expansionNumber uint8) { c.sl.SetCurrentExpansionNumber(expansionNumber) } -func (c *Core) WriteGenesisBlock(block *types.Block, location common.Location) { +func (c *Core) WriteGenesisBlock(block *types.WorkObject, location common.Location) { c.sl.WriteGenesisBlock(block, location) } -func (c *Core) GetPendingHeader() (*types.Header, error) { +func (c *Core) GetPendingHeader() (*types.WorkObject, error) { return c.sl.GetPendingHeader() } @@ -632,7 +634,7 @@ func (c *Core) GetPendingEtxs(hash common.Hash) *types.PendingEtxs { } func (c *Core) GetPendingEtxsRollup(hash common.Hash, location common.Location) *types.PendingEtxsRollup { - return rawdb.ReadPendingEtxsRollup(c.sl.sliceDb, hash, location) + return rawdb.ReadPendingEtxsRollup(c.sl.sliceDb, hash) } func (c *Core) GetPendingEtxsRollupFromSub(hash common.Hash, location common.Location) (types.PendingEtxsRollup, error) { @@ -659,7 +661,7 @@ func (c *Core) AddPendingEtxsRollup(pEtxsRollup types.PendingEtxsRollup) error { return c.sl.AddPendingEtxsRollup(pEtxsRollup) } -func (c *Core) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes types.Termini) error { +func (c *Core) GenerateRecoveryPendingHeader(pendingHeader *types.WorkObject, checkpointHashes types.Termini) error { return c.sl.GenerateRecoveryPendingHeader(pendingHeader, checkpointHashes) } @@ -687,7 +689,7 @@ func (c *Core) SetSubClient(client *quaiclient.Client, location common.Location) 
c.sl.SetSubClient(client, location) } -func (c *Core) AddGenesisPendingEtxs(block *types.Block) { +func (c *Core) AddGenesisPendingEtxs(block *types.WorkObject) { c.sl.AddGenesisPendingEtxs(block) } @@ -701,64 +703,64 @@ func (c *Core) SubscribeExpansionEvent(ch chan<- ExpansionEvent) event.Subscript // GetBlock retrieves a block from the database by hash and number, // caching it if found. -func (c *Core) GetBlock(hash common.Hash, number uint64) *types.Block { +func (c *Core) GetBlock(hash common.Hash, number uint64) *types.WorkObject { return c.sl.hc.GetBlock(hash, number) } // GetBlockByHash retrieves a block from the database by hash, caching it if found. -func (c *Core) GetBlockByHash(hash common.Hash) *types.Block { - return c.sl.hc.GetBlockOrCandidateByHash(hash) +func (c *Core) GetBlockByHash(hash common.Hash) *types.WorkObject { + return c.sl.hc.GetBlockByHash(hash) } // GetBlockOrCandidateByHash retrieves a block from the database by hash, caching it if found. -func (c *Core) GetBlockOrCandidateByHash(hash common.Hash) *types.Block { +func (c *Core) GetBlockOrCandidateByHash(hash common.Hash) *types.WorkObject { return c.sl.hc.GetBlockOrCandidateByHash(hash) } // GetHeaderByNumber retrieves a block header from the database by number, // caching it (associated with its hash) if found. -func (c *Core) GetHeaderByNumber(number uint64) *types.Header { +func (c *Core) GetHeaderByNumber(number uint64) *types.WorkObject { return c.sl.hc.GetHeaderByNumber(number) } // GetBlockByNumber retrieves a block from the database by number, caching it // (associated with its hash) if found. -func (c *Core) GetBlockByNumber(number uint64) *types.Block { +func (c *Core) GetBlockByNumber(number uint64) *types.WorkObject { return c.sl.hc.GetBlockByNumber(number) } // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. // [deprecated by eth/62] -func (c *Core) GetBlocksFromHash(hash common.Hash, n int) []*types.Block { +func (c *Core) GetBlocksFromHash(hash common.Hash, n int) []*types.WorkObject { return c.sl.hc.GetBlocksFromHash(hash, n) } // GetUnclesInChain retrieves all the uncles from a given block backwards until // a specific distance is reached. -func (c *Core) GetUnclesInChain(block *types.Block, length int) []*types.Header { +func (c *Core) GetUnclesInChain(block *types.WorkObject, length int) []*types.WorkObjectHeader { return c.sl.hc.GetUnclesInChain(block, length) } // GetGasUsedInChain retrieves all the gas used from a given block backwards until // a specific distance is reached. -func (c *Core) GetGasUsedInChain(block *types.Block, length int) int64 { +func (c *Core) GetGasUsedInChain(block *types.WorkObject, length int) int64 { return c.sl.hc.GetGasUsedInChain(block, length) } // GetGasUsedInChain retrieves all the gas used from a given block backwards until // a specific distance is reached. -func (c *Core) CalculateBaseFee(header *types.Header) *big.Int { +func (c *Core) CalculateBaseFee(header *types.WorkObject) *big.Int { return c.sl.hc.CalculateBaseFee(header) } // CurrentBlock returns the block for the current header. -func (c *Core) CurrentBlock() *types.Block { +func (c *Core) CurrentBlock() *types.WorkObject { return c.sl.hc.CurrentBlock() } // CurrentHeader retrieves the current head header of the canonical chain. The // header is retrieved from the HeaderChain's internal cache. 
-func (c *Core) CurrentHeader() *types.Header { +func (c *Core) CurrentHeader() *types.WorkObject { return c.sl.hc.CurrentHeader() } @@ -768,36 +770,36 @@ func (c *Core) CurrentLogEntropy() *big.Int { } // TotalLogS returns the total entropy reduction if the chain since genesis to the given header -func (c *Core) TotalLogS(header *types.Header) *big.Int { +func (c *Core) TotalLogS(header *types.WorkObject) *big.Int { return c.engine.TotalLogS(c, header) } // CalcOrder returns the order of the block within the hierarchy of chains -func (c *Core) CalcOrder(header *types.Header) (*big.Int, int, error) { +func (c *Core) CalcOrder(header *types.WorkObject) (*big.Int, int, error) { return c.engine.CalcOrder(header) } // GetHeader retrieves a block header from the database by hash and number, // caching it if found. -func (c *Core) GetHeader(hash common.Hash, number uint64) *types.Header { +func (c *Core) GetHeader(hash common.Hash, number uint64) *types.WorkObject { return c.sl.hc.GetHeader(hash, number) } // GetHeaderByHash retrieves a block header from the database by hash, caching it if // found. -func (c *Core) GetHeaderByHash(hash common.Hash) *types.Header { +func (c *Core) GetHeaderByHash(hash common.Hash) *types.WorkObject { return c.sl.hc.GetHeaderByHash(hash) } // GetHeaderOrCandidate retrieves a block header from the database by hash and number, // caching it if found. -func (c *Core) GetHeaderOrCandidate(hash common.Hash, number uint64) *types.Header { +func (c *Core) GetHeaderOrCandidate(hash common.Hash, number uint64) *types.WorkObject { return c.sl.hc.GetHeaderOrCandidate(hash, number) } // GetHeaderOrCandidateByHash retrieves a block header from the database by hash, caching it if // found. -func (c *Core) GetHeaderOrCandidateByHash(hash common.Hash) *types.Header { +func (c *Core) GetHeaderOrCandidateByHash(hash common.Hash) *types.WorkObject { return c.sl.hc.GetHeaderOrCandidateByHash(hash) } @@ -828,7 +830,7 @@ func (c *Core) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCano } // Genesis retrieves the chain's genesis block. -func (c *Core) Genesis() *types.Block { +func (c *Core) Genesis() *types.WorkObject { return c.GetBlockByHash(c.sl.hc.genesisHeader.Hash()) } @@ -839,7 +841,7 @@ func (c *Core) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscript // GetBody retrieves a block body (transactions and uncles) from the database by // hash, caching it if found. 
-func (c *Core) GetBody(hash common.Hash) *types.Body { +func (c *Core) GetBody(hash common.Hash) *types.WorkObject { return c.sl.hc.GetBody(hash) } @@ -862,7 +864,7 @@ func (c *Core) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscript // ComputeEfficiencyScore computes the efficiency score for the given prime // block This data is is only valid if called from Prime context, otherwise // there is no guarantee for this data to be accurate -func (c *Core) ComputeEfficiencyScore(header *types.Header) uint16 { +func (c *Core) ComputeEfficiencyScore(header *types.WorkObject) uint16 { return c.sl.hc.ComputeEfficiencyScore(header) } @@ -875,15 +877,15 @@ func (c *Core) GetExpansionNumber() uint8 { return c.sl.hc.GetExpansionNumber() } -func (c *Core) UpdateEtxEligibleSlices(header *types.Header, location common.Location) common.Hash { +func (c *Core) UpdateEtxEligibleSlices(header *types.WorkObject, location common.Location) common.Hash { return c.sl.hc.UpdateEtxEligibleSlices(header, location) } -func (c *Core) IsSliceSetToReceiveEtx(header *types.Header, location common.Location) bool { +func (c *Core) IsSliceSetToReceiveEtx(header *types.WorkObject, location common.Location) bool { return c.sl.hc.IsSliceSetToReceiveEtx(header, location) } -func (c *Core) GetPrimeTerminus(header *types.Header) *types.Header { +func (c *Core) GetPrimeTerminus(header *types.WorkObject) *types.WorkObject { return c.sl.hc.GetPrimeTerminus(header) } @@ -987,7 +989,7 @@ func (c *Core) StopMining() { } // Pending returns the currently pending block and associated state. -func (c *Core) Pending() *types.Block { +func (c *Core) Pending() *types.WorkObject { return c.sl.miner.Pending() } @@ -996,12 +998,12 @@ func (c *Core) Pending() *types.Block { // Note, to access both the pending block and the pending state // simultaneously, please use Pending(), as the pending state can // change between multiple method calls -func (c *Core) PendingBlock() *types.Block { +func (c *Core) PendingBlock() *types.WorkObject { return c.sl.miner.PendingBlock() } // PendingBlockAndReceipts returns the currently pending block and corresponding receipts. -func (c *Core) PendingBlockAndReceipts() (*types.Block, types.Receipts) { +func (c *Core) PendingBlockAndReceipts() (*types.WorkObject, types.Receipts) { return c.sl.miner.PendingBlockAndReceipts() } @@ -1016,7 +1018,7 @@ func (c *Core) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription { } // SubscribePendingBlock starts delivering the pending block to the given channel. 
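A small, hypothetical read-path sketch of the getters above: headers, bodies, and pending blocks are all exposed as *types.WorkObject after this change, so callers no longer juggle *types.Header and *types.Block.

package example

import (
	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core"
)

// headSummary returns the hash and number of the current head in the node's
// own context; both lookups now yield *types.WorkObject.
func headSummary(c *core.Core) (common.Hash, uint64) {
	head := c.CurrentHeader() // formerly *types.Header
	if body := c.GetBody(head.Hash()); body != nil {
		// GetBody also returns a *types.WorkObject rather than *types.Body.
		_ = body.ExtTransactions()
	}
	return head.Hash(), head.NumberU64(c.NodeCtx())
}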
-func (c *Core) SubscribePendingHeader(ch chan<- *types.Header) event.Subscription { +func (c *Core) SubscribePendingHeader(ch chan<- *types.WorkObject) event.Subscription { return c.sl.miner.SubscribePendingHeader(ch) } @@ -1075,11 +1077,11 @@ func (c *Core) StateCache() state.Database { func (c *Core) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) { return c.sl.hc.bc.processor.ContractCodeWithPrefix(hash) } -func (c *Core) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, err error) { +func (c *Core) StateAtBlock(block *types.WorkObject, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, err error) { return c.sl.hc.bc.processor.StateAtBlock(block, reexec, base, checkLive) } -func (c *Core) StateAtTransaction(block *types.Block, txIndex int, reexec uint64) (Message, vm.BlockContext, *state.StateDB, error) { +func (c *Core) StateAtTransaction(block *types.WorkObject, txIndex int, reexec uint64) (Message, vm.BlockContext, *state.StateDB, error) { return c.sl.hc.bc.processor.StateAtTransaction(block, txIndex, reexec) } diff --git a/core/events.go b/core/events.go index 1edd945e49..a5c019d2b0 100644 --- a/core/events.go +++ b/core/events.go @@ -1,19 +1,3 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - package core import ( @@ -25,26 +9,26 @@ import ( type NewTxsEvent struct{ Txs []*types.Transaction } // NewMinedBlockEvent is posted when a block has been imported. -type NewMinedBlockEvent struct{ Block *types.Block } +type NewMinedBlockEvent struct{ Block *types.WorkObject } // RemovedLogsEvent is posted when a reorg happens type RemovedLogsEvent struct{ Logs []*types.Log } type ChainEvent struct { - Block *types.Block + Block *types.WorkObject Hash common.Hash Logs []*types.Log } type ChainSideEvent struct { - Blocks []*types.Block + Blocks []*types.WorkObject ResetUncles bool } type ChainHeadEvent struct { - Block *types.Block + Block *types.WorkObject } type ExpansionEvent struct { - Block *types.Block + Block *types.WorkObject } diff --git a/core/evm.go b/core/evm.go index 6b8245475e..5a0f8f81cd 100644 --- a/core/evm.go +++ b/core/evm.go @@ -32,12 +32,12 @@ type ChainContext interface { // Engine retrieves the chain's consensus engine. Engine() consensus.Engine + // GetHeader returns the hash corresponding to their hash. + GetHeader(common.Hash, uint64) *types.WorkObject + // GetHeader returns a block header from the database by hash. // The header might not be on the canonical chain. - GetHeaderOrCandidate(common.Hash, uint64) *types.Header - - // GetHeader returns a block header in the canonical chain from the database by hash. 
- GetHeader(common.Hash, uint64) *types.Header + GetHeaderOrCandidate(common.Hash, uint64) *types.WorkObject // NodeCtx returns the context of the running node NodeCtx() int @@ -46,7 +46,7 @@ type ChainContext interface { IsGenesisHash(common.Hash) bool // GetHeaderByHash returns a block header from the database by hash. - GetHeaderByHash(common.Hash) *types.Header + GetHeaderByHash(common.Hash) *types.WorkObject // CheckIfEtxIsEligible checks if the given slice is eligible to accept the // etx based on the EtxEligibleSlices @@ -54,7 +54,7 @@ type ChainContext interface { } // NewEVMBlockContext creates a new context for use in the EVM. -func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common.Address) vm.BlockContext { +func NewEVMBlockContext(header *types.WorkObject, chain ChainContext, author *common.Address) vm.BlockContext { var ( beneficiary common.Address baseFee *big.Int @@ -119,7 +119,7 @@ func NewEVMTxContext(msg Message) vm.TxContext { } // GetHashFn returns a GetHashFunc which retrieves header hashes by number -func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash { +func GetHashFn(ref *types.WorkObject, chain ChainContext) func(n uint64) common.Hash { // Cache will initially contain [refHash.parent], // Then fill up with [refHash.p, refHash.pp, refHash.ppp, ...] var cache []common.Hash diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index c6ea3d8095..8732af0e1f 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -49,10 +49,10 @@ type Blockchain interface { Config() *params.ChainConfig // Genesis retrieves the chain's genesis block. - Genesis() *types.Block + Genesis() *types.WorkObject // CurrentHeader retrieves the current head header of the canonical chain. - CurrentHeader() *types.Header + CurrentHeader() *types.WorkObject } // ID is a fork identifier diff --git a/core/gen_genesis.go b/core/gen_genesis.go index 7e067232ab..f3584646a7 100644 --- a/core/gen_genesis.go +++ b/core/gen_genesis.go @@ -41,7 +41,7 @@ func (g Genesis) MarshalJSON() ([]byte, error) { enc.Coinbase = g.Coinbase enc.Number = make([]math.HexOrDecimal64, common.HierarchyDepth) enc.ParentHash = make([]common.Hash, common.HierarchyDepth) - for i := 0; i < common.HierarchyDepth; i++ { + for i := 0; i < common.HierarchyDepth-1; i++ { enc.Number[i] = math.HexOrDecimal64(g.Number[i]) enc.ParentHash[i] = g.ParentHash[i] } diff --git a/core/genesis.go b/core/genesis.go index 743eaff940..fe039e5236 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -37,7 +37,6 @@ import ( "github.com/dominant-strategies/go-quai/ethdb" "github.com/dominant-strategies/go-quai/log" "github.com/dominant-strategies/go-quai/params" - "github.com/dominant-strategies/go-quai/trie" ) //go:generate gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go @@ -192,7 +191,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, nodeLoca genesis = DefaultGenesisBlock() } // Ensure the stored genesis matches with the given one. - hash := genesis.ToBlock(nil, startingExpansionNumber).Hash() + hash := genesis.ToBlock(startingExpansionNumber).Hash() if hash != stored { return genesis.Config, hash, &GenesisMismatchError{stored, hash} } @@ -204,7 +203,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, nodeLoca } // Check whether the genesis block is already written. 
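A hypothetical sketch of how the retyped ChainContext above is consumed: an EVM block context is now built straight from a *types.WorkObject. The nil author is assumed to fall back to the header's coinbase, as in upstream go-ethereum; that fallback is an assumption here, not shown in this hunk.

package example

import (
	"github.com/dominant-strategies/go-quai/core"
	"github.com/dominant-strategies/go-quai/core/types"
	"github.com/dominant-strategies/go-quai/core/vm"
)

// blockContextFor builds the EVM execution context for a work object header.
// chain is anything implementing the WorkObject-typed core.ChainContext.
func blockContextFor(header *types.WorkObject, chain core.ChainContext) vm.BlockContext {
	// nil author: the beneficiary is assumed to default to the header's coinbase.
	return core.NewEVMBlockContext(header, chain, nil)
}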
if genesis != nil { - hash := genesis.ToBlock(nil, startingExpansionNumber).Hash() + hash := genesis.ToBlock(startingExpansionNumber).Hash() if hash != stored { return genesis.Config, hash, &GenesisMismatchError{stored, hash} } @@ -265,30 +264,31 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { } } -// ToHeader creates the genesis header -func (g *Genesis) ToHeader(startingExpansionNumber uint64) *types.Header { - head := types.EmptyHeader() - head.SetNonce(types.EncodeNonce(g.Nonce)) - head.SetTime(g.Timestamp) - head.SetExtra(g.ExtraData) - head.SetDifficulty(g.Difficulty) - head.SetGasLimit(g.GasLimit) - head.SetGasUsed(0) +// ToBlock creates the genesis block and writes state of a genesis specification +// to the given database (or discards it if nil). +func (g *Genesis) ToBlock(startingExpansionNumber uint64) *types.WorkObject { + head := types.EmptyHeader(g.Config.Location.Context()) + head.WorkObjectHeader().SetNonce(types.EncodeNonce(g.Nonce)) + head.WorkObjectHeader().SetDifficulty(g.Difficulty) + head.WorkObjectHeader().SetTime(g.Timestamp) + head.Header().SetExtra(g.ExtraData) + head.Header().SetGasLimit(g.GasLimit) + head.Header().SetGasUsed(0) if startingExpansionNumber > 0 { // Fill each byte with 0xFF to set all bits to 1 var etxEligibleSlices common.Hash for i := 0; i < common.HashLength; i++ { etxEligibleSlices[i] = 0xFF } - head.SetEtxEligibleSlices(etxEligibleSlices) + head.Header().SetEtxEligibleSlices(etxEligibleSlices) } else { - head.SetEtxEligibleSlices(common.Hash{}) + head.Header().SetEtxEligibleSlices(common.Hash{}) } - head.SetCoinbase(common.Zero) - head.SetBaseFee(new(big.Int).SetUint64(params.InitialBaseFee)) - head.SetEtxSetHash(types.EmptyEtxSetHash) + head.Header().SetCoinbase(common.Zero) + head.Header().SetBaseFee(new(big.Int).SetUint64(params.InitialBaseFee)) + head.Header().SetEtxSetHash(types.EmptyEtxSetHash) if g.GasLimit == 0 { - head.SetGasLimit(params.GenesisGasLimit) + head.Header().SetGasLimit(params.GenesisGasLimit) } for i := 0; i < common.HierarchyDepth; i++ { head.SetNumber(big.NewInt(0), i) @@ -297,18 +297,11 @@ func (g *Genesis) ToHeader(startingExpansionNumber uint64) *types.Header { return head } -// ToBlock creates the genesis block and writes state of a genesis specification -// to the given database (or discards it if nil). -func (g *Genesis) ToBlock(db ethdb.Database, startingExpansionNumber uint64) *types.Block { - head := g.ToHeader(startingExpansionNumber) - return types.NewBlock(head, nil, nil, nil, nil, nil, trie.NewStackTrie(nil), g.Config.Location.Context()) -} - // Commit writes the block and state of a genesis specification to the database. // The block is committed as the canonical head block. 
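A hypothetical sketch of the genesis flow after the ToHeader/ToBlock merge above: the genesis block is assembled as a *types.WorkObject and committed through the work-object write path that follows.

package example

import (
	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core"
	"github.com/dominant-strategies/go-quai/core/types"
	"github.com/dominant-strategies/go-quai/ethdb"
)

// writeGenesis commits the genesis work object for the given location.
// Expansion number 0 leaves the etx-eligible-slices bitmap empty; a positive
// starting expansion fills it with 0xFF, per ToBlock above.
func writeGenesis(db ethdb.Database, g *core.Genesis, loc common.Location) (*types.WorkObject, error) {
	block := g.ToBlock(0) // *types.WorkObject; ToBlock no longer takes a db handle
	_ = block.Hash()      // the stored-genesis check above compares this hash
	return g.Commit(db, loc, 0)
}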
-func (g *Genesis) Commit(db ethdb.Database, nodeLocation common.Location, startingExpansionNumber uint64) (*types.Block, error) { +func (g *Genesis) Commit(db ethdb.Database, nodeLocation common.Location, startingExpansionNumber uint64) (*types.WorkObject, error) { nodeCtx := nodeLocation.Context() - block := g.ToBlock(db, startingExpansionNumber) + block := g.ToBlock(startingExpansionNumber) if block.Number(nodeCtx).Sign() != 0 { return nil, fmt.Errorf("can't commit genesis block with number > 0") } @@ -318,7 +311,7 @@ func (g *Genesis) Commit(db ethdb.Database, nodeLocation common.Location, starti } rawdb.WriteGenesisHashes(db, common.Hashes{block.Hash()}) rawdb.WriteTermini(db, block.Hash(), types.EmptyTermini()) - rawdb.WriteBlock(db, block, nodeCtx) + rawdb.WriteWorkObject(db, block.Hash(), block, types.BlockObject, nodeCtx) rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(nodeCtx), nil) rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64(nodeCtx)) rawdb.WriteHeadBlockHash(db, block.Hash()) @@ -329,7 +322,7 @@ func (g *Genesis) Commit(db ethdb.Database, nodeLocation common.Location, starti // MustCommit writes the genesis block and state to db, panicking on error. // The block is committed as the canonical head block. -func (g *Genesis) MustCommit(db ethdb.Database, nodeLocation common.Location) *types.Block { +func (g *Genesis) MustCommit(db ethdb.Database, nodeLocation common.Location) *types.WorkObject { block, err := g.Commit(db, nodeLocation, 0) if err != nil { panic(err) @@ -338,7 +331,7 @@ func (g *Genesis) MustCommit(db ethdb.Database, nodeLocation common.Location) *t } // GenesisBlockForTesting creates and writes a block in which addr has the given wei balance. -func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int, nodeLocation common.Location) *types.Block { +func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big.Int, nodeLocation common.Location) *types.WorkObject { g := Genesis{ BaseFee: big.NewInt(params.InitialBaseFee), } diff --git a/core/headerchain.go b/core/headerchain.go index f3767fdeb3..8a912d4c28 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -53,7 +53,7 @@ type HeaderChain struct { scope event.SubscriptionScope headerDb ethdb.Database - genesisHeader *types.Header + genesisHeader *types.WorkObject currentHeader atomic.Value // Current head of the header chain (may be above the block chain!) 
headerCache *lru.Cache // Cache for the most recent block headers @@ -72,7 +72,7 @@ type HeaderChain struct { procInterrupt int32 // interrupt signaler for block processing headermu sync.RWMutex - heads []*types.Header + heads []*types.WorkObject slicesRunning []common.Location logger *log.Logger @@ -102,7 +102,7 @@ func NewHeaderChain(db ethdb.Database, engine consensus.Engine, pEtxsRollupFetch } genesisHash := hc.GetGenesisHashes()[0] - hc.genesisHeader = hc.GetHeaderByHash(genesisHash) + hc.genesisHeader = rawdb.ReadWorkObject(db, genesisHash, types.BlockObject) if bytes.Equal(chainConfig.Location, common.Location{0, 0}) { if hc.genesisHeader == nil { return nil, ErrNoGenesis @@ -139,7 +139,7 @@ func NewHeaderChain(db ethdb.Database, engine consensus.Engine, pEtxsRollupFetch hc.subRollupCache = subRollupCache // Initialize the heads slice - heads := make([]*types.Header, 0) + heads := make([]*types.WorkObject, 0) hc.heads = heads // Initialize the UTXO cache @@ -150,13 +150,13 @@ func NewHeaderChain(db ethdb.Database, engine consensus.Engine, pEtxsRollupFetch // CollectSubRollup collects the rollup of ETXs emitted from the subordinate // chain in the slice which emitted the given block. -func (hc *HeaderChain) CollectSubRollup(b *types.Block) (types.Transactions, error) { +func (hc *HeaderChain) CollectSubRollup(b *types.WorkObject) (types.Transactions, error) { nodeCtx := hc.NodeCtx() subRollup := types.Transactions{} if nodeCtx < common.ZONE_CTX { // Since in prime the pending etxs are stored in 2 parts, pendingEtxsRollup // consists of region header and subrollups - for _, hash := range b.SubManifest() { + for _, hash := range b.Manifest() { if nodeCtx == common.PRIME_CTX { pEtxRollup, err := hc.GetPendingEtxsRollup(hash, b.Location()) if err == nil { @@ -171,7 +171,7 @@ func (hc *HeaderChain) CollectSubRollup(b *types.Block) (types.Transactions, err pendingEtxs, err := hc.GetPendingEtxs(hash) if err != nil { // Get the pendingEtx from the appropriate zone - hc.fetchPEtx(b.Hash(), hash, b.Header().Location()) + hc.fetchPEtx(b.Hash(), hash, b.Location()) return nil, ErrPendingEtxNotFound } subRollup = append(subRollup, pendingEtxs.Etxs...) 
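A simplified, hypothetical restatement of the rollup collection in this file (CollectSubRollup above, collectInclusiveEtxRollup in the next hunk): ETXs are accumulated from a block back to, but not across, the previous dom-coincident block. The callback parameters stand in for the engine and chain lookups used by the real methods.

package example

import (
	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core/types"
)

// inclusiveEtxRollup gathers ExtTransactions starting at b and walking parents
// until genesis or a dom-coincident ancestor terminates the search.
func inclusiveEtxRollup(
	b *types.WorkObject,
	isGenesis func(common.Hash) bool,
	isDomCoincident func(*types.WorkObject) bool,
	parent func(*types.WorkObject) *types.WorkObject,
) types.Transactions {
	etxs := b.ExtTransactions()
	if isGenesis(b.Hash()) || isDomCoincident(b) {
		return etxs
	}
	if p := parent(b); p != nil {
		etxs = append(etxs, inclusiveEtxRollup(p, isGenesis, isDomCoincident, parent)...)
	}
	return etxs
}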
@@ -208,7 +208,7 @@ func (hc *HeaderChain) GetPendingEtxsRollup(hash common.Hash, location common.Lo // Look for pending ETXs first in pending ETX cache, then in database if res, ok := hc.pendingEtxsRollup.Get(hash); ok && res != nil { rollups = res.(types.PendingEtxsRollup) - } else if res := rawdb.ReadPendingEtxsRollup(hc.headerDb, hash, location); res != nil { + } else if res := rawdb.ReadPendingEtxsRollup(hc.headerDb, hash); res != nil { rollups = *res } else { hc.logger.WithField("hash", hash.String()).Trace("Unable to find pending etx rollups for hash in manifest") @@ -234,7 +234,7 @@ func (hc *HeaderChain) GetBloom(hash common.Hash) (*types.Bloom, error) { // Collect all emmitted ETXs since the last coincident block, but excluding // those emitted in this block -func (hc *HeaderChain) CollectEtxRollup(b *types.Block) (types.Transactions, error) { +func (hc *HeaderChain) CollectEtxRollup(b *types.WorkObject) (types.Transactions, error) { if hc.IsGenesisHash(b.Hash()) { return b.ExtTransactions(), nil } @@ -245,7 +245,7 @@ func (hc *HeaderChain) CollectEtxRollup(b *types.Block) (types.Transactions, err return hc.collectInclusiveEtxRollup(parent) } -func (hc *HeaderChain) collectInclusiveEtxRollup(b *types.Block) (types.Transactions, error) { +func (hc *HeaderChain) collectInclusiveEtxRollup(b *types.WorkObject) (types.Transactions, error) { // Initialize the rollup with ETXs emitted by this block newEtxs := b.ExtTransactions() // Terminate the search if we reached genesis @@ -253,7 +253,7 @@ func (hc *HeaderChain) collectInclusiveEtxRollup(b *types.Block) (types.Transact return newEtxs, nil } // Terminate the search on coincidence with dom chain - if hc.engine.IsDomCoincident(hc, b.Header()) { + if hc.engine.IsDomCoincident(hc, b) { return newEtxs, nil } // Recursively get the ancestor rollup, until a coincident ancestor is found @@ -270,7 +270,7 @@ func (hc *HeaderChain) collectInclusiveEtxRollup(b *types.Block) (types.Transact } // Append -func (hc *HeaderChain) AppendHeader(header *types.Header) error { +func (hc *HeaderChain) AppendHeader(header *types.WorkObject) error { nodeCtx := hc.NodeCtx() hc.logger.WithFields(log.Fields{ "Hash": header.Hash(), @@ -317,7 +317,7 @@ func (hc *HeaderChain) ProcessingState() bool { } // Append -func (hc *HeaderChain) AppendBlock(block *types.Block, newInboundEtxs types.Transactions) error { +func (hc *HeaderChain) AppendBlock(block *types.WorkObject, newInboundEtxs types.Transactions) error { blockappend := time.Now() // Append block else revert header append logs, err := hc.bc.Append(block, newInboundEtxs) @@ -335,7 +335,7 @@ func (hc *HeaderChain) AppendBlock(block *types.Block, newInboundEtxs types.Tran } // SetCurrentHeader sets the current header based on the POEM choice -func (hc *HeaderChain) SetCurrentHeader(head *types.Header) error { +func (hc *HeaderChain) SetCurrentHeader(head *types.WorkObject) error { hc.headermu.Lock() defer hc.headermu.Unlock() @@ -361,11 +361,11 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) error { //Find a common header commonHeader := hc.findCommonAncestor(head) - newHeader := types.CopyHeader(head) + newHeader := types.CopyWorkObject(head) // Delete each header and rollback state processor until common header // Accumulate the hash slice stack - var hashStack []*types.Header + var hashStack []*types.WorkObject for { if newHeader.Hash() == commonHeader.Hash() { break @@ -380,7 +380,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) error { break } } - var prevHashStack 
[]*types.Header + var prevHashStack []*types.WorkObject for { if prevHeader.Hash() == commonHeader.Hash() { break @@ -406,7 +406,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) error { // Every Block that got removed from the canonical hash db is sent in the side feed to be // recorded as uncles go func() { - var blocks []*types.Block + var blocks []*types.WorkObject for i := len(prevHashStack) - 1; i >= 0; i-- { block := hc.bc.GetBlock(prevHashStack[i].Hash(), prevHashStack[i].NumberU64(hc.NodeCtx())) if block != nil { @@ -421,7 +421,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) error { } // SetCurrentState updates the current Quai state and Qi UTXO set upon which the current pending block is built -func (hc *HeaderChain) SetCurrentState(head *types.Header) error { +func (hc *HeaderChain) SetCurrentState(head *types.WorkObject) error { hc.headermu.Lock() defer hc.headermu.Unlock() @@ -430,8 +430,8 @@ func (hc *HeaderChain) SetCurrentState(head *types.Header) error { return nil } - current := types.CopyHeader(head) - var headersWithoutState []*types.Header + current := types.CopyWorkObject(head) + var headersWithoutState []*types.WorkObject for { headersWithoutState = append(headersWithoutState, current) header := hc.GetHeaderByHash(current.ParentHash(nodeCtx)) @@ -443,11 +443,11 @@ func (hc *HeaderChain) SetCurrentState(head *types.Header) error { } // Checking of the Etx set exists makes sure that we have processed the // state of the parent block - etxSet := rawdb.ReadEtxSet(hc.headerDb, header.Hash(), header.NumberU64(nodeCtx), hc.NodeLocation()) + etxSet := rawdb.ReadEtxSet(hc.headerDb, header.Hash(), header.NumberU64(nodeCtx)) if etxSet != nil { break } - current = types.CopyHeader(header) + current = types.CopyWorkObject(header) } // Run through the hash stack to update canonicalHash and forward state processor @@ -461,19 +461,19 @@ func (hc *HeaderChain) SetCurrentState(head *types.Header) error { } // ReadInboundEtxsAndAppendBlock reads the inbound etxs from database and appends the block -func (hc *HeaderChain) ReadInboundEtxsAndAppendBlock(header *types.Header) error { +func (hc *HeaderChain) ReadInboundEtxsAndAppendBlock(header *types.WorkObject) error { nodeCtx := hc.NodeCtx() block := hc.GetBlockOrCandidate(header.Hash(), header.NumberU64(nodeCtx)) if block == nil { - return errors.New("Could not find block during reorg") + return errors.New("could not find block during reorg") } - _, order, err := hc.engine.CalcOrder(block.Header()) + _, order, err := hc.engine.CalcOrder(block) if err != nil { return err } var inboundEtxs types.Transactions if order < nodeCtx { - inboundEtxs = rawdb.ReadInboundEtxs(hc.headerDb, header.Hash(), hc.NodeLocation()) + inboundEtxs = rawdb.ReadInboundEtxs(hc.headerDb, header.Hash()) } err = hc.AppendBlock(block, inboundEtxs) if err != nil { @@ -483,8 +483,8 @@ func (hc *HeaderChain) ReadInboundEtxsAndAppendBlock(header *types.Header) error } // findCommonAncestor -func (hc *HeaderChain) findCommonAncestor(header *types.Header) *types.Header { - current := types.CopyHeader(header) +func (hc *HeaderChain) findCommonAncestor(header *types.WorkObject) *types.WorkObject { + current := types.CopyWorkObject(header) for { if current == nil { return nil @@ -554,7 +554,7 @@ func (hc *HeaderChain) loadLastState() error { hc.currentHeader.Store(recoveredHeader) } - heads := make([]*types.Header, 0) + heads := make([]*types.WorkObject, 0) for _, hash := range headsHashes { heads = append(heads, hc.GetHeaderByHash(hash)) } @@ 
-678,20 +678,20 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma return hash, number } -func (hc *HeaderChain) WriteBlock(block *types.Block) { - hc.bc.WriteBlock(block) +func (hc *HeaderChain) WriteBlock(block *types.WorkObject) { + hc.bc.WriteBlock(block, hc.NodeCtx()) } // GetHeader retrieves a block header from the database by hash and number, // caching it if found. -func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { +func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.WorkObject { termini := hc.GetTerminiByHash(hash) if termini == nil { return nil } // Short circuit if the header's already in the cache, retrieve otherwise if header, ok := hc.headerCache.Get(hash); ok { - return header.(*types.Header) + return header.(*types.WorkObject) } header := rawdb.ReadHeader(hc.headerDb, hash, number) if header == nil { @@ -704,7 +704,7 @@ func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header // GetHeaderByHash retrieves a block header from the database by hash, caching it if // found. -func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header { +func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.WorkObject { termini := hc.GetTerminiByHash(hash) if termini == nil { return nil @@ -719,10 +719,10 @@ func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header { // GetHeaderOrCandidate retrieves a block header from the database by hash and number, // caching it if found. -func (hc *HeaderChain) GetHeaderOrCandidate(hash common.Hash, number uint64) *types.Header { +func (hc *HeaderChain) GetHeaderOrCandidate(hash common.Hash, number uint64) *types.WorkObject { // Short circuit if the header's already in the cache, retrieve otherwise if header, ok := hc.headerCache.Get(hash); ok { - return header.(*types.Header) + return header.(*types.WorkObject) } header := rawdb.ReadHeader(hc.headerDb, hash, number) if header == nil { @@ -735,7 +735,7 @@ func (hc *HeaderChain) GetHeaderOrCandidate(hash common.Hash, number uint64) *ty // RecoverCurrentHeader retrieves the current head header of the canonical chain. The // header is retrieved from the HeaderChain's internal cache -func (hc *HeaderChain) RecoverCurrentHeader() *types.Header { +func (hc *HeaderChain) RecoverCurrentHeader() *types.WorkObject { // Start logarithmic ascent to find the upper bound high := uint64(1) for hc.GetHeaderByNumber(high) != nil { @@ -759,7 +759,7 @@ func (hc *HeaderChain) RecoverCurrentHeader() *types.Header { // GetHeaderOrCandidateByHash retrieves a block header from the database by hash, caching it if // found. -func (hc *HeaderChain) GetHeaderOrCandidateByHash(hash common.Hash) *types.Header { +func (hc *HeaderChain) GetHeaderOrCandidateByHash(hash common.Hash) *types.WorkObject { number := hc.GetBlockNumber(hash) if number == nil { return nil @@ -780,7 +780,7 @@ func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool { // GetHeaderByNumber retrieves a block header from the database by number, // caching it (associated with its hash) if found. -func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header { +func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.WorkObject { hash := rawdb.ReadCanonicalHash(hc.headerDb, number) if hash == (common.Hash{}) { return nil @@ -795,17 +795,17 @@ func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash { // CurrentHeader retrieves the current head header of the canonical chain. 
The // header is retrieved from the HeaderChain's internal cache. -func (hc *HeaderChain) CurrentHeader() *types.Header { - return hc.currentHeader.Load().(*types.Header) +func (hc *HeaderChain) CurrentHeader() *types.WorkObject { + return hc.currentHeader.Load().(*types.WorkObject) } // CurrentBlock returns the block for the current header. -func (hc *HeaderChain) CurrentBlock() *types.Block { +func (hc *HeaderChain) CurrentBlock() *types.WorkObject { return hc.GetBlockOrCandidateByHash(hc.CurrentHeader().Hash()) } // SetGenesis sets a new genesis block header for the chain -func (hc *HeaderChain) SetGenesis(head *types.Header) { +func (hc *HeaderChain) SetGenesis(head *types.WorkObject) { hc.genesisHeader = head } @@ -814,10 +814,14 @@ func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config } // GetBlock implements consensus.ChainReader, and returns nil for every input as // a header chain does not have blocks available for retrieval. -func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block { +func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.WorkObject { return hc.bc.GetBlock(hash, number) } +func (hc *HeaderChain) GetWorkObject(hash common.Hash) *types.WorkObject { + return hc.bc.GetWorkObject(hash) +} + // CheckContext checks to make sure the range of a context or order is valid func (hc *HeaderChain) CheckContext(context int) error { if context < 0 || context > common.HierarchyDepth { @@ -833,8 +837,8 @@ func (hc *HeaderChain) GasLimit() uint64 { // GetUnclesInChain retrieves all the uncles from a given block backwards until // a specific distance is reached. -func (hc *HeaderChain) GetUnclesInChain(block *types.Block, length int) []*types.Header { - uncles := []*types.Header{} +func (hc *HeaderChain) GetUnclesInChain(block *types.WorkObject, length int) []*types.WorkObjectHeader { + uncles := []*types.WorkObjectHeader{} for i := 0; block != nil && i < length; i++ { uncles = append(uncles, block.Uncles()...) block = hc.GetBlock(block.ParentHash(hc.NodeCtx()), block.NumberU64(hc.NodeCtx())-1) @@ -844,7 +848,7 @@ func (hc *HeaderChain) GetUnclesInChain(block *types.Block, length int) []*types // GetGasUsedInChain retrieves all the gas used from a given block backwards until // a specific distance is reached. -func (hc *HeaderChain) GetGasUsedInChain(block *types.Block, length int) int64 { +func (hc *HeaderChain) GetGasUsedInChain(block *types.WorkObject, length int) int64 { gasUsed := 0 for i := 0; block != nil && i < length; i++ { gasUsed += int(block.GasUsed()) @@ -855,7 +859,7 @@ func (hc *HeaderChain) GetGasUsedInChain(block *types.Block, length int) int64 { // GetGasUsedInChain retrieves all the gas used from a given block backwards until // a specific distance is reached. 
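A hypothetical sketch of the search pattern used by RecoverCurrentHeader above: an exponential ascent brackets the highest stored header number, then a binary search inside the bracket pins it down. headerAt stands in for hc.GetHeaderByNumber.

package example

import "github.com/dominant-strategies/go-quai/core/types"

// highestStored returns the largest number for which a header is stored,
// assuming headers 0..head exist contiguously (0 is genesis).
func highestStored(headerAt func(uint64) *types.WorkObject) uint64 {
	// Exponential ascent: find an upper bound that is past the head.
	high := uint64(1)
	for headerAt(high) != nil {
		high *= 2
	}
	// Binary search for the last number that still has a header.
	low := high / 2 // the last probe known to exist (0 when headerAt(1) is nil)
	for low+1 < high {
		mid := (low + high) / 2
		if headerAt(mid) != nil {
			low = mid
		} else {
			high = mid
		}
	}
	return low
}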
-func (hc *HeaderChain) CalculateBaseFee(header *types.Header) *big.Int { +func (hc *HeaderChain) CalculateBaseFee(header *types.WorkObject) *big.Int { return misc.CalcBaseFee(hc.Config(), header) } @@ -870,17 +874,17 @@ func (hc *HeaderChain) ExportN(w io.Writer, first uint64, last uint64) error { } // GetBlockFromCacheOrDb looks up the body cache first and then checks the db -func (hc *HeaderChain) GetBlockFromCacheOrDb(hash common.Hash, number uint64) *types.Block { +func (hc *HeaderChain) GetBlockFromCacheOrDb(hash common.Hash, number uint64) *types.WorkObject { // Short circuit if the block's already in the cache, retrieve otherwise if cached, ok := hc.bc.blockCache.Get(hash); ok { - block := cached.(*types.Block) + block := cached.(*types.WorkObject) return block } return hc.GetBlock(hash, number) } // GetBlockByHash retrieves a block from the database by hash, caching it if found. -func (hc *HeaderChain) GetBlockByHash(hash common.Hash) *types.Block { +func (hc *HeaderChain) GetBlockByHash(hash common.Hash) *types.WorkObject { number := hc.GetBlockNumber(hash) if number == nil { return nil @@ -888,12 +892,12 @@ func (hc *HeaderChain) GetBlockByHash(hash common.Hash) *types.Block { return hc.GetBlock(hash, *number) } -func (hc *HeaderChain) GetBlockOrCandidate(hash common.Hash, number uint64) *types.Block { +func (hc *HeaderChain) GetBlockOrCandidate(hash common.Hash, number uint64) *types.WorkObject { return hc.bc.GetBlockOrCandidate(hash, number) } // GetBlockOrCandidateByHash retrieves any block from the database by hash, caching it if found. -func (hc *HeaderChain) GetBlockOrCandidateByHash(hash common.Hash) *types.Block { +func (hc *HeaderChain) GetBlockOrCandidateByHash(hash common.Hash) *types.WorkObject { number := hc.GetBlockNumber(hash) if number == nil { return nil @@ -903,7 +907,7 @@ func (hc *HeaderChain) GetBlockOrCandidateByHash(hash common.Hash) *types.Block // GetBlockByNumber retrieves a block from the database by number, caching it // (associated with its hash) if found. -func (hc *HeaderChain) GetBlockByNumber(number uint64) *types.Block { +func (hc *HeaderChain) GetBlockByNumber(number uint64) *types.WorkObject { hash := rawdb.ReadCanonicalHash(hc.headerDb, number) if hash == (common.Hash{}) { return nil @@ -913,17 +917,17 @@ func (hc *HeaderChain) GetBlockByNumber(number uint64) *types.Block { // GetBody retrieves a block body (transactions and uncles) from the database by // hash, caching it if found. -func (hc *HeaderChain) GetBody(hash common.Hash) *types.Body { +func (hc *HeaderChain) GetBody(hash common.Hash) *types.WorkObject { // Short circuit if the body's already in the cache, retrieve otherwise if cached, ok := hc.bc.bodyCache.Get(hash); ok { - body := cached.(*types.Body) + body := cached.(*types.WorkObject) return body } number := hc.GetBlockNumber(hash) if number == nil { return nil } - body := rawdb.ReadBody(hc.headerDb, hash, *number, hc.NodeLocation()) + body := rawdb.ReadWorkObject(hc.headerDb, hash, types.BlockObject) if body == nil { return nil } @@ -954,7 +958,7 @@ func (hc *HeaderChain) GetBodyRLP(hash common.Hash) rlp.RawValue { // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. 
// [deprecated by eth/62] -func (hc *HeaderChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { +func (hc *HeaderChain) GetBlocksFromHash(hash common.Hash, n int) (blocks types.WorkObjects) { number := hc.GetBlockNumber(hash) if number == nil { return nil @@ -1062,7 +1066,7 @@ func (hc *HeaderChain) InitializeAddressUtxoCache() error { } // ComputeEfficiencyScore calculates the efficiency score for the given header -func (hc *HeaderChain) ComputeEfficiencyScore(parent *types.Header) uint16 { +func (hc *HeaderChain) ComputeEfficiencyScore(parent *types.WorkObject) uint16 { deltaS := new(big.Int).Add(parent.ParentDeltaS(common.REGION_CTX), parent.ParentDeltaS(common.ZONE_CTX)) uncledDeltaS := new(big.Int).Add(parent.ParentUncledSubDeltaS(common.REGION_CTX), parent.ParentUncledSubDeltaS(common.ZONE_CTX)) @@ -1076,7 +1080,7 @@ func (hc *HeaderChain) ComputeEfficiencyScore(parent *types.Header) uint16 { } // UpdateEtxEligibleSlices returns the updated etx eligible slices field -func (hc *HeaderChain) UpdateEtxEligibleSlices(header *types.Header, location common.Location) common.Hash { +func (hc *HeaderChain) UpdateEtxEligibleSlices(header *types.WorkObject, location common.Location) common.Hash { // After 5 days of the start of a new chain, the chain becomes eligible to receive etxs position := location[0]*16 + location[1] byteIndex := position / 8 // Find the byte index within the array @@ -1094,7 +1098,7 @@ func (hc *HeaderChain) UpdateEtxEligibleSlices(header *types.Header, location co // IsSliceSetToReceiveEtx returns true if the etx eligible slice is set to // receive etx for the given zone location -func (hc *HeaderChain) IsSliceSetToReceiveEtx(header *types.Header, location common.Location) bool { +func (hc *HeaderChain) IsSliceSetToReceiveEtx(header *types.WorkObject, location common.Location) bool { return hc.CheckIfEtxIsEligible(header.EtxEligibleSlices(), location) } @@ -1143,6 +1147,6 @@ func (hc *HeaderChain) GetExpansionNumber() uint8 { return hc.currentExpansionNumber } -func (hc *HeaderChain) GetPrimeTerminus(header *types.Header) *types.Header { +func (hc *HeaderChain) GetPrimeTerminus(header *types.WorkObject) *types.WorkObject { return hc.GetHeaderByHash(header.PrimeTerminus()) } diff --git a/core/miner.go b/core/miner.go index 079df1806d..a8960eb4e8 100644 --- a/core/miner.go +++ b/core/miner.go @@ -44,7 +44,7 @@ type Miner struct { logger *log.Logger } -func New(hc *HeaderChain, txPool *TxPool, config *Config, db ethdb.Database, chainConfig *params.ChainConfig, engine consensus.Engine, isLocalBlock func(block *types.Header) bool, processingState bool, logger *log.Logger) *Miner { +func New(hc *HeaderChain, txPool *TxPool, config *Config, db ethdb.Database, chainConfig *params.ChainConfig, engine consensus.Engine, isLocalBlock func(block *types.WorkObject) bool, processingState bool, logger *log.Logger) *Miner { miner := &Miner{ hc: hc, engine: engine, @@ -139,7 +139,7 @@ func (miner *Miner) SetRecommitInterval(interval time.Duration) { } // Pending returns the currently pending block and associated state. 
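A hypothetical sketch of the etx-eligible-slices addressing used by UpdateEtxEligibleSlices above: each slice maps to one bit of the 32-byte hash at position region*16 + zone. The within-byte bit order is an assumption here and may differ from the actual implementation.

package example

import "github.com/dominant-strategies/go-quai/common"

// sliceBit locates the bit for a zone location inside the eligibility bitmap.
func sliceBit(loc common.Location) (byteIndex int, mask byte) {
	position := int(loc[0])*16 + int(loc[1]) // region*16 + zone
	byteIndex = position / 8                 // byte index within the hash
	mask = 1 << uint(position%8)             // assumed bit position in that byte
	return byteIndex, mask
}

// markEligible returns a copy of the bitmap with the slice's bit set.
func markEligible(slices common.Hash, loc common.Location) common.Hash {
	byteIndex, mask := sliceBit(loc)
	slices[byteIndex] |= mask
	return slices
}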
-func (miner *Miner) Pending() *types.Block { +func (miner *Miner) Pending() *types.WorkObject { return miner.worker.pending() } @@ -148,12 +148,12 @@ func (miner *Miner) Pending() *types.Block { // Note, to access both the pending block and the pending state // simultaneously, please use Pending(), as the pending state can // change between multiple method calls -func (miner *Miner) PendingBlock() *types.Block { +func (miner *Miner) PendingBlock() *types.WorkObject { return miner.worker.pendingBlock() } // PendingBlockAndReceipts returns the currently pending block and corresponding receipts. -func (miner *Miner) PendingBlockAndReceipts() (*types.Block, types.Receipts) { +func (miner *Miner) PendingBlockAndReceipts() (*types.WorkObject, types.Receipts) { return miner.worker.pendingBlockAndReceipts() } @@ -191,6 +191,6 @@ func (miner *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscript } // SubscribePendingBlock starts delivering the pending block to the given channel. -func (miner *Miner) SubscribePendingHeader(ch chan<- *types.Header) event.Subscription { +func (miner *Miner) SubscribePendingHeader(ch chan<- *types.WorkObject) event.Subscription { return miner.worker.pendingHeaderFeed.Subscribe(ch) } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index d9dd041f00..6eb3ffc462 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -21,7 +21,6 @@ import ( "encoding/binary" "errors" "math/big" - "sort" "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/core/types" @@ -151,7 +150,7 @@ func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) { // ReadHeadBlockHash retrieves the hash of the current canonical head block. func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash { - data, _ := db.Get(headBlockKey) + data, _ := db.Get(headWorkObjectKey) if len(data) == 0 { return common.Hash{} } @@ -160,7 +159,7 @@ func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash { // WriteHeadBlockHash stores the head block's hash. func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) { - if err := db.Put(headBlockKey, hash.Bytes()); err != nil { + if err := db.Put(headWorkObjectKey, hash.Bytes()); err != nil { log.Global.WithField("err", err).Fatal("Failed to store last block's hash") } } @@ -285,27 +284,8 @@ func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool { } // ReadHeader retrieves the block header corresponding to the hash. -func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header { - data := ReadHeaderProto(db, hash, number) - if len(data) == 0 { - log.Global.Warn("proto header is nil") - return nil - } - protoHeader := new(types.ProtoHeader) - err := proto.Unmarshal(data, protoHeader) - if err != nil { - log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal header") - } - header := new(types.Header) - err = header.ProtoDecode(protoHeader) - if err != nil { - log.Global.WithFields(log.Fields{ - "hash": hash, - "err": err, - }).Error("Invalid block header Proto") - return nil - } - return header +func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.WorkObject { + return ReadWorkObject(db, hash, types.BlockObject) } // WriteHeader stores a block header into the database and also stores the hash- @@ -417,51 +397,8 @@ func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool { return true } -// ReadBody retrieves the block body corresponding to the hash. 
-func ReadBody(db ethdb.Reader, hash common.Hash, number uint64, location common.Location) *types.Body { - data := ReadBodyProto(db, hash, number) - if len(data) == 0 { - return nil - } - protoBody := new(types.ProtoBody) - err := proto.Unmarshal(data, protoBody) - if err != nil { - log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal body") - } - body := new(types.Body) - err = body.ProtoDecode(protoBody, location) - if err != nil { - log.Global.WithFields(log.Fields{ - "hash": hash, - "err": err, - }).Error("Invalid block body Proto") - return nil - } - return body -} - -// WriteBody stores a block body into the database. -func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) { - protoBody, err := body.ProtoEncode() - if err != nil { - log.Global.WithField("err", err).Fatal("Failed to proto encode body") - } - data, err := proto.Marshal(protoBody) - if err != nil { - log.Global.WithField("err", err).Fatal("Failed to proto Marshal body") - } - WriteBodyProto(db, hash, number, data) -} - -// DeleteBody removes all block body data associated with a hash. -func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { - if err := db.Delete(blockBodyKey(number, hash)); err != nil { - log.Global.WithField("err", err).Fatal("Failed to delete block body") - } -} - // ReadPbCacheBody retrieves the block body corresponding to the hash. -func ReadPbCacheBody(db ethdb.Reader, hash common.Hash, location common.Location) *types.Body { +func ReadPbCacheBody(db ethdb.Reader, hash common.Hash) *types.WorkObject { data, err := db.Get(pbBodyKey(hash)) if err != nil { log.Global.WithFields(log.Fields{ @@ -473,12 +410,12 @@ func ReadPbCacheBody(db ethdb.Reader, hash common.Hash, location common.Location if len(data) == 0 { return nil } - protoBody := new(types.ProtoBody) - if err := proto.Unmarshal(data, protoBody); err != nil { + protoWorkObject := new(types.ProtoWorkObject) + if err := proto.Unmarshal(data, protoWorkObject); err != nil { log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal body") } - body := new(types.Body) - body.ProtoDecode(protoBody, location) + body := new(types.WorkObject) + body.ProtoDecode(protoWorkObject, db.Location(), types.PhObject) if err != nil { log.Global.WithFields(log.Fields{ "hash": hash, @@ -490,8 +427,8 @@ func ReadPbCacheBody(db ethdb.Reader, hash common.Hash, location common.Location } // WritePbCacheBody stores a block body into the database. -func WritePbCacheBody(db ethdb.KeyValueWriter, hash common.Hash, body *types.Body) { - protoBody, err := body.ProtoEncode() +func WritePbCacheBody(db ethdb.KeyValueWriter, hash common.Hash, body *types.WorkObject) { + protoBody, err := body.ProtoEncode(types.PhObject) if err != nil { log.Global.WithField("err", err).Fatal("Failed to proto encode body") } @@ -607,6 +544,165 @@ func DeleteTermini(db ethdb.KeyValueWriter, hash common.Hash) { } } +// ReadWorkObjectHeader retreive's the work object header stored in hash. 
+func ReadWorkObjectHeader(db ethdb.Reader, hash common.Hash, woType int) *types.WorkObjectHeader { + var key []byte + switch woType { + case types.BlockObject: + key = blockWorkObjectHeaderKey(hash) + case types.TxObject: + key = txWorkObjectHeaderKey(hash) + case types.PhObject: + key = phWorkObjectHeaderKey(hash) + } + data, _ := db.Get(key) + if len(data) == 0 { + return nil + } + protoWorkObjectHeader := new(types.ProtoWorkObjectHeader) + err := proto.Unmarshal(data, protoWorkObjectHeader) + if err != nil { + log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal work object header") + } + workObjectHeader := new(types.WorkObjectHeader) + err = workObjectHeader.ProtoDecode(protoWorkObjectHeader) + if err != nil { + log.Global.WithFields(log.Fields{ + "hash": hash, + "err": err, + }).Error("Invalid work object header Proto") + return nil + } + return workObjectHeader +} + +// WriteWorkObjectHeader writes the work object header of the terminus hash. +func WriteWorkObjectHeader(db ethdb.KeyValueWriter, hash common.Hash, workObject *types.WorkObject, woType int, nodeCtx int) { + var key []byte + switch woType { + case types.BlockObject: + key = blockWorkObjectHeaderKey(hash) + case types.TxObject: + key = txWorkObjectHeaderKey(hash) + case types.PhObject: + key = phWorkObjectHeaderKey(hash) + } + protoWorkObjectHeader, err := workObject.WorkObjectHeader().ProtoEncode() + if err != nil { + log.Global.WithField("err", err).Fatal("Failed to proto encode work object header") + } + data, err := proto.Marshal(protoWorkObjectHeader) + if err != nil { + log.Global.WithField("err", err).Fatal("Failed to proto Marshal work object header") + } + if err := db.Put(key, data); err != nil { + log.Global.WithField("err", err).Fatal("Failed to store work object header") + } +} + +// DeleteWorkObjectHeader deletes the work object header stored for the header hash. +func DeleteWorkObjectHeader(db ethdb.KeyValueWriter, hash common.Hash, woType int) { + var key []byte + switch woType { + case types.BlockObject: + key = blockWorkObjectHeaderKey(hash) + case types.TxObject: + key = txWorkObjectHeaderKey(hash) + case types.PhObject: + key = phWorkObjectHeaderKey(hash) + } + if err := db.Delete(key); err != nil { + log.Global.WithField("err", err).Fatal("Failed to delete work object header ") + } +} + +// ReadWorkObject retreive's the work object stored in hash. +func ReadWorkObject(db ethdb.Reader, hash common.Hash, woType int) *types.WorkObject { + workObjectHeader := ReadWorkObjectHeader(db, hash, woType) + if workObjectHeader == nil { + return nil + } + workObjectBody := ReadWorkObjectBody(db, hash) + if workObjectBody == nil { + return nil + } + return types.NewWorkObject(workObjectHeader, workObjectBody, nil, woType) //TODO: mmtx transaction +} + +// WriteWorkObject writes the work object of the terminus hash. +func WriteWorkObject(db ethdb.KeyValueWriter, hash common.Hash, workObject *types.WorkObject, woType int, nodeCtx int) { + WriteWorkObjectBody(db, hash, workObject, woType, nodeCtx) + WriteWorkObjectHeader(db, hash, workObject, woType, nodeCtx) +} + +// DeleteWorkObject deletes the work object stored for the header hash. 
+func DeleteWorkObject(db ethdb.KeyValueWriter, hash common.Hash, number uint64, woType int) {
+	DeleteWorkObjectBody(db, hash)
+	DeleteWorkObjectHeader(db, hash, woType) //TODO: mmtx transaction
+	DeleteHeader(db, hash, number)
+	DeleteReceipts(db, hash, number)
+}
+
+// DeleteBlockWithoutNumber removes all block data associated with a hash, except
+// the hash to number mapping.
+func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64, woType int) {
+	DeleteWorkObjectBody(db, hash)
+	DeleteWorkObjectHeader(db, hash, woType) //TODO: mmtx transaction
+	DeleteReceipts(db, hash, number)
+	deleteHeaderWithoutNumber(db, hash, number)
+}
+
+// ReadWorkObjectBody retrieves the work object body stored at the given hash.
+func ReadWorkObjectBody(db ethdb.Reader, hash common.Hash) *types.WorkObjectBody {
+	key := workObjectBodyKey(hash)
+	data, _ := db.Get(key)
+	if len(data) == 0 {
+		return nil
+	}
+	protoWorkObjectBody := new(types.ProtoWorkObjectBody)
+	err := proto.Unmarshal(data, protoWorkObjectBody)
+	if err != nil {
+		log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal work object body")
+	}
+	workObjectBody := new(types.WorkObjectBody)
+	err = workObjectBody.ProtoDecode(protoWorkObjectBody, db.Location())
+	if err != nil {
+		log.Global.WithFields(log.Fields{
+			"hash": hash,
+			"err":  err,
+		}).Error("Invalid work object body Proto")
+		return nil
+	}
+	return workObjectBody
+}
+
+// WriteWorkObjectBody writes the work object body for the given hash.
+func WriteWorkObjectBody(db ethdb.KeyValueWriter, hash common.Hash, workObject *types.WorkObject, woType int, nodeCtx int) {
+
+	key := workObjectBodyKey(hash)
+	WriteHeaderNumber(db, hash, workObject.NumberU64(nodeCtx))
+
+	protoWorkObjectBody, err := workObject.Body().ProtoEncode()
+	if err != nil {
+		log.Global.WithField("err", err).Fatal("Failed to proto encode work object body")
+	}
+	data, err := proto.Marshal(protoWorkObjectBody)
+	if err != nil {
+		log.Global.WithField("err", err).Fatal("Failed to proto Marshal work object body")
+	}
+	if err := db.Put(key, data); err != nil {
+		log.Global.WithField("err", err).Fatal("Failed to store work object body")
+	}
+}
+
+// DeleteWorkObjectBody deletes the work object body stored for the header hash.
+func DeleteWorkObjectBody(db ethdb.KeyValueWriter, hash common.Hash) {
+	key := workObjectBodyKey(hash)
+	if err := db.Delete(key); err != nil {
+		log.Global.WithField("err", err).Fatal("Failed to delete work object body")
+	}
+}
+
 // ReadPendingHeader retreive's the pending header stored in hash.
 func ReadPendingHeader(db ethdb.Reader, hash common.Hash) *types.PendingHeader {
 	key := pendingHeaderKey(hash)
@@ -624,7 +720,7 @@ func ReadPendingHeader(db ethdb.Reader, hash common.Hash) *types.PendingHeader {
 	pendingHeader := new(types.PendingHeader)
-	err = pendingHeader.ProtoDecode(protoPendingHeader)
+	err = pendingHeader.ProtoDecode(protoPendingHeader, db.Location())
 	if err != nil {
 		log.Global.WithFields(log.Fields{
 			"hash": hash,
@@ -776,7 +872,7 @@ func ReadReceiptsProto(db ethdb.Reader, hash common.Hash, number uint64) []byte
 // ReadRawReceipts retrieves all the transaction receipts belonging to a block.
 // The receipt metadata fields are not guaranteed to be populated, so they
 // should not be used. Use ReadReceipts instead if the metadata is needed.
-func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64, location common.Location) types.Receipts { +func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts { // Retrieve the flattened receipt slice data := ReadReceiptsProto(db, hash, number) if len(data) == 0 { @@ -790,7 +886,7 @@ func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64, location } // Convert the receipts from their storage form to their internal representation storageReceipts := new(types.ReceiptsForStorage) - err = storageReceipts.ProtoDecode(protoReceipt, location) + err = storageReceipts.ProtoDecode(protoReceipt, db.Location()) if err != nil { log.Global.WithFields(log.Fields{ "hash": hash, @@ -814,11 +910,11 @@ func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64, location // if the receipt itself is stored. func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts { // We're deriving many fields from the block body, retrieve beside the receipt - receipts := ReadRawReceipts(db, hash, number, config.Location) + receipts := ReadRawReceipts(db, hash, number) if receipts == nil { return nil } - body := ReadBody(db, hash, number, config.Location) + body := ReadWorkObject(db, hash, types.BlockObject) if body == nil { log.Global.WithFields(log.Fields{ "hash": hash, @@ -866,106 +962,68 @@ func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { } } -// ReadBlock retrieves an entire block corresponding to the hash, assembling it -// back from the stored header and body. If either the header or body could not -// be retrieved nil is returned. -// -// Note, due to concurrent download of header and block body the header and thus -// canonical hash can be stored in the database but the body data not (yet). -func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64, location common.Location) *types.Block { - header := ReadHeader(db, hash, number) - if header == nil { - return nil - } - body := ReadBody(db, hash, number, location) - if body == nil { - return nil - } - return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles, body.ExtTransactions, body.SubManifest, body.InterlinkHashes) -} - -// WriteBlock serializes a block into the database, header and body separately. -func WriteBlock(db ethdb.KeyValueWriter, block *types.Block, nodeCtx int) { - WriteBody(db, block.Hash(), block.NumberU64(nodeCtx), block.Body()) - WriteHeader(db, block.Header(), nodeCtx) -} +const badWorkObjectToKeep = 10 -// DeleteBlock removes all block data associated with a hash. -func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { - DeleteReceipts(db, hash, number) - DeleteHeader(db, hash, number) - DeleteBody(db, hash, number) +type badWorkObject struct { + woHeader *types.WorkObjectHeader + woBody *types.WorkObjectBody + tx types.Transaction } -// DeleteBlockWithoutNumber removes all block data associated with a hash, except -// the hash to number mapping. -func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { - DeleteReceipts(db, hash, number) - deleteHeaderWithoutNumber(db, hash, number) - DeleteBody(db, hash, number) -} - -const badBlockToKeep = 10 - -type badBlock struct { - Header *types.Header - Body *types.Body -} - -// ProtoEncode returns the protobuf encoding of the bad block. 
-func (b badBlock) ProtoEncode() *ProtoBadBlock { - protoHeader, err := b.Header.ProtoEncode() +// ProtoEncode returns the protobuf encoding of the bad workObject. +func (b badWorkObject) ProtoEncode() *ProtoBadWorkObject { + protoWorkObjectHeader, err := b.woHeader.ProtoEncode() if err != nil { log.Global.WithField("err", err).Fatal("Failed to proto encode header") } - protoBody, err := b.Body.ProtoEncode() + protoWorkObjectBody, err := b.woBody.ProtoEncode() if err != nil { log.Global.WithField("err", err).Fatal("Failed to proto encode body") } - return &ProtoBadBlock{ - Header: protoHeader, - Body: protoBody, + return &ProtoBadWorkObject{ + WoHeader: protoWorkObjectHeader, + WoBody: protoWorkObjectBody, } } -// ProtoDecode decodes the protobuf encoding of the bad block. -func (b *badBlock) ProtoDecode(pb *ProtoBadBlock, location common.Location) error { - header := new(types.Header) - if err := header.ProtoDecode(pb.Header); err != nil { +// ProtoDecode decodes the protobuf encoding of the bad workObject. +func (b *badWorkObject) ProtoDecode(pb *ProtoBadWorkObject) error { + woHeader := new(types.WorkObjectHeader) + if err := woHeader.ProtoDecode(pb.WoHeader); err != nil { return err } - b.Header = header - body := new(types.Body) - if err := body.ProtoDecode(pb.Body, location); err != nil { + b.woHeader = woHeader + woBody := new(types.WorkObjectBody) + if err := woBody.ProtoDecode(pb.WoBody, b.woHeader.Location()); err != nil { return err } - b.Body = body + b.woBody = woBody return nil } -// badBlockList implements the sort interface to allow sorting a list of +// badWorkObjectList implements the sort interface to allow sorting a list of // bad blocks by their number in the reverse order. -type badBlockList []*badBlock +type badWorkObjectList []*badWorkObject -func (s badBlockList) Len() int { return len(s) } -func (s badBlockList) Less(i, j int) bool { - return s[i].Header.NumberU64(common.ZONE_CTX) < s[j].Header.NumberU64(common.ZONE_CTX) +func (s badWorkObjectList) Len() int { return len(s) } +func (s badWorkObjectList) Less(i, j int) bool { + return s[i].woHeader.NumberU64() < s[j].woHeader.NumberU64() } -func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s badWorkObjectList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s badBlockList) ProtoEncode() *ProtoBadBlocks { - protoList := make([]*ProtoBadBlock, len(s)) +func (s badWorkObjectList) ProtoEncode() *ProtoBadWorkObjects { + protoList := make([]*ProtoBadWorkObject, len(s)) for i, bad := range s { protoList[i] = bad.ProtoEncode() } - return &ProtoBadBlocks{BadBlocks: protoList} + return &ProtoBadWorkObjects{BadWorkObjects: protoList} } -func (s *badBlockList) ProtoDecode(pb *ProtoBadBlocks, location common.Location) error { - list := make(badBlockList, len(pb.BadBlocks)) - for i, protoBlock := range pb.BadBlocks { - block := new(badBlock) - if err := block.ProtoDecode(protoBlock, location); err != nil { +func (s *badWorkObjectList) ProtoDecode(pb *ProtoBadWorkObjects) error { + list := make(badWorkObjectList, len(pb.BadWorkObjects)) + for i, protoBlock := range pb.BadWorkObjects { + block := new(badWorkObject) + if err := block.ProtoDecode(protoBlock); err != nil { return err } list[i] = block @@ -974,116 +1032,33 @@ func (s *badBlockList) ProtoDecode(pb *ProtoBadBlocks, location common.Location) return nil } -// ReadBadBlock retrieves the bad block with the corresponding block hash. 
-func ReadBadBlock(db ethdb.Reader, hash common.Hash, location common.Location) *types.Block { - blob, err := db.Get(badBlockKey) +// ReadBadWorkObject retrieves the bad workObject with the corresponding workObject hash. +func ReadBadWorkObject(db ethdb.Reader, hash common.Hash) *types.WorkObject { + blob, err := db.Get(badWorkObjectKey) if err != nil { return nil } - protoBadBlocks := new(ProtoBadBlocks) - err = proto.Unmarshal(blob, protoBadBlocks) + protoBadWorkObjects := new(ProtoBadWorkObjects) + err = proto.Unmarshal(blob, protoBadWorkObjects) if err != nil { return nil } - badBlocks := new(badBlockList) - err = badBlocks.ProtoDecode(protoBadBlocks, location) + badWorkObjects := new(badWorkObjectList) + err = badWorkObjects.ProtoDecode(protoBadWorkObjects) if err != nil { return nil } - for _, bad := range *badBlocks { - if bad.Header.Hash() == hash { - return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles, bad.Body.ExtTransactions, bad.Body.SubManifest, bad.Body.InterlinkHashes) + for _, bad := range *badWorkObjects { + if bad.woHeader.Hash() == hash { + return types.NewWorkObject(bad.woHeader, bad.woBody, nil, types.BlockObject) } } return nil } -// ReadAllBadBlocks retrieves all the bad blocks in the database. -// All returned blocks are sorted in reverse order by number. -func ReadAllBadBlocks(db ethdb.Reader, location common.Location) []*types.Block { - blob, err := db.Get(badBlockKey) - if err != nil { - return nil - } - - protoBadBlocks := new(ProtoBadBlocks) - err = proto.Unmarshal(blob, protoBadBlocks) - if err != nil { - return nil - } - badBlocks := new(badBlockList) - - err = badBlocks.ProtoDecode(protoBadBlocks, location) - if err != nil { - return nil - } - var blocks []*types.Block - for _, bad := range *badBlocks { - blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles, bad.Body.ExtTransactions, bad.Body.SubManifest, bad.Body.InterlinkHashes)) - } - return blocks -} - -// WriteBadBlock serializes the bad block into the database. If the cumulated -// bad blocks exceeds the limitation, the oldest will be dropped. 
-func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block, location common.Location) { - blob, err := db.Get(badBlockKey) - if err != nil { - log.Global.WithField("err", err).Warn("Failed to load old bad blocks") - } - - protoBadBlocks := new(ProtoBadBlocks) - err = proto.Unmarshal(blob, protoBadBlocks) - if err != nil { - log.Global.WithField("err", err).Warn("Failed to proto Unmarshal bad blocks") - } - badBlocksList := badBlockList{} - if len(blob) > 0 { - err := badBlocksList.ProtoDecode(protoBadBlocks, location) - if err != nil { - log.Global.WithField("err", err).Fatal("Failed to decode old bad blocks") - } - } - badBlocks := badBlocksList - nodeCtx := location.Context() - for _, b := range badBlocks { - if b.Header.NumberU64(nodeCtx) == block.NumberU64(nodeCtx) && b.Header.Hash() == block.Hash() { - log.Global.WithFields(log.Fields{ - "number": block.NumberU64(nodeCtx), - "hash": block.Hash(), - }).Info("Skip duplicated bad block") - return - } - } - badBlocks = append(badBlocks, &badBlock{ - Header: block.Header(), - Body: block.Body(), - }) - sort.Sort(sort.Reverse(badBlocks)) - if len(badBlocks) > badBlockToKeep { - blocks := badBlocks - badBlocks = blocks[:badBlockToKeep] - } - protoBadBlocks = badBlocks.ProtoEncode() - data, err := proto.Marshal(protoBadBlocks) - if err != nil { - log.Global.WithField("err", err).Fatal("Failed to encode bad blocks") - } - if err := db.Put(badBlockKey, data); err != nil { - log.Global.WithField("err", err).Fatal("Failed to write bad blocks") - } -} - -// DeleteBadBlocks deletes all the bad blocks from the database -func DeleteBadBlocks(db ethdb.KeyValueWriter) { - if err := db.Delete(badBlockKey); err != nil { - log.Global.WithField("err", err).Fatal("Failed to delete bad blocks") - } -} - // FindCommonAncestor returns the last common ancestor of two block headers -func FindCommonAncestor(db ethdb.Reader, a, b *types.Header, nodeCtx int) *types.Header { +func FindCommonAncestor(db ethdb.Reader, a, b *types.WorkObject, nodeCtx int) *types.WorkObject { for bn := b.NumberU64(nodeCtx); a.NumberU64(nodeCtx) > bn; { a = ReadHeader(db, a.ParentHash(nodeCtx), a.NumberU64(nodeCtx)-1) if a == nil { @@ -1110,7 +1085,7 @@ func FindCommonAncestor(db ethdb.Reader, a, b *types.Header, nodeCtx int) *types } // ReadHeadHeader returns the current canonical head header. -func ReadHeadHeader(db ethdb.Reader) *types.Header { +func ReadHeadHeader(db ethdb.Reader) *types.WorkObject { headHeaderHash := ReadHeadHeaderHash(db) if headHeaderHash == (common.Hash{}) { return nil @@ -1123,16 +1098,16 @@ func ReadHeadHeader(db ethdb.Reader) *types.Header { } // ReadHeadBlock returns the current canonical head block. -func ReadHeadBlock(db ethdb.Reader, location common.Location) *types.Block { - headBlockHash := ReadHeadBlockHash(db) - if headBlockHash == (common.Hash{}) { +func ReadHeadBlock(db ethdb.Reader) *types.WorkObject { + headWorkObjectHash := ReadHeadBlockHash(db) + if headWorkObjectHash == (common.Hash{}) { return nil } - headBlockNumber := ReadHeaderNumber(db, headBlockHash) - if headBlockNumber == nil { + headWorkObjectNumber := ReadHeaderNumber(db, headWorkObjectHash) + if headWorkObjectNumber == nil { return nil } - return ReadBlock(db, headBlockHash, *headBlockNumber, location) + return ReadWorkObject(db, headWorkObjectHash, types.BlockObject) } // ReadEtxSetProto retrieves the EtxSet corresponding to a given block, in Proto encoding. 
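// Illustrative sketch (not part of the patch): the hunks above replace the old
// header/body pair with WorkObject accessors, so a caller that previously used
// ReadHeadBlock/ReadBlock now resolves the canonical head in three steps. The
// helper below assumes only the signatures introduced in this diff
// (ReadHeadBlockHash, ReadHeaderNumber, ReadWorkObject) and types.BlockObject;
// the package name and the function name headWorkObject are hypothetical.
package example

import (
	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core/rawdb"
	"github.com/dominant-strategies/go-quai/core/types"
	"github.com/dominant-strategies/go-quai/ethdb"
)

// headWorkObject mirrors the rewritten ReadHeadBlock: hash -> number -> work object.
func headWorkObject(db ethdb.Reader) *types.WorkObject {
	headHash := rawdb.ReadHeadBlockHash(db)
	if headHash == (common.Hash{}) {
		return nil // no canonical head recorded yet
	}
	if number := rawdb.ReadHeaderNumber(db, headHash); number == nil {
		return nil // hash is known but the number mapping is missing
	}
	// types.BlockObject selects the block key namespace; TxObject and PhObject
	// work objects are stored under their own prefixes.
	return rawdb.ReadWorkObject(db, headHash, types.BlockObject)
}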
@@ -1175,7 +1150,7 @@ func WriteEtxSetProto(db ethdb.KeyValueWriter, hash common.Hash, number uint64, } // ReadEtxSet retreives the EtxSet corresponding to a given block -func ReadEtxSet(db ethdb.Reader, hash common.Hash, number uint64, location common.Location) *types.EtxSet { +func ReadEtxSet(db ethdb.Reader, hash common.Hash, number uint64) *types.EtxSet { data, err := ReadEtxSetProto(db, hash, number) if err != nil { log.Global.WithError(err).Error("Failed to read etx set") @@ -1188,7 +1163,7 @@ func ReadEtxSet(db ethdb.Reader, hash common.Hash, number uint64, location commo etxSet := types.EtxSet{ ETXHashes: make([]byte, 0), } - err = etxSet.ProtoDecode(protoEtxSet, location) + err = etxSet.ProtoDecode(protoEtxSet) if err != nil { log.Global.WithFields(log.Fields{ "hash": hash, @@ -1217,7 +1192,7 @@ func DeleteEtxSet(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { } } -func ReadETX(db ethdb.Reader, hash common.Hash, location common.Location) *types.Transaction { +func ReadETX(db ethdb.Reader, hash common.Hash) *types.Transaction { data, _ := db.Get(etxKey(hash)) if len(data) == 0 { return nil @@ -1227,7 +1202,7 @@ func ReadETX(db ethdb.Reader, hash common.Hash, location common.Location) *types log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal etx") } etx := new(types.Transaction) - if err := etx.ProtoDecode(protoEtx, location); err != nil { + if err := etx.ProtoDecode(protoEtx, db.Location()); err != nil { log.Global.WithFields(log.Fields{ "hash": hash, "err": err, @@ -1285,7 +1260,7 @@ func ReadPendingEtxs(db ethdb.Reader, hash common.Hash) *types.PendingEtxs { log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal pending etxs") } pendingEtxs := new(types.PendingEtxs) - if err := pendingEtxs.ProtoDecode(protoPendingEtxs); err != nil { + if err := pendingEtxs.ProtoDecode(protoPendingEtxs, db.Location()); err != nil { log.Global.WithFields(log.Fields{ "hash": hash, "err": err, @@ -1316,7 +1291,7 @@ func DeletePendingEtxs(db ethdb.KeyValueWriter, hash common.Hash) { } // ReadPendingEtxsRollup retreives the pending ETXs rollup corresponding to a given block -func ReadPendingEtxsRollup(db ethdb.Reader, hash common.Hash, location common.Location) *types.PendingEtxsRollup { +func ReadPendingEtxsRollup(db ethdb.Reader, hash common.Hash) *types.PendingEtxsRollup { // Try to look up the data in leveldb. data, _ := db.Get(pendingEtxsRollupKey(hash)) if len(data) == 0 { @@ -1328,7 +1303,7 @@ func ReadPendingEtxsRollup(db ethdb.Reader, hash common.Hash, location common.Lo log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal pending etxs rollup") } pendingEtxsRollup := new(types.PendingEtxsRollup) - err = pendingEtxsRollup.ProtoDecode(protoPendingEtxsRollup, location) + err = pendingEtxsRollup.ProtoDecode(protoPendingEtxsRollup, db.Location()) if err != nil { log.Global.WithFields(log.Fields{ "hash": hash, @@ -1539,7 +1514,7 @@ func WriteInboundEtxs(db ethdb.KeyValueWriter, hash common.Hash, inboundEtxs typ } // ReadInboundEtxs reads the inbound etxs from the database -func ReadInboundEtxs(db ethdb.Reader, hash common.Hash, location common.Location) types.Transactions { +func ReadInboundEtxs(db ethdb.Reader, hash common.Hash) types.Transactions { // Try to look up the data in leveldb. 
data, err := db.Get(inboundEtxsKey(hash)) if err != nil { @@ -1551,7 +1526,7 @@ func ReadInboundEtxs(db ethdb.Reader, hash common.Hash, location common.Location log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal inbound etxs") } inboundEtxs := types.Transactions{} - err = inboundEtxs.ProtoDecode(protoInboundEtxs, location) + err = inboundEtxs.ProtoDecode(protoInboundEtxs, db.Location()) if err != nil { log.Global.WithFields(log.Fields{ "hash": hash, diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index ea7cd470ba..35e8bd2b04 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -2,7 +2,7 @@ package rawdb import ( "math/big" - "reflect" + reflect "reflect" "testing" "github.com/dominant-strategies/go-quai/common" @@ -14,7 +14,7 @@ func TestHeaderStorage(t *testing.T) { db := NewMemoryDatabase() // Create a test header to move around the database and make sure it's really new - header := types.EmptyHeader() + header := types.EmptyHeader(2) header.SetParentHash(common.Hash{1}, common.ZONE_CTX) header.SetBaseFee(big.NewInt(1)) @@ -23,7 +23,7 @@ func TestHeaderStorage(t *testing.T) { } t.Log("Header Hash stored", header.Hash()) // Write and verify the header in the database - WriteHeader(db, header, common.ZONE_CTX) + WriteHeader(db, header.Header(), common.ZONE_CTX) if entry := ReadHeader(db, header.Hash(), header.Number(common.ZONE_CTX).Uint64()); entry == nil { t.Fatalf("Stored header not found with hash %s", entry.Hash()) } else if entry.Hash() != header.Hash() { @@ -67,19 +67,18 @@ func TestEtxSetStorage(t *testing.T) { etxSet := types.NewEtxSet() hash := common.Hash{1} var number uint64 = 0 - location := common.Location{0, 0} - if entry := ReadEtxSet(db, hash, number, location); entry != nil { + if entry := ReadEtxSet(db, hash, number); entry != nil { t.Fatalf("Non existent etxSet returned: %v", entry) } t.Log("EtxSet Hash stored", hash) // Write and verify the etxSet in the database WriteEtxSet(db, hash, 0, etxSet) - if entry := ReadEtxSet(db, hash, number, location); entry == nil { + if entry := ReadEtxSet(db, hash, number); entry == nil { t.Fatalf("Stored etxSet not found with hash %s", hash) } // Delete the etxSet and verify the execution DeleteEtxSet(db, hash, number) - if entry := ReadEtxSet(db, hash, number, location); entry != nil { + if entry := ReadEtxSet(db, hash, number); entry != nil { t.Fatalf("Deleted etxSet returned: %v", entry) } } @@ -88,7 +87,6 @@ func TestEtxSetStorage(t *testing.T) { func TestInboundEtxsStorage(t *testing.T) { db := NewMemoryDatabase() hash := common.Hash{1} - location := common.Location{0, 0} to := common.BytesToAddress([]byte{0x01}, common.Location{0, 0}) inner := &types.QuaiTx{ @@ -108,13 +106,13 @@ func TestInboundEtxsStorage(t *testing.T) { tx := types.NewTx(inner) inboundEtxs := types.Transactions{tx} - if entry := ReadInboundEtxs(db, hash, location); entry != nil { + if entry := ReadInboundEtxs(db, hash); entry != nil { t.Fatalf("Non existent inbound etxs returned: %v", entry) } t.Log("Inbound InboundEtxs stored", inboundEtxs) // Write and verify the inboundEtxs in the database WriteInboundEtxs(db, hash, inboundEtxs) - if entry := ReadInboundEtxs(db, hash, location); entry == nil { + if entry := ReadInboundEtxs(db, hash); entry == nil { t.Fatalf("Stored InboundEtxs not found with hash %s", hash) } else { t.Log("InboundEtxs", entry) @@ -122,7 +120,37 @@ func TestInboundEtxsStorage(t *testing.T) { } // Delete the inboundEtxs and verify the execution 
DeleteInboundEtxs(db, hash) - if entry := ReadInboundEtxs(db, hash, location); entry != nil { + if entry := ReadInboundEtxs(db, hash); entry != nil { t.Fatalf("Deleted InboundEtxs returned: %v", entry) } } + +// Tests block header storage and retrieval operations. +func TestWorkObjectStorage(t *testing.T) { + db := NewMemoryDatabase() + + // Create a test header to move around the database and make sure it's really new + woBody := &types.WorkObjectBody{} + woBody.SetTransactions([]*types.Transaction{}) + woBody.SetExtTransactions([]*types.Transaction{}) + woBody.SetHeader(types.EmptyHeader(2).Header()) + header := types.NewWorkObject(types.NewWorkObjectHeader(types.EmptyRootHash, types.EmptyRootHash, big.NewInt(11), big.NewInt(30000), types.EmptyRootHash, types.BlockNonce{23}, common.LocationFromAddressBytes([]byte{0x01, 0x01})), woBody, nil) + + if entry := ReadWorkObject(db, header.Hash(), types.BlockObject); entry != nil { + t.Fatalf("Non existent header returned: %v", entry) + } + t.Log("Header Hash stored", header.Hash()) + // Write and verify the header in the database + WriteWorkObject(db, header.Hash(), header, types.BlockObject, common.ZONE_CTX) + entry := ReadWorkObject(db, header.Hash(), types.BlockObject) + if entry == nil { + t.Fatalf("Stored header not found with hash %s", entry.Hash()) + } else if entry.Hash() != header.Hash() { + t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header) + } + // Delete the header and verify the execution + DeleteWorkObject(db, header.Hash(), header.Number(common.ZONE_CTX).Uint64(), types.BlockObject) + if entry := ReadWorkObject(db, header.Hash(), types.BlockObject); entry != nil { + t.Fatalf("Deleted header returned: %v", entry) + } +} diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go index 9f6a8fd6fc..e632a93b6b 100644 --- a/core/rawdb/accessors_indexes.go +++ b/core/rawdb/accessors_indexes.go @@ -79,9 +79,9 @@ func WriteTxLookupEntries(db ethdb.KeyValueWriter, number uint64, hashes []commo // WriteTxLookupEntriesByBlock stores a positional metadata for every transaction from // a block, enabling hash based transaction and receipt lookups. -func WriteTxLookupEntriesByBlock(db ethdb.KeyValueWriter, block *types.Block, nodeCtx int) { - numberBytes := block.Number(nodeCtx).Bytes() - for _, tx := range block.Transactions() { +func WriteTxLookupEntriesByBlock(db ethdb.KeyValueWriter, wo *types.WorkObject, nodeCtx int) { + numberBytes := wo.Number(nodeCtx).Bytes() + for _, tx := range wo.Body().Transactions() { writeTxLookupEntry(db, tx.Hash(), numberBytes) } } @@ -102,7 +102,7 @@ func DeleteTxLookupEntries(db ethdb.KeyValueWriter, hashes []common.Hash) { // ReadTransaction retrieves a specific transaction from the database, along with // its added positional metadata. 
-func ReadTransaction(db ethdb.Reader, hash common.Hash, location common.Location) (*types.Transaction, common.Hash, uint64, uint64) { +func ReadTransaction(db ethdb.Reader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) { blockNumber := ReadTxLookupEntry(db, hash) if blockNumber == nil { return nil, common.Hash{}, 0, 0 @@ -111,15 +111,15 @@ func ReadTransaction(db ethdb.Reader, hash common.Hash, location common.Location if blockHash == (common.Hash{}) { return nil, common.Hash{}, 0, 0 } - body := ReadBody(db, blockHash, *blockNumber, location) - if body == nil { + wo := ReadWorkObject(db, blockHash, types.BlockObject) + if wo == nil { log.Global.WithFields(log.Fields{ "number": blockNumber, "hash": blockHash, }).Error("Transaction referenced missing") return nil, common.Hash{}, 0, 0 } - for txIndex, tx := range body.Transactions { + for txIndex, tx := range wo.Body().Transactions() { if tx.Hash() == hash { return tx, blockHash, *blockNumber, uint64(txIndex) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index fc834fcad5..ef64be9c01 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -126,7 +126,7 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { // NewDatabaseWithFreezer creates a high level database on top of a given key- // value data store with a freezer moving immutable chain segments into cold // storage. -func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool, nodeCtx int, logger *log.Logger) (ethdb.Database, error) { +func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool, nodeCtx int, logger *log.Logger, location common.Location) (ethdb.Database, error) { // Create the idle freezer instance frdb, err := newFreezer(freezer, namespace, readonly, logger) if err != nil { @@ -217,8 +217,8 @@ func NewMemoryDatabaseWithCap(size int) ethdb.Database { // NewLevelDBDatabase creates a persistent key-value database without a freezer // moving immutable chain segments into cold storage. 
-func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool, logger *log.Logger) (ethdb.Database, error) { - db, err := leveldb.New(file, cache, handles, namespace, readonly, logger) +func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool, logger *log.Logger, location common.Location) (ethdb.Database, error) { + db, err := leveldb.New(file, cache, handles, namespace, readonly, logger, location) if err != nil { return nil, err } @@ -265,7 +265,7 @@ type OpenOptions struct { // +---------------------------------------- // db is non-existent | leveldb default | specified type // db is existent | from db | specified type (if compatible) -func openKeyValueDatabase(o OpenOptions, logger *log.Logger) (ethdb.Database, error) { +func openKeyValueDatabase(o OpenOptions, logger *log.Logger, location common.Location) (ethdb.Database, error) { existingDb := hasPreexistingDb(o.Directory) if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb { return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) @@ -273,7 +273,7 @@ func openKeyValueDatabase(o OpenOptions, logger *log.Logger) (ethdb.Database, er if o.Type == dbPebble || existingDb == dbPebble { if PebbleEnabled { logger.Info("Using pebble as the backing database") - return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, logger) + return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, logger, location) } else { return nil, errors.New("db.engine 'pebble' not supported on this platform") } @@ -283,7 +283,7 @@ func openKeyValueDatabase(o OpenOptions, logger *log.Logger) (ethdb.Database, er } logger.Info("Using leveldb as the backing database") // Use leveldb, either as default (no explicit choice), or pre-existing, or chosen explicitly - return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, logger) + return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, logger, location) } // Open opens both a disk-based key-value database such as leveldb or pebble, but also @@ -291,15 +291,15 @@ func openKeyValueDatabase(o OpenOptions, logger *log.Logger) (ethdb.Database, er // set on the provided OpenOptions. // The passed o.AncientDir indicates the path of root ancient directory where // the chain freezer can be opened. -func Open(o OpenOptions, nodeCtx int, logger *log.Logger) (ethdb.Database, error) { - kvdb, err := openKeyValueDatabase(o, logger) +func Open(o OpenOptions, nodeCtx int, logger *log.Logger, location common.Location) (ethdb.Database, error) { + kvdb, err := openKeyValueDatabase(o, logger, location) if err != nil { return nil, err } if len(o.AncientsDirectory) == 0 { return kvdb, nil } - frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, nodeCtx, logger) + frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, nodeCtx, logger, location) if err != nil { kvdb.Close() return nil, err @@ -430,10 +430,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte, logger *log. 
default: var accounted bool for _, meta := range [][]byte{ - databaseVersionKey, headHeaderKey, headBlockKey, lastPivotKey, + databaseVersionKey, headHeaderKey, headWorkObjectKey, lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, snapshotRootKey, snapshotJournalKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, - uncleanShutdownKey, badBlockKey, + uncleanShutdownKey, badWorkObjectKey, } { if bytes.Equal(key, meta) { metadata.Add(size) diff --git a/core/rawdb/databases_64bit.go b/core/rawdb/databases_64bit.go index 2968caeca1..1ccc1b9c6e 100644 --- a/core/rawdb/databases_64bit.go +++ b/core/rawdb/databases_64bit.go @@ -19,6 +19,7 @@ package rawdb import ( + "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/ethdb" "github.com/dominant-strategies/go-quai/ethdb/pebble" "github.com/dominant-strategies/go-quai/log" @@ -29,8 +30,8 @@ const PebbleEnabled = true // NewPebbleDBDatabase creates a persistent key-value database without a freezer // moving immutable chain segments into cold storage. -func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool, logger *log.Logger) (ethdb.Database, error) { - db, err := pebble.New(file, cache, handles, namespace, readonly, logger) +func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool, logger *log.Logger, location common.Location) (ethdb.Database, error) { + db, err := pebble.New(file, cache, handles, namespace, readonly, logger, location) if err != nil { return nil, err } diff --git a/core/rawdb/db.pb.go b/core/rawdb/db.pb.go index 1d7c41b6a5..30fb67b07d 100644 --- a/core/rawdb/db.pb.go +++ b/core/rawdb/db.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.30.0 +// protoc v4.25.1 // source: core/rawdb/db.proto package rawdb @@ -69,17 +69,18 @@ func (x *ProtoNumber) GetNumber() uint64 { return 0 } -type ProtoBadBlock struct { +type ProtoBadWorkObject struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Header *types.ProtoHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Body *types.ProtoBody `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` + WoHeader *types.ProtoWorkObjectHeader `protobuf:"bytes,1,opt,name=wo_header,json=woHeader,proto3" json:"wo_header,omitempty"` + WoBody *types.ProtoWorkObjectBody `protobuf:"bytes,2,opt,name=wo_body,json=woBody,proto3" json:"wo_body,omitempty"` + Tx *types.ProtoTransaction `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` } -func (x *ProtoBadBlock) Reset() { - *x = ProtoBadBlock{} +func (x *ProtoBadWorkObject) Reset() { + *x = ProtoBadWorkObject{} if protoimpl.UnsafeEnabled { mi := &file_core_rawdb_db_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -87,13 +88,13 @@ func (x *ProtoBadBlock) Reset() { } } -func (x *ProtoBadBlock) String() string { +func (x *ProtoBadWorkObject) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ProtoBadBlock) ProtoMessage() {} +func (*ProtoBadWorkObject) ProtoMessage() {} -func (x *ProtoBadBlock) ProtoReflect() protoreflect.Message { +func (x *ProtoBadWorkObject) ProtoReflect() protoreflect.Message { mi := &file_core_rawdb_db_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -105,35 +106,42 @@ func (x *ProtoBadBlock) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ProtoBadBlock.ProtoReflect.Descriptor instead. -func (*ProtoBadBlock) Descriptor() ([]byte, []int) { +// Deprecated: Use ProtoBadWorkObject.ProtoReflect.Descriptor instead. 
+func (*ProtoBadWorkObject) Descriptor() ([]byte, []int) { return file_core_rawdb_db_proto_rawDescGZIP(), []int{1} } -func (x *ProtoBadBlock) GetHeader() *types.ProtoHeader { +func (x *ProtoBadWorkObject) GetWoHeader() *types.ProtoWorkObjectHeader { if x != nil { - return x.Header + return x.WoHeader } return nil } -func (x *ProtoBadBlock) GetBody() *types.ProtoBody { +func (x *ProtoBadWorkObject) GetWoBody() *types.ProtoWorkObjectBody { if x != nil { - return x.Body + return x.WoBody } return nil } -type ProtoBadBlocks struct { +func (x *ProtoBadWorkObject) GetTx() *types.ProtoTransaction { + if x != nil { + return x.Tx + } + return nil +} + +type ProtoBadWorkObjects struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BadBlocks []*ProtoBadBlock `protobuf:"bytes,1,rep,name=bad_blocks,json=badBlocks,proto3" json:"bad_blocks,omitempty"` + BadWorkObjects []*ProtoBadWorkObject `protobuf:"bytes,1,rep,name=bad_work_objects,json=badWorkObjects,proto3" json:"bad_work_objects,omitempty"` } -func (x *ProtoBadBlocks) Reset() { - *x = ProtoBadBlocks{} +func (x *ProtoBadWorkObjects) Reset() { + *x = ProtoBadWorkObjects{} if protoimpl.UnsafeEnabled { mi := &file_core_rawdb_db_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -141,13 +149,13 @@ func (x *ProtoBadBlocks) Reset() { } } -func (x *ProtoBadBlocks) String() string { +func (x *ProtoBadWorkObjects) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ProtoBadBlocks) ProtoMessage() {} +func (*ProtoBadWorkObjects) ProtoMessage() {} -func (x *ProtoBadBlocks) ProtoReflect() protoreflect.Message { +func (x *ProtoBadWorkObjects) ProtoReflect() protoreflect.Message { mi := &file_core_rawdb_db_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -159,14 +167,14 @@ func (x *ProtoBadBlocks) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ProtoBadBlocks.ProtoReflect.Descriptor instead. -func (*ProtoBadBlocks) Descriptor() ([]byte, []int) { +// Deprecated: Use ProtoBadWorkObjects.ProtoReflect.Descriptor instead. 
+func (*ProtoBadWorkObjects) Descriptor() ([]byte, []int) { return file_core_rawdb_db_proto_rawDescGZIP(), []int{2} } -func (x *ProtoBadBlocks) GetBadBlocks() []*ProtoBadBlock { +func (x *ProtoBadWorkObjects) GetBadWorkObjects() []*ProtoBadWorkObject { if x != nil { - return x.BadBlocks + return x.BadWorkObjects } return nil } @@ -244,29 +252,35 @@ var file_core_rawdb_db_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x25, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x61, 0x0a, 0x0d, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x42, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2a, 0x0a, 0x06, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22, 0x42, 0x0a, 0x0e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x30, - 0x0a, 0x0a, 0x62, 0x61, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x64, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x61, 0x64, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x09, 0x62, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, - 0x22, 0x78, 0x0a, 0x18, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, - 0x78, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x04, - 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x04, 0x68, - 0x61, 0x73, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x49, - 0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x6e, - 0x74, 0x2d, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x2d, - 0x71, 0x75, 0x61, 0x69, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x61, 0x77, 0x64, 0x62, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0xad, 0x01, 0x0a, 0x12, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x42, 0x61, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x39, 0x0a, 0x09, 0x77, 0x6f, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x52, 0x08, 0x77, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x33, 0x0a, 0x07, 0x77, + 0x6f, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1a, 0x2e, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x06, 0x77, 0x6f, 0x42, 0x6f, 0x64, 0x79, + 0x12, 0x27, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x02, 0x74, 0x78, 0x22, 0x57, 0x0a, 0x13, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x61, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x12, 0x40, 0x0a, 0x10, 0x62, 0x61, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x62, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x61, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x0e, 0x62, 0x61, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x22, 0x78, 0x0a, 0x18, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x65, 0x67, 0x61, 0x63, + 0x79, 0x54, 0x78, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, + 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, + 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x33, 0x5a, 0x31, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x6d, 0x69, 0x6e, + 0x61, 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x2f, 0x67, + 0x6f, 0x2d, 0x71, 0x75, 0x61, 0x69, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x72, 0x61, 0x77, 0x64, + 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -283,24 +297,26 @@ func file_core_rawdb_db_proto_rawDescGZIP() []byte { var file_core_rawdb_db_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_core_rawdb_db_proto_goTypes = []interface{}{ - (*ProtoNumber)(nil), // 0: db.ProtoNumber - (*ProtoBadBlock)(nil), // 1: db.ProtoBadBlock - (*ProtoBadBlocks)(nil), // 2: db.ProtoBadBlocks - (*ProtoLegacyTxLookupEntry)(nil), // 3: db.ProtoLegacyTxLookupEntry - (*types.ProtoHeader)(nil), // 4: block.ProtoHeader - (*types.ProtoBody)(nil), // 5: block.ProtoBody - (*common.ProtoHash)(nil), // 6: common.ProtoHash + (*ProtoNumber)(nil), // 0: db.ProtoNumber + (*ProtoBadWorkObject)(nil), // 1: db.ProtoBadWorkObject + (*ProtoBadWorkObjects)(nil), // 2: db.ProtoBadWorkObjects + (*ProtoLegacyTxLookupEntry)(nil), // 3: db.ProtoLegacyTxLookupEntry + (*types.ProtoWorkObjectHeader)(nil), // 4: block.ProtoWorkObjectHeader + (*types.ProtoWorkObjectBody)(nil), // 5: block.ProtoWorkObjectBody + (*types.ProtoTransaction)(nil), // 6: block.ProtoTransaction + (*common.ProtoHash)(nil), // 7: common.ProtoHash } var file_core_rawdb_db_proto_depIdxs = []int32{ - 4, // 0: db.ProtoBadBlock.header:type_name -> block.ProtoHeader - 5, // 1: db.ProtoBadBlock.body:type_name -> block.ProtoBody - 1, // 2: db.ProtoBadBlocks.bad_blocks:type_name -> db.ProtoBadBlock - 6, // 3: db.ProtoLegacyTxLookupEntry.hash:type_name -> common.ProtoHash - 4, // [4:4] is the sub-list for 
method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name + 4, // 0: db.ProtoBadWorkObject.wo_header:type_name -> block.ProtoWorkObjectHeader + 5, // 1: db.ProtoBadWorkObject.wo_body:type_name -> block.ProtoWorkObjectBody + 6, // 2: db.ProtoBadWorkObject.tx:type_name -> block.ProtoTransaction + 1, // 3: db.ProtoBadWorkObjects.bad_work_objects:type_name -> db.ProtoBadWorkObject + 7, // 4: db.ProtoLegacyTxLookupEntry.hash:type_name -> common.ProtoHash + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name } func init() { file_core_rawdb_db_proto_init() } @@ -322,7 +338,7 @@ func file_core_rawdb_db_proto_init() { } } file_core_rawdb_db_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoBadBlock); i { + switch v := v.(*ProtoBadWorkObject); i { case 0: return &v.state case 1: @@ -334,7 +350,7 @@ func file_core_rawdb_db_proto_init() { } } file_core_rawdb_db_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoBadBlocks); i { + switch v := v.(*ProtoBadWorkObjects); i { case 0: return &v.state case 1: diff --git a/core/rawdb/db.proto b/core/rawdb/db.proto index 3e753ac87b..cce42b7de7 100644 --- a/core/rawdb/db.proto +++ b/core/rawdb/db.proto @@ -8,12 +8,15 @@ import "common/proto_common.proto"; message ProtoNumber { uint64 number = 1; } -message ProtoBadBlock { - block.ProtoHeader header = 1; - block.ProtoBody body = 2; +message ProtoBadWorkObject { + block.ProtoWorkObjectHeader wo_header = 1; + block.ProtoWorkObjectBody wo_body = 2; + block.ProtoTransaction tx = 3; } -message ProtoBadBlocks { repeated ProtoBadBlock bad_blocks = 1; } +message ProtoBadWorkObjects { + repeated ProtoBadWorkObject bad_work_objects = 1; +} message ProtoLegacyTxLookupEntry { common.ProtoHash hash = 1; diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 5b53c29fd6..2c06a6bc70 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -27,6 +27,7 @@ import ( "time" "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/ethdb" "github.com/dominant-strategies/go-quai/log" "github.com/dominant-strategies/go-quai/params" @@ -302,7 +303,7 @@ func (f *freezer) Sync() error { // // This functionality is deliberately broken off from block importing to avoid // incurring additional data shuffling delays on block propagation. 
-func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { +func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int, location common.Location) { nfdb := &nofreezedb{KeyValueStore: db} var ( @@ -442,7 +443,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { for i := 0; i < len(ancients); i++ { // Always keep the genesis block in active database if first+uint64(i) != 0 { - DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i)) + DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i), types.BlockObject) DeleteCanonicalHash(batch, first+uint64(i)) } } @@ -462,7 +463,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { "number": number, "hash": hash, }).Trace("Deleting side chain") - DeleteBlock(batch, hash, number) + DeleteWorkObject(batch, hash, number, types.BlockObject) } } } @@ -505,7 +506,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { "hash": children[i], "parent": child.ParentHash(nodeCtx), }).Debug("Deleting dangling block") - DeleteBlock(batch, children[i], tip) + DeleteWorkObject(batch, children[i], tip, types.BlockObject) } dangling = children tip++ diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index b683bd7ad1..a040d22028 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -33,7 +33,7 @@ var ( headHeaderKey = []byte("LastHeader") // headBlockKey tracks the latest known full block's hash. - headBlockKey = []byte("LastBlock") + headWorkObjectKey = []byte("LastWorkObject") // headersHashKey tracks the latest known headers hash in Blockchain. headsHashesKey = []byte("HeadersHash") @@ -74,8 +74,8 @@ var ( // fastTxLookupLimitKey tracks the transaction lookup limit during fast sync. fastTxLookupLimitKey = []byte("FastTransactionLookupLimit") - // badBlockKey tracks the list of bad blocks seen by local - badBlockKey = []byte("InvalidBlock") + // badWorkObjectKey tracks the list of bad blocks seen by local + badWorkObjectKey = []byte("InvalidWorkObject") // uncleanShutdownKey tracks the list of local crashes uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db @@ -89,17 +89,25 @@ var ( headerHashSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian) - pendingHeaderPrefix = []byte("ph") // pendingHeaderPrefix + hash -> header - candidateBodyPrefix = []byte("cb") // candidateBodyPrefix + hash -> Body - pbBodyPrefix = []byte("pb") // pbBodyPrefix + hash -> *types.Body - pbBodyHashPrefix = []byte("pbKey") // pbBodyPrefix -> []common.Hash - phTerminiPrefix = []byte("pht") // phTerminiPrefix + hash -> []common.Hash - phBodyPrefix = []byte("pc") // phBodyPrefix + hash -> []common.Hash + Td - terminiPrefix = []byte("tk") //terminiPrefix + hash -> []common.Hash - badHashesListPrefix = []byte("bh") - inboundEtxsPrefix = []byte("ie") // inboundEtxsPrefix + hash -> types.Transactions - spentUTXOsPrefix = []byte("sutxo") // spentUTXOsPrefix + hash -> []types.SpentTxOut - AddressUtxosPrefix = []byte("au") // addressUtxosPrefix + hash -> []types.UtxoEntry + pendingHeaderPrefix = []byte("ph") // pendingHeaderPrefix + hash -> header + candidateBodyPrefix = []byte("cb") // candidateBodyPrefix + hash -> Body + pbBodyPrefix = []byte("pb") // pbBodyPrefix + hash -> *types.Body + pbBodyHashPrefix = []byte("pbKey") // pbBodyPrefix -> []common.Hash + phTerminiPrefix = []byte("pht") // phTerminiPrefix + hash -> []common.Hash + phBodyPrefix = []byte("pc") // phBodyPrefix 
+ hash -> []common.Hash + Td + terminiPrefix = []byte("tk") //terminiPrefix + hash -> []common.Hash + blockWorkObjectHeaderPrefix = []byte("bw") //blockWObjectHeaderPrefix + hash -> []common.Hash + txWorkObjectHeaderPrefix = []byte("tw") //txWorkObjectHeaderPrefix + hash -> []common.Hash + phWorkObjectHeaderPrefix = []byte("pw") //phWorkObjectHeaderPrefix + hash -> []common.Hash + workObjectBodyPrefix = []byte("wb") //workObjectBodyPrefix + hash -> []common.Hash + blockWorkObjectPrefix = []byte("bo") //blockWorkObjectPrefix + hash -> []common.Hash + txWorkObjectPrefix = []byte("to") //txWorkObjectPrefix + hash -> []common.Hash + phWorkObjectPrefix = []byte("po") //phWorkObjectPrefix + hash -> []common.Hash + badHashesListPrefix = []byte("bh") + inboundEtxsPrefix = []byte("ie") // inboundEtxsPrefix + hash -> types.Transactions + UtxoPrefix = []byte("ut") // outpointPrefix + hash -> types.Outpoint + spentUTXOsPrefix = []byte("sutxo") // spentUTXOsPrefix + hash -> []types.SpentTxOut + AddressUtxosPrefix = []byte("au") // addressUtxosPrefix + hash -> []types.UtxoEntry blockBodyPrefix = []byte("b") // blockBodyPrefix + num (uint64 big endian) + hash -> block body blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts @@ -205,6 +213,26 @@ func terminiKey(hash common.Hash) []byte { return append(terminiPrefix, hash.Bytes()...) } +// blockWorkObjectHeaderKey = workObjectHeaderPrefix + hash +func blockWorkObjectHeaderKey(hash common.Hash) []byte { + return append(blockWorkObjectHeaderPrefix, hash.Bytes()...) +} + +// txObjectHeaderKey = workObjectHeaderPrefix + hash +func txWorkObjectHeaderKey(hash common.Hash) []byte { + return append(txWorkObjectHeaderPrefix, hash.Bytes()...) +} + +// phObjectHeaderKey = workObjectHeaderPrefix + hash +func phWorkObjectHeaderKey(hash common.Hash) []byte { + return append(phWorkObjectHeaderPrefix, hash.Bytes()...) +} + +// workObjectBodyKey = workObjectBodyPrefix + hash +func workObjectBodyKey(hash common.Hash) []byte { + return append(workObjectBodyPrefix, hash.Bytes()...) +} + // pendingHeaderKey = pendingHeaderPrefix + hash func pendingHeaderKey(hash common.Hash) []byte { return append(pendingHeaderPrefix, hash.Bytes()...) diff --git a/core/rawdb/table.go b/core/rawdb/table.go index bb6072ed4a..f5c2d10a68 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -17,24 +17,31 @@ package rawdb import ( + "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/ethdb" ) // table is a wrapper around a database that prefixes each key access with a pre- // configured string. type table struct { - db ethdb.Database - prefix string + db ethdb.Database + prefix string + location common.Location } // NewTable returns a database object that prefixes all keys with a given string. -func NewTable(db ethdb.Database, prefix string) ethdb.Database { +func NewTable(db ethdb.Database, prefix string, location common.Location) ethdb.Database { return &table{ - db: db, - prefix: prefix, + db: db, + prefix: prefix, + location: location, } } +func (t *table) Location() common.Location { + return t.location +} + // Close is a noop to implement the Database interface. 
func (t *table) Close() error { return nil diff --git a/core/slice.go b/core/slice.go index 32d5d12ebb..58202a5b95 100644 --- a/core/slice.go +++ b/core/slice.go @@ -71,7 +71,7 @@ type Slice struct { missingBlockFeed event.Feed pEtxRetryCache *lru.Cache - asyncPhCh chan *types.Header + asyncPhCh chan *types.WorkObject asyncPhSub event.Subscription bestPhKey common.Hash @@ -86,7 +86,7 @@ type Slice struct { logger *log.Logger } -func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLookupLimit *uint64, isLocalBlock func(block *types.Header) bool, chainConfig *params.ChainConfig, slicesRunning []common.Location, currentExpansionNumber uint8, genesisBlock *types.Block, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, indexerConfig *IndexerConfig, vmConfig vm.Config, genesis *Genesis, logger *log.Logger) (*Slice, error) { +func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLookupLimit *uint64, isLocalBlock func(block *types.WorkObject) bool, chainConfig *params.ChainConfig, slicesRunning []common.Location, currentExpansionNumber uint8, genesisBlock *types.WorkObject, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, indexerConfig *IndexerConfig, vmConfig vm.Config, genesis *Genesis, logger *log.Logger) (*Slice, error) { nodeCtx := chainConfig.Location.Context() sl := &Slice{ config: chainConfig, @@ -158,9 +158,8 @@ func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLooku // Append takes a proposed header and constructs a local block and attempts to hierarchically append it to the block graph. // If this is called from a dominant context a domTerminus must be provided else a common.Hash{} should be used and domOrigin should be set to true. // Return of this function is the Etxs generated in the Zone Block, subReorg bool that tells dom if should be mined on, setHead bool that determines if we should set the block as the current head and the error -func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) { +func (sl *Slice) Append(header *types.WorkObject, domPendingHeader *types.WorkObject, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) { start := time.Now() - nodeCtx := sl.NodeCtx() if sl.hc.IsGenesisHash(header.Hash()) { @@ -200,7 +199,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do // Don't append the block which already exists in the database. if sl.hc.HasHeader(header.Hash(), header.NumberU64(nodeCtx)) && (sl.hc.GetTerminiByHash(header.Hash()) != nil) { sl.logger.WithField("hash", header.Hash()).Debug("Block has already been appended") - return nil, false, false, nil + return nil, false, false, ErrKnownBlock } time1 := common.PrettyDuration(time.Since(start)) // This is to prevent a crash when we try to insert blocks before domClient is on. @@ -288,7 +287,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do if nodeCtx != common.ZONE_CTX { // How to get the sub pending etxs if not running the full node?. 
if sl.subClients[location.SubIndex(sl.NodeLocation())] != nil { - subPendingEtxs, subReorg, setHead, err = sl.subClients[location.SubIndex(sl.NodeLocation())].Append(context.Background(), header, block.SubManifest(), pendingHeaderWithTermini.Header(), domTerminus, true, newInboundEtxs) + subPendingEtxs, subReorg, setHead, err = sl.subClients[location.SubIndex(sl.NodeLocation())].Append(context.Background(), header, block.Manifest(), pendingHeaderWithTermini.WorkObject(), domTerminus, true, newInboundEtxs) if err != nil { return nil, false, false, err } @@ -341,10 +340,10 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do rawdb.WriteInboundEtxs(sl.sliceDb, block.Hash(), newInboundEtxs) } - setHead = sl.poem(sl.engine.TotalLogS(sl.hc, block.Header()), sl.engine.TotalLogS(sl.hc, sl.hc.CurrentHeader())) + setHead = sl.poem(sl.engine.TotalLogS(sl.hc, block), sl.engine.TotalLogS(sl.hc, sl.hc.CurrentHeader())) if subReorg || (sl.hc.CurrentHeader().NumberU64(nodeCtx) < block.NumberU64(nodeCtx)+c_currentStateComputeWindow) { - err := sl.hc.SetCurrentState(block.Header()) + err := sl.hc.SetCurrentState(block) if err != nil { sl.logger.WithFields(log.Fields{ "err": err, @@ -373,9 +372,9 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do } } sl.logger.WithFields(log.Fields{ - "NumberArray": pendingHeaderWithTermini.Header().NumberArray(), - "Number": pendingHeaderWithTermini.Header().Number(nodeCtx), - "ParentHash": pendingHeaderWithTermini.Header().ParentHash(nodeCtx), + "NumberArray": pendingHeaderWithTermini.WorkObject().NumberArray(), + "Number": pendingHeaderWithTermini.WorkObject().Number(nodeCtx), + "ParentHash": pendingHeaderWithTermini.WorkObject().ParentHash(nodeCtx), "Terminus": pendingHeaderWithTermini.Termini().DomTerminus(sl.NodeLocation()), }).Info("Choosing phHeader Append") sl.WriteBestPhKey(pendingHeaderWithTermini.Termini().DomTerminus(sl.NodeLocation())) @@ -388,7 +387,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do } if setHead { - sl.hc.SetCurrentHeader(block.Header()) + sl.hc.SetCurrentHeader(block) } else if !setHead && nodeCtx == common.ZONE_CTX && sl.hc.ProcessingState() { sl.logger.WithFields(log.Fields{ "hash": block.Hash(), @@ -396,7 +395,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do "location": block.Location(), "parentHash": block.ParentHash(nodeCtx), }).Debug("Found uncle") - sl.hc.chainSideFeed.Send(ChainSideEvent{Blocks: []*types.Block{block}, ResetUncles: false}) + sl.hc.chainSideFeed.Send(ChainSideEvent{Blocks: []*types.WorkObject{block}, ResetUncles: false}) } if subReorg { @@ -438,19 +437,20 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do }).Info("Times during sub append") sl.logger.WithFields(log.Fields{ - "number": block.Header().NumberArray(), - "hash": block.Hash(), - "difficulty": block.Header().Difficulty(), - "uncles": len(block.Uncles()), - "txs": len(block.Transactions()), - "etxs": len(block.ExtTransactions()), - "utxos": len(block.QiTransactions()), - "gas": block.GasUsed(), - "gasLimit": block.GasLimit(), - "evmRoot": block.EVMRoot(), - "order": order, - "location": block.Header().Location(), - "elapsed": common.PrettyDuration(time.Since(start)), + "dom number": block.Header().NumberArray(), + "zone number": block.Number(common.ZONE_CTX), + "hash": block.Hash(), + "difficulty": block.Difficulty(), + "uncles": len(block.Uncles()), + "txs": len(block.Transactions()), + "etxs": 
len(block.ExtTransactions()), + "utxos": len(block.QiTransactions()), + "gas": block.GasUsed(), + "gasLimit": block.GasLimit(), + "evmRoot": block.EVMRoot(), + "order": order, + "location": block.Location(), + "elapsed": common.PrettyDuration(time.Since(start)), }).Info("Appended new block") if nodeCtx == common.ZONE_CTX { @@ -474,7 +474,7 @@ func (sl *Slice) miningStrategy(bestPh types.PendingHeader, pendingHeader types. if bestPh.Header() == nil { // This is the case where we try to append the block before we have not initialized the bestPh return true } - subReorg := sl.poem(sl.engine.TotalLogPhS(pendingHeader.Header()), sl.engine.TotalLogPhS(bestPh.Header())) + subReorg := sl.poem(sl.engine.TotalLogPhS(pendingHeader.WorkObject()), sl.engine.TotalLogPhS(bestPh.WorkObject())) return subReorg } @@ -483,15 +483,15 @@ func (sl *Slice) ProcessingState() bool { } // relayPh sends pendingHeaderWithTermini to subordinates -func (sl *Slice) relayPh(block *types.Block, pendingHeaderWithTermini types.PendingHeader, domOrigin bool, location common.Location, subReorg bool) { +func (sl *Slice) relayPh(block *types.WorkObject, pendingHeaderWithTermini types.PendingHeader, domOrigin bool, location common.Location, subReorg bool) { nodeCtx := sl.NodeCtx() if nodeCtx == common.ZONE_CTX && sl.ProcessingState() { // Send an empty header to miner bestPh, exists := sl.readPhCache(sl.bestPhKey) if exists { - bestPh.Header().SetLocation(sl.NodeLocation()) - sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header()) + bestPh.WorkObject().WorkObjectHeader().SetLocation(sl.NodeLocation()) + sl.miner.worker.pendingHeaderFeed.Send(bestPh.WorkObject()) return } else { sl.logger.WithField("bestPhKey", sl.bestPhKey).Warn("Pending Header for Best ph key does not exist") @@ -499,7 +499,7 @@ func (sl *Slice) relayPh(block *types.Block, pendingHeaderWithTermini types.Pend } else if !domOrigin && subReorg { for _, i := range sl.randomRelayArray() { if sl.subClients[i] != nil { - sl.subClients[i].SubRelayPendingHeader(context.Background(), pendingHeaderWithTermini, pendingHeaderWithTermini.Header().ParentEntropy(nodeCtx), location, subReorg, nodeCtx) + sl.subClients[i].SubRelayPendingHeader(context.Background(), pendingHeaderWithTermini, pendingHeaderWithTermini.WorkObject().ParentEntropy(nodeCtx), location, subReorg, nodeCtx) } } } @@ -544,11 +544,11 @@ func (sl *Slice) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingH for _, i := range sl.randomRelayArray() { if sl.subClients[i] != nil { sl.logger.WithFields(log.Fields{ - "parentHash": newPh.Header().ParentHash(nodeCtx), - "number": newPh.Header().NumberArray(), + "parentHash": newPh.WorkObject().ParentHash(nodeCtx), + "number": newPh.WorkObject().NumberArray(), "newTermini": newPh.Termini().SubTerminiAtIndex(i), }).Info("SubRelay in UpdateDom") - sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.Header().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx) + sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.WorkObject().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx) } } } else { @@ -563,7 +563,7 @@ func (sl *Slice) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingH "location": location, }).Info("UpdateDom needs to updateDom") if sl.domClient != nil { - go sl.domClient.UpdateDom(context.Background(), oldDomTerminus, types.NewPendingHeader(pendingHeader.Header(), newPh.Termini()), location) + go sl.domClient.UpdateDom(context.Background(), oldDomTerminus, 
types.NewPendingHeader(pendingHeader.WorkObject(), newPh.Termini()), location) } else { // Can update sl.WriteBestPhKey(newDomTerminus) @@ -572,11 +572,11 @@ func (sl *Slice) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingH for _, i := range sl.randomRelayArray() { if sl.subClients[i] != nil { sl.logger.WithFields(log.Fields{ - "parentHash": newPh.Header().ParentHash(nodeCtx), - "number": newPh.Header().NumberArray(), + "parentHash": newPh.WorkObject().ParentHash(nodeCtx), + "number": newPh.WorkObject().NumberArray(), "newTermini": newPh.Termini().SubTerminiAtIndex(i), }).Info("SubRelay in UpdateDom") - sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.Header().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx) + sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.WorkObject().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx) } } } else { @@ -604,7 +604,7 @@ func (sl *Slice) randomRelayArray() []int { func (sl *Slice) asyncPendingHeaderLoop() { // Subscribe to the AsyncPh updates from the worker - sl.asyncPhCh = make(chan *types.Header, c_asyncPhUpdateChanSize) + sl.asyncPhCh = make(chan *types.WorkObject, c_asyncPhUpdateChanSize) sl.asyncPhSub = sl.miner.worker.SubscribeAsyncPendingHeader(sl.asyncPhCh) for { @@ -615,8 +615,9 @@ func (sl *Slice) asyncPendingHeaderLoop() { sl.phCacheMu.Unlock() bestPh, exists := sl.readPhCache(sl.bestPhKey) if exists { - bestPh.Header().SetLocation(sl.NodeLocation()) - sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header()) + bestPh.WorkObject().WorkObjectHeader().SetLocation(sl.NodeLocation()) + sl.writePhCache(sl.bestPhKey, bestPh) + sl.miner.worker.pendingHeaderFeed.Send(bestPh.WorkObject()) } case <-sl.asyncPhSub.Err(): return @@ -651,6 +652,10 @@ func (sl *Slice) readPhCache(hash common.Hash) (types.PendingHeader, bool) { // Write the phCache func (sl *Slice) writePhCache(hash common.Hash, pendingHeader types.PendingHeader) { + sl.miner.worker.AddPendingWorkObjectBody(pendingHeader.WorkObject()) + if (pendingHeader.Header().ManifestHash(common.ZONE_CTX) == common.Hash{}) { + panic("manifest is nil") + } sl.phCache.Add(hash, pendingHeader) rawdb.WritePendingHeader(sl.sliceDb, hash, pendingHeader) } @@ -663,9 +668,9 @@ func (sl *Slice) WriteBestPhKey(hash common.Hash) { } // Generate a slice pending header -func (sl *Slice) generateSlicePendingHeader(block *types.Block, newTermini types.Termini, domPendingHeader *types.Header, domOrigin bool, subReorg bool, fill bool) (types.PendingHeader, error) { +func (sl *Slice) generateSlicePendingHeader(block *types.WorkObject, newTermini types.Termini, domPendingHeader *types.WorkObject, domOrigin bool, subReorg bool, fill bool) (types.PendingHeader, error) { nodeCtx := sl.NodeLocation().Context() - var localPendingHeader *types.Header + var localPendingHeader *types.WorkObject var err error if subReorg { // Upate the local pending header @@ -676,31 +681,31 @@ func (sl *Slice) generateSlicePendingHeader(block *types.Block, newTermini types } else { // Just compute the necessary information for the pending Header // i.e ParentHash field, Number and writing manifest to the disk - localPendingHeader = types.EmptyHeader() + localPendingHeader = types.EmptyHeader(sl.NodeCtx()) localPendingHeader.SetParentHash(block.Hash(), nodeCtx) localPendingHeader.SetNumber(big.NewInt(int64(block.NumberU64(nodeCtx))+1), nodeCtx) - localPendingHeader.SetParentEntropy(sl.engine.TotalLogS(sl.hc, block.Header()), nodeCtx) + 
localPendingHeader.Header().SetParentEntropy(sl.engine.TotalLogS(sl.hc, block), nodeCtx) if nodeCtx != common.PRIME_CTX { if domOrigin { - localPendingHeader.SetParentDeltaS(big.NewInt(0), nodeCtx) + localPendingHeader.Header().SetParentDeltaS(big.NewInt(0), nodeCtx) } else { - localPendingHeader.SetParentDeltaS(sl.engine.DeltaLogS(sl.hc, block.Header()), nodeCtx) + localPendingHeader.Header().SetParentDeltaS(sl.engine.DeltaLogS(sl.hc, block), nodeCtx) } } - manifestHash := sl.miner.worker.ComputeManifestHash(block.Header()) - localPendingHeader.SetManifestHash(manifestHash, nodeCtx) + manifestHash := sl.miner.worker.ComputeManifestHash(block) + localPendingHeader.Header().SetManifestHash(manifestHash, nodeCtx) } // Combine subordinates pending header with local pending header pendingHeaderWithTermini := sl.computePendingHeader(types.NewPendingHeader(localPendingHeader, newTermini), domPendingHeader, domOrigin) - pendingHeaderWithTermini.Header().SetLocation(block.Header().Location()) - + pendingHeaderWithTermini.WorkObject().WorkObjectHeader().SetLocation(block.Location()) + pendingHeaderWithTermini.WorkObject().Body().SetHeader(pendingHeaderWithTermini.Header()) return pendingHeaderWithTermini, nil } // CollectNewlyConfirmedEtxs collects all newly confirmed ETXs since the last coincident with the given location -func (sl *Slice) CollectNewlyConfirmedEtxs(block *types.Block, location common.Location) (types.Transactions, types.Transactions, error) { +func (sl *Slice) CollectNewlyConfirmedEtxs(block *types.WorkObject, location common.Location) (types.Transactions, types.Transactions, error) { nodeLocation := sl.NodeLocation() nodeCtx := sl.NodeCtx() // Collect rollup of ETXs from the subordinate node's manifest @@ -765,7 +770,7 @@ func (sl *Slice) CollectNewlyConfirmedEtxs(block *types.Block, location common.L } // PCRC previous coincidence reference check makes sure there are not any cyclic references in the graph and calculates new termini and the block terminus -func (sl *Slice) pcrc(batch ethdb.Batch, header *types.Header, domTerminus common.Hash, domOrigin bool) (common.Hash, types.Termini, error) { +func (sl *Slice) pcrc(batch ethdb.Batch, header *types.WorkObject, domTerminus common.Hash, domOrigin bool) (common.Hash, types.Termini, error) { nodeLocation := sl.NodeLocation() nodeCtx := sl.NodeCtx() location := header.Location() @@ -828,9 +833,9 @@ func (sl *Slice) poem(externS *big.Int, currentS *big.Int) bool { } // GetPendingHeader is used by the miner to request the current pending header -func (sl *Slice) GetPendingHeader() (*types.Header, error) { +func (sl *Slice) GetPendingHeader() (*types.WorkObject, error) { if ph, exists := sl.readPhCache(sl.bestPhKey); exists { - return ph.Header(), nil + return ph.WorkObject(), nil } else { return nil, errors.New("empty pending header") } @@ -889,7 +894,7 @@ func (sl *Slice) GetPendingEtxsRollupFromSub(hash common.Hash, location common.L if err != nil { return types.PendingEtxsRollup{}, err } - return types.PendingEtxsRollup{Header: block.Header(), EtxsRollup: subRollup}, nil + return types.PendingEtxsRollup{Header: block, EtxsRollup: subRollup}, nil } } return types.PendingEtxsRollup{}, ErrPendingEtxNotFound @@ -919,7 +924,7 @@ func (sl *Slice) GetPendingEtxsFromSub(hash common.Hash, location common.Locatio } block := sl.hc.GetBlockByHash(hash) if block != nil { - return types.PendingEtxs{Header: block.Header(), Etxs: block.ExtTransactions()}, nil + return types.PendingEtxs{Header: block, Etxs: block.ExtTransactions()}, nil } return 
types.PendingEtxs{}, ErrPendingEtxNotFound } @@ -963,21 +968,21 @@ func (sl *Slice) SubRelayPendingHeader(pendingHeader types.PendingHeader, newEnt if !bytes.Equal(location, sl.NodeLocation()) { bestPh, exists := sl.readPhCache(sl.bestPhKey) if exists { - bestPh.Header().SetLocation(sl.NodeLocation()) - sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header()) + bestPh.WorkObject().WorkObjectHeader().SetLocation(sl.NodeLocation()) + sl.miner.worker.pendingHeaderFeed.Send(bestPh.WorkObject()) } } } } // computePendingHeader takes in an localPendingHeaderWithTermini and updates the pending header on the same terminus if the number is greater -func (sl *Slice) computePendingHeader(localPendingHeaderWithTermini types.PendingHeader, domPendingHeader *types.Header, domOrigin bool) types.PendingHeader { +func (sl *Slice) computePendingHeader(localPendingHeaderWithTermini types.PendingHeader, domPendingHeader *types.WorkObject, domOrigin bool) types.PendingHeader { nodeCtx := sl.NodeCtx() var cachedPendingHeaderWithTermini types.PendingHeader hash := localPendingHeaderWithTermini.Termini().DomTerminus(sl.NodeLocation()) cachedPendingHeaderWithTermini, exists := sl.readPhCache(hash) - var newPh *types.Header + var newPh *types.WorkObject if exists { sl.logger.WithFields(log.Fields{ @@ -986,15 +991,15 @@ func (sl *Slice) computePendingHeader(localPendingHeaderWithTermini types.Pendin "termini": cachedPendingHeaderWithTermini.Termini(), }).Debug("computePendingHeader") if domOrigin { - newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), domPendingHeader, nodeCtx, true) - return types.NewPendingHeader(types.CopyHeader(newPh), localPendingHeaderWithTermini.Termini()) + newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.WorkObject(), domPendingHeader, nodeCtx, true) + return types.NewPendingHeader(types.CopyWorkObject(newPh), localPendingHeaderWithTermini.Termini()) } - newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), cachedPendingHeaderWithTermini.Header(), nodeCtx, true) - return types.NewPendingHeader(types.CopyHeader(newPh), localPendingHeaderWithTermini.Termini()) + newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.WorkObject(), cachedPendingHeaderWithTermini.WorkObject(), nodeCtx, true) + return types.NewPendingHeader(types.CopyWorkObject(newPh), localPendingHeaderWithTermini.Termini()) } else { if domOrigin { - newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), domPendingHeader, nodeCtx, true) - return types.NewPendingHeader(types.CopyHeader(newPh), localPendingHeaderWithTermini.Termini()) + newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.WorkObject(), domPendingHeader, nodeCtx, true) + return types.NewPendingHeader(types.CopyWorkObject(newPh), localPendingHeaderWithTermini.Termini()) } return localPendingHeaderWithTermini } @@ -1011,9 +1016,9 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini localPendingHeader, exists := sl.readPhCache(hash) if exists { - combinedPendingHeader := types.CopyHeader(localPendingHeader.Header()) + combinedPendingHeader := types.CopyWorkObject(localPendingHeader.WorkObject()) for _, i := range indices { - combinedPendingHeader = sl.combinePendingHeader(pendingHeader.Header(), combinedPendingHeader, i, false) + combinedPendingHeader = sl.combinePendingHeader(pendingHeader.WorkObject(), combinedPendingHeader, i, false) } localTermini := localPendingHeader.Termini() @@ -1054,11 +1059,11 @@ func (sl *Slice) 
updatePhCacheFromDom(pendingHeader types.PendingHeader, termini }).Info("Choosing phHeader pickPhHead") sl.WriteBestPhKey(localPendingHeader.Termini().DomTerminus(nodeLocation)) } else { - block := sl.hc.GetBlockByHash(localPendingHeader.Header().ParentHash(nodeCtx)) + block := sl.hc.GetBlockByHash(localPendingHeader.WorkObject().ParentHash(nodeCtx)) if block != nil { // setting the current state will help speed the process of append // after mining this block since the state will already be computed - err := sl.hc.SetCurrentState(block.Header()) + err := sl.hc.SetCurrentState(block) if err != nil { sl.logger.WithFields(log.Fields{ "Hash": block.Hash(), @@ -1071,15 +1076,15 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini sl.logger.WithField("err", err).Error("Error generating slice pending header") return err } - combinedPendingHeader = types.CopyHeader(newPendingHeader.Header()) + combinedPendingHeader = types.CopyWorkObject(newPendingHeader.WorkObject()) sl.logger.WithFields(log.Fields{ - "NumberArray": combinedPendingHeader.NumberArray(), + "NumberArray": combinedPendingHeader.Header().NumberArray(), "ParentHash": combinedPendingHeader.ParentHash(nodeCtx), "Terminus": localPendingHeader.Termini().DomTerminus(nodeLocation), }).Info("Choosing phHeader pickPhHead") sl.WriteBestPhKey(localPendingHeader.Termini().DomTerminus(nodeLocation)) } else { - sl.logger.WithField("hash", localPendingHeader.Header().ParentHash(nodeCtx)).Error("Unable to set the current header after the cord update") + sl.logger.WithField("hash", localPendingHeader.WorkObject().ParentHash(nodeCtx)).Error("Unable to set the current header after the cord update") } } } @@ -1090,8 +1095,8 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini } sl.logger.WithFields(log.Fields{ "hash": hash, - "pendingHeaderNumber": pendingHeader.Header().NumberArray(), - "parentHash": pendingHeader.Header().ParentHash(nodeCtx), + "pendingHeaderNumber": pendingHeader.WorkObject().NumberArray(), + "parentHash": pendingHeader.WorkObject().ParentHash(nodeCtx), "terminiIndex": terminiIndex, "indices": indices, }).Warn("Pending header not found in cache") @@ -1099,7 +1104,7 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini } // updatePhCache updates cache given a pendingHeaderWithTermini with the terminus used as the key. 
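// NOTE (illustrative sketch, not part of the patch): after this migration a
// types.PendingHeader wraps a *types.WorkObject, so call sites read the cached
// entry and drill into WorkObjectHeader()/Header() explicitly. The helper name
// below is hypothetical; the accessors are the ones used throughout this diff.
func (sl *Slice) bestPendingWorkObject() *types.WorkObject {
	ph, exists := sl.readPhCache(sl.bestPhKey)
	if !exists {
		return nil
	}
	wo := ph.WorkObject()
	// location (and timestamp) now live on the work object header
	wo.WorkObjectHeader().SetLocation(sl.NodeLocation())
	// consensus fields such as the number array remain on the inner header
	_ = wo.Header().NumberArray()
	return wo
}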
-func (sl *Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inSlice bool, localHeader *types.Header, subReorg bool, location common.Location) { +func (sl *Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inSlice bool, localHeader *types.WorkObject, subReorg bool, location common.Location) { nodeLocation := sl.NodeLocation() nodeCtx := sl.NodeCtx() @@ -1112,7 +1117,7 @@ func (sl *Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inS pendingHeaderWithTermini, exists = sl.readPhCache(termini.DomTerminus(nodeLocation)) if exists { - pendingHeaderWithTermini.SetHeader(sl.combinePendingHeader(localHeader, pendingHeaderWithTermini.Header(), common.ZONE_CTX, true)) + pendingHeaderWithTermini.SetHeader(sl.combinePendingHeader(localHeader, pendingHeaderWithTermini.WorkObject(), common.ZONE_CTX, true)) } bestPh, exists := sl.readPhCache(sl.bestPhKey) @@ -1135,7 +1140,7 @@ func (sl *Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inS if exists { cachedTermini = types.CopyTermini(ph.Termini()) } else { - parentHeader := sl.hc.GetHeaderOrCandidateByHash(pendingHeaderWithTermini.Header().ParentHash(nodeCtx)) + parentHeader := sl.hc.GetHeaderOrCandidateByHash(pendingHeaderWithTermini.WorkObject().ParentHash(nodeCtx)) if sl.hc.IsGenesisHash(parentHeader.Hash()) { ph, _ = sl.readPhCache(parentHeader.Hash()) cachedTermini = types.CopyTermini(ph.Termini()) @@ -1159,9 +1164,10 @@ func (sl *Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inS cachedTermini.SetSubTermini(termini.SubTermini()) // Update the pendingHeader Cache - deepCopyPendingHeaderWithTermini := types.NewPendingHeader(types.CopyHeader(pendingHeaderWithTermini.Header()), cachedTermini) - deepCopyPendingHeaderWithTermini.Header().SetLocation(sl.NodeLocation()) - deepCopyPendingHeaderWithTermini.Header().SetTime(uint64(time.Now().Unix())) + deepCopyPendingHeaderWithTermini := types.NewPendingHeader(pendingHeaderWithTermini.WorkObject(), cachedTermini) + deepCopyPendingHeaderWithTermini.WorkObject().WorkObjectHeader().SetLocation(sl.NodeLocation()) + deepCopyPendingHeaderWithTermini.WorkObject().WorkObjectHeader().SetTime(uint64(time.Now().Unix())) + deepCopyPendingHeaderWithTermini.WorkObject().WorkObjectHeader().SetHeaderHash(deepCopyPendingHeaderWithTermini.Header().Hash()) if subReorg || !exists { sl.writePhCache(deepCopyPendingHeaderWithTermini.Termini().DomTerminus(nodeLocation), deepCopyPendingHeaderWithTermini) @@ -1210,15 +1216,9 @@ func (sl *Slice) init() error { sl.WriteBestPhKey(genesisHash) // Create empty pending ETX entry for genesis block -- genesis may not emit ETXs emptyPendingEtxs := types.Transactions{} - err := sl.hc.AddPendingEtxs(types.PendingEtxs{genesisHeader, emptyPendingEtxs}) - if err != nil { - return err - } - err = sl.AddPendingEtxsRollup(types.PendingEtxsRollup{genesisHeader, emptyPendingEtxs}) - if err != nil { - return err - } - err = sl.hc.AddBloom(types.Bloom{}, genesisHeader.Hash()) + rawdb.WritePendingEtxs(sl.sliceDb, types.PendingEtxs{Header: genesisHeader, Etxs: emptyPendingEtxs}) + rawdb.WritePendingEtxsRollup(sl.sliceDb, types.PendingEtxsRollup{Header: genesisHeader, EtxsRollup: emptyPendingEtxs}) + err := sl.hc.AddBloom(types.Bloom{}, genesisHeader.Hash()) if err != nil { return err } @@ -1240,34 +1240,11 @@ func (sl *Slice) init() error { // constructLocalBlock takes a header and construct the Block locally by getting the body // from the candidate body db. 
This method is used when peers give the block as a placeholder // for the body. -func (sl *Slice) ConstructLocalBlock(header *types.Header) (*types.Block, error) { - pendingBlockBody := rawdb.ReadBody(sl.sliceDb, header.Hash(), header.NumberU64(sl.NodeCtx()), sl.NodeLocation()) - if pendingBlockBody == nil { +func (sl *Slice) ConstructLocalBlock(header *types.WorkObject) (*types.WorkObject, error) { + block := rawdb.ReadWorkObject(sl.sliceDb, header.Hash(), types.BlockObject) + if block == nil { return nil, ErrBodyNotFound } - // Load uncles because they are not included in the block response. - txs := make([]*types.Transaction, len(pendingBlockBody.Transactions)) - for i, tx := range pendingBlockBody.Transactions { - txs[i] = tx - } - uncles := make([]*types.Header, len(pendingBlockBody.Uncles)) - for i, uncle := range pendingBlockBody.Uncles { - uncles[i] = uncle - sl.logger.WithField("hash", uncle.Hash()).Debug("Pending Block uncle") - } - etxs := make([]*types.Transaction, len(pendingBlockBody.ExtTransactions)) - for i, etx := range pendingBlockBody.ExtTransactions { - etxs[i] = etx - } - subManifest := make(types.BlockManifest, len(pendingBlockBody.SubManifest)) - for i, blockHash := range pendingBlockBody.SubManifest { - subManifest[i] = blockHash - } - interlinkHashes := make(common.Hashes, len(pendingBlockBody.InterlinkHashes)) - for i, interlinkhash := range pendingBlockBody.InterlinkHashes { - interlinkHashes[i] = interlinkhash - } - block := types.NewBlockWithHeader(header).WithBody(txs, uncles, etxs, subManifest, interlinkHashes) if err := sl.validator.ValidateBody(block); err != nil { return block, err } else { @@ -1278,45 +1255,61 @@ func (sl *Slice) ConstructLocalBlock(header *types.Header) (*types.Block, error) // constructLocalMinedBlock takes a header and construct the Block locally by getting the block // body from the workers pendingBlockBodyCache. This method is used when the miner sends in the // header. -func (sl *Slice) ConstructLocalMinedBlock(header *types.Header) (*types.Block, error) { +func (sl *Slice) ConstructLocalMinedBlock(wo *types.WorkObject) (*types.WorkObject, error) { nodeCtx := sl.NodeLocation().Context() - var pendingBlockBody *types.Body + var pendingBlockBody *types.WorkObject if nodeCtx == common.ZONE_CTX { - pendingBlockBody = sl.GetPendingBlockBody(header) + pendingBlockBody = sl.GetPendingBlockBody(wo) if pendingBlockBody == nil { + sl.logger.WithFields(log.Fields{"wo.Hash": wo.Hash(), + "wo.Header": wo.HeaderHash(), + "wo.ParentHash": wo.ParentHash(common.ZONE_CTX), + "wo.Difficulty()": wo.Difficulty(), + "wo.Location()": wo.Location(), + }).Warn("Pending Block Body not found") return nil, ErrBodyNotFound } } else { // If the context is PRIME, there is the interlink hashes that needs to be returned from the database var interlinkHashes common.Hashes if nodeCtx == common.PRIME_CTX { - interlinkHashes = rawdb.ReadInterlinkHashes(sl.sliceDb, header.ParentHash(common.PRIME_CTX)) + interlinkHashes = rawdb.ReadInterlinkHashes(sl.sliceDb, wo.ParentHash(common.PRIME_CTX)) } - pendingBlockBody = &types.Body{InterlinkHashes: interlinkHashes} + wo.Body().SetUncles(nil) + wo.Body().SetTransactions(nil) + wo.Body().SetExtTransactions(nil) + wo.Body().SetInterlinkHashes(interlinkHashes) + pendingBlockBody = types.NewWorkObject(wo.WorkObjectHeader(), wo.Body(), nil, types.BlockObject) } // Load uncles because they are not included in the block response. 
- txs := make([]*types.Transaction, len(pendingBlockBody.Transactions)) - for i, tx := range pendingBlockBody.Transactions { + txs := make([]*types.Transaction, len(pendingBlockBody.Transactions())) + for i, tx := range pendingBlockBody.Transactions() { txs[i] = tx } - uncles := make([]*types.Header, len(pendingBlockBody.Uncles)) - for i, uncle := range pendingBlockBody.Uncles { + uncles := make([]*types.WorkObjectHeader, len(pendingBlockBody.Uncles())) + for i, uncle := range pendingBlockBody.Uncles() { uncles[i] = uncle sl.logger.WithField("hash", uncle.Hash()).Debug("Pending Block uncle") } - etxs := make([]*types.Transaction, len(pendingBlockBody.ExtTransactions)) - for i, etx := range pendingBlockBody.ExtTransactions { + etxs := make(types.Transactions, len(pendingBlockBody.ExtTransactions())) + for i, etx := range pendingBlockBody.ExtTransactions() { etxs[i] = etx } - subManifest := make(types.BlockManifest, len(pendingBlockBody.SubManifest)) - for i, blockHash := range pendingBlockBody.SubManifest { + subManifest := make(types.BlockManifest, len(pendingBlockBody.Manifest())) + for i, blockHash := range pendingBlockBody.Manifest() { subManifest[i] = blockHash } - interlinkhashes := make(common.Hashes, len(pendingBlockBody.InterlinkHashes)) - for i, interlinkhash := range pendingBlockBody.InterlinkHashes { - interlinkhashes[i] = interlinkhash + interlinkHashes := make(common.Hashes, len(pendingBlockBody.InterlinkHashes())) + for i, interlinkhash := range pendingBlockBody.InterlinkHashes() { + interlinkHashes[i] = interlinkhash } - block := types.NewBlockWithHeader(header).WithBody(txs, uncles, etxs, subManifest, interlinkhashes) + pendingBlockBody.Body().SetTransactions(txs) + pendingBlockBody.Body().SetUncles(uncles) + pendingBlockBody.Body().SetExtTransactions(etxs) + pendingBlockBody.Body().SetManifest(subManifest) + pendingBlockBody.Body().SetInterlinkHashes(interlinkHashes) + block := types.NewWorkObject(wo.WorkObjectHeader(), pendingBlockBody.Body(), nil, types.BlockObject) + if err := sl.validator.ValidateBody(block); err != nil { return block, err } else { @@ -1325,43 +1318,49 @@ func (sl *Slice) ConstructLocalMinedBlock(header *types.Header) (*types.Block, e } // combinePendingHeader updates the pending header at the given index with the value from given header. 
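// NOTE (condensed sketch; assumptions drawn only from the setters in this diff):
// after the WorkObject migration, fields split across three levels when combining
// pending headers, mirroring combinePendingHeader below. Difficulty, location and
// time sit on the WorkObjectHeader; most consensus fields stay on the inner
// Header; transactions, uncles and the manifest move to the Body.
combined := types.CopyWorkObject(slPendingHeader)
combined.SetParentHash(header.ParentHash(index), index)              // top-level passthrough
combined.Header().SetManifestHash(header.ManifestHash(index), index) // inner header field
combined.WorkObjectHeader().SetDifficulty(header.Difficulty())       // work object header field
combined.Body().SetTransactions(header.Transactions())               // body field
combined.Body().SetHeader(combined.Header())                         // keep the body's header view in sync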
-func (sl *Slice) combinePendingHeader(header *types.Header, slPendingHeader *types.Header, index int, inSlice bool) *types.Header { +func (sl *Slice) combinePendingHeader(header *types.WorkObject, slPendingHeader *types.WorkObject, index int, inSlice bool) *types.WorkObject { // copying the slPendingHeader and updating the copy to remove any shared memory access issues - combinedPendingHeader := types.CopyHeader(slPendingHeader) + combinedPendingHeader := types.CopyWorkObject(slPendingHeader) combinedPendingHeader.SetParentHash(header.ParentHash(index), index) combinedPendingHeader.SetNumber(header.Number(index), index) - combinedPendingHeader.SetManifestHash(header.ManifestHash(index), index) - combinedPendingHeader.SetParentEntropy(header.ParentEntropy(index), index) - combinedPendingHeader.SetParentDeltaS(header.ParentDeltaS(index), index) - combinedPendingHeader.SetParentUncledSubDeltaS(header.ParentUncledSubDeltaS(index), index) + combinedPendingHeader.Header().SetManifestHash(header.ManifestHash(index), index) + combinedPendingHeader.Header().SetParentEntropy(header.ParentEntropy(index), index) + combinedPendingHeader.Header().SetParentDeltaS(header.ParentDeltaS(index), index) + combinedPendingHeader.Header().SetParentUncledSubDeltaS(header.ParentUncledSubDeltaS(index), index) if index == common.PRIME_CTX { - combinedPendingHeader.SetEfficiencyScore(header.EfficiencyScore()) - combinedPendingHeader.SetThresholdCount(header.ThresholdCount()) - combinedPendingHeader.SetExpansionNumber(header.ExpansionNumber()) - combinedPendingHeader.SetEtxEligibleSlices(header.EtxEligibleSlices()) - combinedPendingHeader.SetInterlinkRootHash(header.InterlinkRootHash()) + combinedPendingHeader.Header().SetEfficiencyScore(header.EfficiencyScore()) + combinedPendingHeader.Header().SetThresholdCount(header.ThresholdCount()) + combinedPendingHeader.Header().SetExpansionNumber(header.ExpansionNumber()) + combinedPendingHeader.Header().SetEtxEligibleSlices(header.EtxEligibleSlices()) + combinedPendingHeader.Header().SetInterlinkRootHash(header.InterlinkRootHash()) } if inSlice { - combinedPendingHeader.SetEtxRollupHash(header.EtxRollupHash()) - combinedPendingHeader.SetDifficulty(header.Difficulty()) - combinedPendingHeader.SetUncledS(header.UncledS()) - combinedPendingHeader.SetUncleHash(header.UncleHash()) - combinedPendingHeader.SetTxHash(header.TxHash()) - combinedPendingHeader.SetEtxHash(header.EtxHash()) - combinedPendingHeader.SetEtxSetHash(header.EtxSetHash()) - combinedPendingHeader.SetReceiptHash(header.ReceiptHash()) - combinedPendingHeader.SetEVMRoot(header.EVMRoot()) - combinedPendingHeader.SetUTXORoot(header.UTXORoot()) - combinedPendingHeader.SetCoinbase(header.Coinbase()) - combinedPendingHeader.SetBaseFee(header.BaseFee()) - combinedPendingHeader.SetGasLimit(header.GasLimit()) - combinedPendingHeader.SetGasUsed(header.GasUsed()) - combinedPendingHeader.SetExtra(header.Extra()) - combinedPendingHeader.SetPrimeTerminus(header.PrimeTerminus()) - } + combinedPendingHeader.Header().SetEtxRollupHash(header.EtxRollupHash()) + combinedPendingHeader.WorkObjectHeader().SetDifficulty(header.Difficulty()) + combinedPendingHeader.Header().SetUncledS(header.Header().UncledS()) + combinedPendingHeader.Header().SetUncleHash(header.UncleHash()) + combinedPendingHeader.Header().SetTxHash(header.Header().TxHash()) + combinedPendingHeader.Header().SetEtxHash(header.EtxHash()) + combinedPendingHeader.Header().SetEtxSetHash(header.EtxSetHash()) + 
combinedPendingHeader.Header().SetReceiptHash(header.ReceiptHash()) + combinedPendingHeader.Header().SetEVMRoot(header.EVMRoot()) + combinedPendingHeader.Header().SetUTXORoot(header.UTXORoot()) + combinedPendingHeader.Header().SetCoinbase(header.Coinbase()) + combinedPendingHeader.Header().SetBaseFee(header.BaseFee()) + combinedPendingHeader.Header().SetGasLimit(header.GasLimit()) + combinedPendingHeader.Header().SetGasUsed(header.GasUsed()) + combinedPendingHeader.Header().SetExtra(header.Extra()) + combinedPendingHeader.Header().SetPrimeTerminus(header.PrimeTerminus()) + combinedPendingHeader.Body().SetTransactions(header.Transactions()) + combinedPendingHeader.Body().SetExtTransactions(header.ExtTransactions()) + combinedPendingHeader.Body().SetUncles(header.Uncles()) + combinedPendingHeader.Body().SetManifest(header.Manifest()) + } + + combinedPendingHeader.Body().SetHeader(combinedPendingHeader.Header()) return combinedPendingHeader } @@ -1397,24 +1396,24 @@ func (sl *Slice) ActiveSlices() []common.Location { return activeSlices } -func (sl *Slice) WriteGenesisBlock(block *types.Block, location common.Location) { +func (sl *Slice) WriteGenesisBlock(block *types.WorkObject, location common.Location) { rawdb.WriteManifest(sl.sliceDb, block.Hash(), types.BlockManifest{block.Hash()}) sl.WriteBestPhKey(block.Hash()) // Create empty pending ETX entry for genesis block -- genesis may not emit ETXs emptyPendingEtxs := types.Transactions{} - sl.hc.AddPendingEtxs(types.PendingEtxs{block.Header(), emptyPendingEtxs}) - sl.AddPendingEtxsRollup(types.PendingEtxsRollup{block.Header(), emptyPendingEtxs}) + sl.hc.AddPendingEtxs(types.PendingEtxs{block, emptyPendingEtxs}) + sl.AddPendingEtxsRollup(types.PendingEtxsRollup{block, emptyPendingEtxs}) sl.hc.AddBloom(types.Bloom{}, block.Hash()) - sl.hc.currentHeader.Store(block.Header()) + sl.hc.currentHeader.Store(block) rawdb.WriteEtxSet(sl.sliceDb, block.Hash(), block.NumberU64(sl.NodeCtx()), types.NewEtxSet()) } // NewGenesisPendingHeader creates a pending header on the genesis block -func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.Header, domTerminus common.Hash, genesisHash common.Hash) { +func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.WorkObject, domTerminus common.Hash, genesisHash common.Hash) error { nodeCtx := sl.NodeLocation().Context() if nodeCtx == common.ZONE_CTX && !sl.hc.Empty() { - return + return nil } // Wait until the subclients are all initialized if nodeCtx != common.ZONE_CTX { @@ -1436,7 +1435,7 @@ func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.Header, domTerm } // Upate the local pending header - var localPendingHeader *types.Header + var localPendingHeader *types.WorkObject var err error var termini types.Termini log.Global.Infof("NewGenesisPendingHeader location: %v, genesis hash %s", sl.NodeLocation(), genesisHash) @@ -1446,7 +1445,7 @@ func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.Header, domTerm sl.logger.WithFields(log.Fields{ "err": err, }).Warn("Error generating the New Genesis Pending Header") - return + return nil } termini = genesisTermini } else { @@ -1454,7 +1453,7 @@ func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.Header, domTerm if !exists { log.Global.Errorf("Genesis pending header not found in node location %v cache %v", sl.NodeLocation(), domTerminus) } - localPendingHeader = localPendingHeaderWithTermini.Header() + localPendingHeader = localPendingHeaderWithTermini.WorkObject() termini = localPendingHeaderWithTermini.Termini() 
} @@ -1464,35 +1463,41 @@ func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.Header, domTerm } if nodeCtx != common.ZONE_CTX { - localPendingHeader.SetCoinbase(common.Zero) + localPendingHeader.Header().SetCoinbase(common.Zero) } + localPendingHeader.WorkObjectHeader().SetLocation(common.Location{0, 0}) + if nodeCtx == common.PRIME_CTX { - domPendingHeader = types.CopyHeader(localPendingHeader) + domPendingHeader = types.CopyWorkObject(localPendingHeader) } else { domPendingHeader = sl.combinePendingHeader(localPendingHeader, domPendingHeader, nodeCtx, true) - domPendingHeader.SetLocation(sl.NodeLocation()) + domPendingHeader.WorkObjectHeader().SetLocation(sl.NodeLocation()) + domPendingHeader.WorkObjectHeader().SetHeaderHash(domPendingHeader.Body().Header().Hash()) } if nodeCtx != common.ZONE_CTX { for i, client := range sl.subClients { if client != nil { - client.NewGenesisPendingHeader(context.Background(), domPendingHeader, termini.SubTerminiAtIndex(i), genesisHash) + err = client.NewGenesisPendingHeader(context.Background(), domPendingHeader, termini.SubTerminiAtIndex(i), genesisHash) if err != nil { - return + return err } } } } if sl.hc.Empty() { - domPendingHeader.SetTime(uint64(time.Now().Unix())) + domPendingHeader.WorkObjectHeader().SetTime(uint64(time.Now().Unix())) + domPendingHeader.WorkObjectHeader().SetHeaderHash(domPendingHeader.Body().Header().Hash()) sl.writePhCache(genesisHash, types.NewPendingHeader(domPendingHeader, genesisTermini)) } + return nil } -func (sl *Slice) GetPendingBlockBody(header *types.Header) *types.Body { - return sl.miner.worker.GetPendingBlockBody(header) +func (sl *Slice) GetPendingBlockBody(wo *types.WorkObject) *types.WorkObject { + blockBody, _ := sl.miner.worker.GetPendingBlockBody(wo) + return blockBody } func (sl *Slice) SubscribeMissingBlockEvent(ch chan<- types.BlockRequest) event.Subscription { @@ -1545,7 +1550,7 @@ func (sl *Slice) loadLastState() error { bestPh := rawdb.ReadPendingHeader(sl.sliceDb, sl.bestPhKey) if bestPh != nil { sl.writePhCache(sl.bestPhKey, *bestPh) - parent := sl.hc.GetHeaderOrCandidateByHash(bestPh.Header().ParentHash(sl.NodeCtx())) + parent := sl.hc.GetHeaderOrCandidateByHash(bestPh.WorkObject().ParentHash(sl.NodeCtx())) if parent == nil { return errors.New("failed to get pending header's parent header") } @@ -1589,11 +1594,11 @@ func (sl *Slice) TxPool() *TxPool { return sl.txPool } func (sl *Slice) Miner() *Miner { return sl.miner } -func (sl *Slice) CurrentInfo(header *types.Header) bool { +func (sl *Slice) CurrentInfo(header *types.WorkObject) bool { return sl.miner.worker.CurrentInfo(header) } -func (sl *Slice) WriteBlock(block *types.Block) { +func (sl *Slice) WriteBlock(block *types.WorkObject) { sl.hc.WriteBlock(block) } @@ -1633,7 +1638,7 @@ func (sl *Slice) CheckForBadHashAndRecover() { nodeCtx := sl.NodeLocation().Context() // Lookup the bad hashes list to see if we have it in the database for _, fork := range BadHashes { - var badBlock *types.Block + var badBlock *types.WorkObject var badHash common.Hash switch nodeCtx { case common.PRIME_CTX: @@ -1656,16 +1661,16 @@ func (sl *Slice) CheckForBadHashAndRecover() { } // SetHeadBackToRecoveryState sets the heads of the whole hierarchy to the recovery state -func (sl *Slice) SetHeadBackToRecoveryState(pendingHeader *types.Header, hash common.Hash) types.PendingHeader { +func (sl *Slice) SetHeadBackToRecoveryState(pendingHeader *types.WorkObject, hash common.Hash) types.PendingHeader { nodeCtx := sl.NodeLocation().Context() if nodeCtx == 
common.PRIME_CTX { localPendingHeaderWithTermini := sl.ComputeRecoveryPendingHeader(hash) sl.phCache.Add(hash, localPendingHeaderWithTermini) - sl.GenerateRecoveryPendingHeader(localPendingHeaderWithTermini.Header(), localPendingHeaderWithTermini.Termini()) + sl.GenerateRecoveryPendingHeader(localPendingHeaderWithTermini.WorkObject(), localPendingHeaderWithTermini.Termini()) } else { localPendingHeaderWithTermini := sl.ComputeRecoveryPendingHeader(hash) - localPendingHeaderWithTermini.SetHeader(sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), pendingHeader, nodeCtx, true)) - localPendingHeaderWithTermini.Header().SetLocation(sl.NodeLocation()) + localPendingHeaderWithTermini.SetHeader(sl.combinePendingHeader(localPendingHeaderWithTermini.WorkObject(), pendingHeader, nodeCtx, true)) + localPendingHeaderWithTermini.WorkObject().WorkObjectHeader().SetLocation(sl.NodeLocation()) sl.phCache.Add(hash, localPendingHeaderWithTermini) return localPendingHeaderWithTermini } @@ -1699,7 +1704,7 @@ func (sl *Slice) cleanCacheAndDatabaseTillBlock(hash common.Hash) { var badHashes []common.Hash header := currentHeader for { - rawdb.DeleteBlock(sl.sliceDb, header.Hash(), header.NumberU64(nodeCtx)) + rawdb.DeleteWorkObject(sl.sliceDb, header.Hash(), header.NumberU64(nodeCtx), types.BlockObject) rawdb.DeleteCanonicalHash(sl.sliceDb, header.NumberU64(nodeCtx)) rawdb.DeleteHeaderNumber(sl.sliceDb, header.Hash()) rawdb.DeleteTermini(sl.sliceDb, header.Hash()) @@ -1730,7 +1735,7 @@ func (sl *Slice) cleanCacheAndDatabaseTillBlock(hash common.Hash) { } } -func (sl *Slice) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkPointHashes types.Termini) error { +func (sl *Slice) GenerateRecoveryPendingHeader(pendingHeader *types.WorkObject, checkPointHashes types.Termini) error { nodeCtx := sl.NodeCtx() regions, zones := common.GetHierarchySizeForExpansionNumber(sl.hc.currentExpansionNumber) if nodeCtx == common.PRIME_CTX { @@ -1743,7 +1748,7 @@ func (sl *Slice) GenerateRecoveryPendingHeader(pendingHeader *types.Header, chec newPendingHeader := sl.SetHeadBackToRecoveryState(pendingHeader, checkPointHashes.SubTerminiAtIndex(sl.NodeLocation().Region())) for i := 0; i < int(zones); i++ { if sl.subClients[i] != nil { - sl.subClients[i].GenerateRecoveryPendingHeader(context.Background(), newPendingHeader.Header(), newPendingHeader.Termini()) + sl.subClients[i].GenerateRecoveryPendingHeader(context.Background(), newPendingHeader.WorkObject(), newPendingHeader.Termini()) } } } else { @@ -1842,9 +1847,9 @@ func (sl *Slice) AddGenesisHash(hash common.Hash) { } // AddGenesisPendingEtxs adds the genesis pending etxs to the db -func (sl *Slice) AddGenesisPendingEtxs(block *types.Block) { - sl.hc.pendingEtxs.Add(block.Hash(), types.PendingEtxs{block.Header(), types.Transactions{}}) - rawdb.WritePendingEtxs(sl.sliceDb, types.PendingEtxs{block.Header(), types.Transactions{}}) +func (sl *Slice) AddGenesisPendingEtxs(block *types.WorkObject) { + sl.hc.pendingEtxs.Add(block.Hash(), types.PendingEtxs{block, types.Transactions{}}) + rawdb.WritePendingEtxs(sl.sliceDb, types.PendingEtxs{block, types.Transactions{}}) } // SubscribeExpansionEvent subscribes to the expansion feed diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 769d0cafa0..7ecc0986ae 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -87,9 +87,9 @@ type Pruner struct { // NewPruner creates the pruner instance. 
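// NOTE (sketch, not part of the patch): the recovery path above and the pruner
// below standardize on the work-object rawdb helpers. Only the helper names that
// appear in this diff are assumed; db, hash and nodeCtx are placeholders.
wo := rawdb.ReadWorkObject(db, hash, types.BlockObject) // replaces ReadBlock(db, hash, number, location)
if wo == nil {
	// handle the missing-body case
}
rawdb.DeleteWorkObject(db, wo.Hash(), wo.NumberU64(nodeCtx), types.BlockObject) // replaces DeleteBlock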
func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint64, logger *log.Logger, location common.Location) (*Pruner, error) { - headBlock := rawdb.ReadHeadBlock(db, location) + headBlock := rawdb.ReadHeadBlock(db) if headBlock == nil { - return nil, errors.New("Failed to load head block") + return nil, errors.New("failed to load head block") } snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.EVMRoot(), false, false) if err != nil { @@ -376,9 +376,9 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string, loc if stateBloomPath == "" { return nil // nothing to recover } - headBlock := rawdb.ReadHeadBlock(db, location) + headBlock := rawdb.ReadHeadBlock(db) if headBlock == nil { - return errors.New("Failed to load head block") + return errors.New("failed to load head work object") } // Initialize the snapshot tree in recovery mode to handle this special case: // - Users run the `prune-state` command multiple times @@ -432,7 +432,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom, location common.L if genesisHash == (common.Hash{}) { return errors.New("missing genesis hash") } - genesis := rawdb.ReadBlock(db, genesisHash, 0, location) + genesis := rawdb.ReadWorkObject(db, genesisHash, types.BlockObject) if genesis == nil { return errors.New("missing genesis block") } diff --git a/core/state_processor.go b/core/state_processor.go index b59b8ed6e7..62b3748b88 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -198,11 +198,11 @@ func NewStateProcessor(config *params.ChainConfig, hc *HeaderChain, engine conse // Process returns the receipts and logs accumulated during the process and // returns the amount of gas that was used in the process. If any of the // transactions failed to execute due to insufficient gas it will return an error. -func (p *StateProcessor) Process(block *types.Block, etxSet *types.EtxSet) (types.Receipts, []*types.Transaction, []*types.Log, *state.StateDB, uint64, error) { +func (p *StateProcessor) Process(block *types.WorkObject, etxSet *types.EtxSet) (types.Receipts, []*types.Transaction, []*types.Log, *state.StateDB, uint64, error) { var ( receipts types.Receipts usedGas = new(uint64) - header = types.CopyHeader(block.Header()) + header = types.CopyWorkObject(block) blockHash = block.Hash() nodeLocation = p.hc.NodeLocation() nodeCtx = p.hc.NodeCtx() @@ -212,7 +212,7 @@ func (p *StateProcessor) Process(block *types.Block, etxSet *types.EtxSet) (type ) start := time.Now() - parent := p.hc.GetBlock(block.Header().ParentHash(nodeCtx), block.NumberU64(nodeCtx)-1) + parent := p.hc.GetBlock(block.ParentHash(nodeCtx), block.NumberU64(nodeCtx)-1) if parent == nil { return types.Receipts{}, []*types.Transaction{}, []*types.Log{}, nil, 0, errors.New("parent block is nil for the block given to process") } @@ -318,7 +318,7 @@ func (p *StateProcessor) Process(block *types.Block, etxSet *types.EtxSet) (type continue } else { prevZeroBal := prepareApplyETX(statedb, tx, nodeLocation) - receipt, err = applyTransaction(msg, parent.Header(), p.config, p.hc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, &etxRLimit, &etxPLimit, p.logger) + receipt, err = applyTransaction(msg, parent, p.config, p.hc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, &etxRLimit, &etxPLimit, p.logger) statedb.SetBalance(common.ZeroInternal(nodeLocation), prevZeroBal) // Reset the balance to what it previously was. 
Residual balance will be lost if err != nil { return nil, nil, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) @@ -330,7 +330,7 @@ func (p *StateProcessor) Process(block *types.Block, etxSet *types.EtxSet) (type } else if tx.Type() == types.QuaiTxType { startTimeTx := time.Now() - receipt, err = applyTransaction(msg, parent.Header(), p.config, p.hc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, &etxRLimit, &etxPLimit, p.logger) + receipt, err = applyTransaction(msg, parent, p.config, p.hc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, &etxRLimit, &etxPLimit, p.logger) if err != nil { return nil, nil, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } @@ -378,13 +378,13 @@ func (p *StateProcessor) Process(block *types.Block, etxSet *types.EtxSet) (type } } - if (etxSet != nil && etxSet.Len() > 0 && totalEtxGas < minimumEtxGas) || totalEtxGas > maximumEtxGas { + if etxSet != nil && (etxSet.Len() > 0 && totalEtxGas < minimumEtxGas) || totalEtxGas > maximumEtxGas { return nil, nil, nil, nil, 0, fmt.Errorf("total gas used by ETXs %d is not within the range %d to %d", totalEtxGas, minimumEtxGas, maximumEtxGas) } time4 := common.PrettyDuration(time.Since(start)) // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) - p.engine.Finalize(p.hc, header, statedb, block.Transactions(), block.Uncles()) + p.engine.Finalize(p.hc, block, statedb) time5 := common.PrettyDuration(time.Since(start)) p.logger.WithFields(log.Fields{ @@ -414,7 +414,7 @@ func (p *StateProcessor) Process(block *types.Block, etxSet *types.EtxSet) (type return receipts, qiEtxs, allLogs, statedb, *usedGas, nil } -func applyTransaction(msg types.Message, parent *types.Header, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM, etxRLimit, etxPLimit *int, logger *log.Logger) (*types.Receipt, error) { +func applyTransaction(msg types.Message, parent *types.WorkObject, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM, etxRLimit, etxPLimit *int, logger *log.Logger) (*types.Receipt, error) { nodeLocation := config.Location // Create a new context to be used in the EVM environment. 
txContext := NewEVMTxContext(msg) @@ -478,7 +478,7 @@ func applyTransaction(msg types.Message, parent *types.Header, config *params.Ch return receipt, err } -func ProcessQiTx(tx *types.Transaction, chain ChainContext, updateState bool, currentHeader *types.Header, statedb *state.StateDB, gp *GasPool, usedGas *uint64, signer types.Signer, location common.Location, chainId big.Int, etxRLimit, etxPLimit *int) (*big.Int, []*types.Transaction, error) { +func ProcessQiTx(tx *types.Transaction, chain ChainContext, updateState bool, currentHeader *types.WorkObject, statedb *state.StateDB, gp *GasPool, usedGas *uint64, signer types.Signer, location common.Location, chainId big.Int, etxRLimit, etxPLimit *int) (*big.Int, []*types.Transaction, error) { // Sanity checks if tx == nil || tx.Type() != types.QiTxType { return nil, nil, fmt.Errorf("tx %032x is not a QiTx", tx.Hash()) @@ -643,11 +643,10 @@ func ProcessQiTx(tx *types.Transaction, chain ChainContext, updateState bool, cu } // Apply State -func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.Block, newInboundEtxs types.Transactions) ([]*types.Log, error) { +func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.WorkObject, newInboundEtxs types.Transactions) ([]*types.Log, error) { nodeCtx := p.hc.NodeCtx() start := time.Now() blockHash := block.Hash() - header := types.CopyHeader(block.Header()) parentHash := block.ParentHash(nodeCtx) parentNumber := block.NumberU64(nodeCtx) - 1 @@ -658,7 +657,7 @@ func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.Block, newInbound } parentNumber = parent.NumberU64(nodeCtx) } - etxSet := rawdb.ReadEtxSet(p.hc.bc.db, parentHash, parentNumber, p.hc.NodeLocation()) + etxSet := rawdb.ReadEtxSet(p.hc.bc.db, parentHash, parentNumber) time1 := common.PrettyDuration(time.Since(start)) if etxSet == nil { return nil, errors.New("failed to load etx set") @@ -717,7 +716,7 @@ func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.Block, newInbound etxSet.Update(newInboundEtxs, p.hc.NodeLocation(), func(hash common.Hash, etx *types.Transaction) { rawdb.WriteETX(batch, hash, etx) // This must be done because of rawdb <-> types import cycle }) - rawdb.WriteEtxSet(batch, header.Hash(), header.NumberU64(nodeCtx), etxSet) + rawdb.WriteEtxSet(batch, block.Hash(), block.NumberU64(nodeCtx), etxSet) time12 := common.PrettyDuration(time.Since(start)) p.logger.WithFields(log.Fields{ @@ -742,7 +741,7 @@ func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.Block, newInbound // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. 
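// NOTE (hedged call-site sketch for the updated signature below): every argument
// is a placeholder, but the parameter order matches the new *types.WorkObject-based API.
receipt, err := ApplyTransaction(
	chainConfig, // *params.ChainConfig
	parentWo,    // *types.WorkObject: parent block's work object (was *types.Header)
	chain,       // ChainContext
	nil,         // author
	gasPool, statedb,
	headerWo,    // *types.WorkObject: header being processed (was *types.Header)
	tx, usedGas, vm.Config{}, &etxRLimit, &etxPLimit, logger,
)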
-func ApplyTransaction(config *params.ChainConfig, parent *types.Header, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config, etxRLimit, etxPLimit *int, logger *log.Logger) (*types.Receipt, error) { +func ApplyTransaction(config *params.ChainConfig, parent *types.WorkObject, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.WorkObject, tx *types.Transaction, usedGas *uint64, cfg vm.Config, etxRLimit, etxPLimit *int, logger *log.Logger) (*types.Receipt, error) { nodeCtx := config.Location.Context() msg, err := tx.AsMessage(types.MakeSigner(config, header.Number(nodeCtx)), header.BaseFee()) if err != nil { @@ -821,7 +820,7 @@ func (p *StateProcessor) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxL if lookup, exist := p.txLookupCache.Get(hash); exist { return lookup.(*rawdb.LegacyTxLookupEntry) } - tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(p.hc.headerDb, hash, p.hc.NodeLocation()) + tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(p.hc.headerDb, hash) if tx == nil { return nil } @@ -866,9 +865,9 @@ func (p *StateProcessor) ContractCodeWithPrefix(hash common.Hash) ([]byte, error // - checklive: if true, then the live 'blockchain' state database is used. If the caller want to // perform Commit or other 'save-to-disk' changes, this should be set to false to avoid // storing trash persistently -func (p *StateProcessor) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, err error) { +func (p *StateProcessor) StateAtBlock(block *types.WorkObject, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, err error) { var ( - current *types.Header + current *types.WorkObject database state.Database utxoDatabase state.Database report = true @@ -884,14 +883,14 @@ func (p *StateProcessor) StateAtBlock(block *types.Block, reexec uint64, base *s } } - var newHeads []*types.Header + var newHeads []*types.WorkObject if base != nil { // The optional base statedb is given, mark the start point as parent block statedb, database, utxoDatabase, report = base, base.Database(), base.UTXODatabase(), false current = p.hc.GetHeaderOrCandidate(block.ParentHash(nodeCtx), block.NumberU64(nodeCtx)-1) } else { // Otherwise try to reexec blocks until we find a state or reach our limit - current = types.CopyHeader(block.Header()) + current = types.CopyWorkObject(block) // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. 
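// NOTE (compressed sketch of the re-execution walk in StateAtBlock after this
// change; not a verbatim excerpt): ancestors are tracked as work objects and
// copied with CopyWorkObject instead of CopyHeader. Surrounding variables
// (database, utxoDatabase, nodeLocation, nodeCtx) are as in the function above.
current := types.CopyWorkObject(block)
for {
	parent := p.hc.GetHeaderOrCandidate(current.ParentHash(nodeCtx), current.NumberU64(nodeCtx)-1)
	if parent == nil {
		break // missing ancestor; caller reports the detached block
	}
	current = types.CopyWorkObject(parent)
	if statedb, err = state.New(current.EVMRoot(), current.UTXORoot(), database, utxoDatabase, nil, nodeLocation); err == nil {
		break // found a re-executable state root
	}
}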
@@ -919,7 +918,7 @@ func (p *StateProcessor) StateAtBlock(block *types.Block, reexec uint64, base *s if parent == nil { return nil, fmt.Errorf("missing block %v %d", current.ParentHash(nodeCtx), current.NumberU64(nodeCtx)-1) } - current = types.CopyHeader(parent) + current = types.CopyWorkObject(parent) statedb, err = state.New(current.EVMRoot(), current.UTXORoot(), database, utxoDatabase, nil, nodeLocation) if err == nil { @@ -961,16 +960,16 @@ func (p *StateProcessor) StateAtBlock(block *types.Block, reexec uint64, base *s parent := p.hc.GetHeaderByHash(parentHash) parentNumber = parent.NumberU64(nodeCtx) } - etxSet := rawdb.ReadEtxSet(p.hc.bc.db, parentHash, parentNumber, p.hc.NodeLocation()) + etxSet := rawdb.ReadEtxSet(p.hc.bc.db, parentHash, parentNumber) if etxSet == nil { return nil, errors.New("etxSet set is nil in StateProcessor") } - inboundEtxs := rawdb.ReadInboundEtxs(p.hc.bc.db, current.Hash(), p.hc.NodeLocation()) + inboundEtxs := rawdb.ReadInboundEtxs(p.hc.bc.db, current.Hash()) etxSet.Update(inboundEtxs, nodeLocation, func(hash common.Hash, etx *types.Transaction) { rawdb.WriteETX(rawdb.NewMemoryDatabase(), hash, etx) }) - currentBlock := rawdb.ReadBlock(p.hc.bc.db, current.Hash(), current.NumberU64(nodeCtx), p.hc.NodeLocation()) + currentBlock := rawdb.ReadWorkObject(p.hc.bc.db, current.Hash(), types.BlockObject) if currentBlock == nil { return nil, errors.New("detached block found trying to regenerate state") } @@ -1012,7 +1011,7 @@ func (p *StateProcessor) StateAtBlock(block *types.Block, reexec uint64, base *s } // stateAtTransaction returns the execution environment of a certain transaction. -func (p *StateProcessor) StateAtTransaction(block *types.Block, txIndex int, reexec uint64) (Message, vm.BlockContext, *state.StateDB, error) { +func (p *StateProcessor) StateAtTransaction(block *types.WorkObject, txIndex int, reexec uint64) (Message, vm.BlockContext, *state.StateDB, error) { nodeCtx := p.hc.NodeCtx() // Short circuit if it's genesis block. if block.NumberU64(nodeCtx) == 0 { @@ -1038,7 +1037,7 @@ func (p *StateProcessor) StateAtTransaction(block *types.Block, txIndex int, ree // Assemble the transaction call message and return if the requested offset msg, _ := tx.AsMessage(signer, block.BaseFee()) txContext := NewEVMTxContext(msg) - context := NewEVMBlockContext(block.Header(), p.hc, nil) + context := NewEVMBlockContext(block, p.hc, nil) if idx == txIndex { return msg, context, statedb, nil } diff --git a/core/tx_cacher.go b/core/tx_cacher.go index 7ef5760504..dbd099202e 100644 --- a/core/tx_cacher.go +++ b/core/tx_cacher.go @@ -94,7 +94,7 @@ func (cacher *txSenderCacher) recover(signer types.Signer, txs []*types.Transact // recoverFromBlocks recovers the senders from a batch of blocks and caches them // back into the same data structures. There is no validation being done, nor // any reaction to invalid signatures. That is up to calling code later. -func (cacher *txSenderCacher) recoverFromBlocks(signer types.Signer, blocks []*types.Block) { +func (cacher *txSenderCacher) recoverFromBlocks(signer types.Signer, blocks []*types.WorkObject) { count := 0 for _, block := range blocks { count += len(block.Transactions()) diff --git a/core/tx_pool.go b/core/tx_pool.go index f63fe11762..416a3429ec 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -142,17 +142,17 @@ const ( // blockChain provides the state of blockchain and current gas limit to do // some pre checks in tx pool and event subscribers. 
type blockChain interface { - CurrentBlock() *types.Block - GetBlock(hash common.Hash, number uint64) *types.Block + CurrentBlock() *types.WorkObject + GetBlock(hash common.Hash, number uint64) *types.WorkObject StateAt(root common.Hash, utxoRoot common.Hash) (*state.StateDB, error) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription IsGenesisHash(hash common.Hash) bool CheckIfEtxIsEligible(hash common.Hash, location common.Location) bool Engine() consensus.Engine - GetHeaderOrCandidate(common.Hash, uint64) *types.Header - GetHeader(common.Hash, uint64) *types.Header + GetHeaderOrCandidate(common.Hash, uint64) *types.WorkObject + GetHeader(common.Hash, uint64) *types.WorkObject NodeCtx() int - GetHeaderByHash(common.Hash) *types.Header + GetHeaderByHash(common.Hash) *types.WorkObject } // TxPoolConfig are the configuration parameters of the transaction pool. @@ -308,7 +308,7 @@ type TxPool struct { } type txpoolResetRequest struct { - oldHead, newHead *types.Header + oldHead, newHead *types.WorkObject } type newSender struct { @@ -377,7 +377,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block pool.locals.add(addr) } pool.priced = newTxPricedList(pool.all) - pool.reset(nil, chain.CurrentBlock().Header()) + pool.reset(nil, chain.CurrentBlock()) // Start the reorg loop early so it can handle requests generated during journal loading. pool.wg.Add(1) @@ -426,7 +426,7 @@ func (pool *TxPool) loop() { // Handle ChainHeadEvent case ev := <-pool.chainHeadCh: if ev.Block != nil { - pool.requestReset(head.Header(), ev.Block.Header()) + pool.requestReset(head, ev.Block) head = ev.Block } @@ -1140,7 +1140,7 @@ func (pool *TxPool) addQiTx(tx *types.Transaction, grabLock bool) error { if grabLock { pool.mu.RLock() // need to readlock the whole pool because we are reading the current state } - fee, _, err := ProcessQiTx(tx, pool.chain, false, pool.chain.CurrentBlock().Header(), pool.currentState, &gp, new(uint64), pool.signer, location, *pool.chainconfig.ChainID, &etxRLimit, &etxPLimit) + fee, _, err := ProcessQiTx(tx, pool.chain, false, pool.chain.CurrentBlock(), pool.currentState, &gp, new(uint64), pool.signer, location, *pool.chainconfig.ChainID, &etxRLimit, &etxPLimit) if err != nil { pool.mu.RUnlock() pool.logger.WithFields(logrus.Fields{ @@ -1298,7 +1298,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { // requestReset requests a pool reset to the new head block. // The returned channel is closed when the reset has occurred. -func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { +func (pool *TxPool) requestReset(oldHead *types.WorkObject, newHead *types.WorkObject) chan struct{} { select { case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: return <-pool.reorgDoneCh @@ -1509,7 +1509,7 @@ func (pool *TxPool) runReorg(done chan struct{}, cancel chan struct{}, reset *tx // reset retrieves the current state of the blockchain and ensures the content // of the transaction pool is valid with regard to the chain state. // The mempool lock must be held by the caller. 
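// NOTE (minimal usage sketch with placeholder variables): with the blockChain
// interface returning work objects, the pool no longer peels off .Header() before
// resetting or estimating Qi fees.
head := pool.chain.CurrentBlock() // *types.WorkObject now
pool.requestReset(head, pool.chain.CurrentBlock())
fee, _, err := ProcessQiTx(tx, pool.chain, false, pool.chain.CurrentBlock(),
	pool.currentState, &gp, new(uint64), pool.signer, location,
	*pool.chainconfig.ChainID, &etxRLimit, &etxPLimit)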
-func (pool *TxPool) reset(oldHead, newHead *types.Header) { +func (pool *TxPool) reset(oldHead, newHead *types.WorkObject) { nodeCtx := pool.chainconfig.Location.Context() var start time.Time if pool.reOrgCounter == c_reorgCounterThreshold { @@ -1610,7 +1610,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { } // Initialize the internal state to the current head if newHead == nil { - newHead = pool.chain.CurrentBlock().Header() // Special case during testing + newHead = pool.chain.CurrentBlock() // Special case during testing } evmRoot := newHead.EVMRoot() diff --git a/core/types.go b/core/types.go index 9bc1bb76d5..32520c3337 100644 --- a/core/types.go +++ b/core/types.go @@ -27,11 +27,11 @@ import ( // done by the specific consensus engines. type Validator interface { // ValidateBody validates the given block's content. - ValidateBody(block *types.Block) error + ValidateBody(block *types.WorkObject) error // ValidateState validates the given statedb and optionally the receipts and // gas used. - ValidateState(block *types.Block, state *state.StateDB, receipts types.Receipts, utxoEtxs []*types.Transaction, etxSet *types.EtxSet, usedGas uint64) error + ValidateState(block *types.WorkObject, state *state.StateDB, receipts types.Receipts, utxoEtxs []*types.Transaction, etxSet *types.EtxSet, usedGas uint64) error } // Prefetcher is an interface for pre-caching transaction signatures and state. @@ -39,7 +39,7 @@ type Prefetcher interface { // Prefetch processes the state changes according to the Quai rules by running // the transaction messages using the statedb, but any changes are discarded. The // only goal is to pre-cache transaction signatures and state trie nodes. - Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) + Prefetch(block *types.WorkObject, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) } // Processor is an interface for processing blocks using a given initial state. @@ -47,6 +47,6 @@ type Processor interface { // Process processes the state changes according to the Quai rules by running // the transaction messages using the statedb and applying any rewards to both // the processor (coinbase) and any included uncles. - Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) - Apply(block *types.Block) error + Process(block *types.WorkObject, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) + Apply(block *types.WorkObject) error } diff --git a/core/types/block.go b/core/types/block.go index 8132f58070..ac98437a02 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -27,7 +27,6 @@ import ( "reflect" "sync" "sync/atomic" - "time" "google.golang.org/protobuf/proto" "lukechampine.com/blake3" @@ -82,47 +81,48 @@ func (n *BlockNonce) UnmarshalText(input []byte) error { return hexutil.UnmarshalFixedText("BlockNonce", input, n[:]) } -//go:generate gencodec -type Header -field-override headerMarshaling -out gen_header_json.go +type writeCounter common.StorageSize + +func (c *writeCounter) Write(b []byte) (int, error) { + *c += writeCounter(len(b)) + return len(b), nil +} // Header represents a block header in the Quai blockchain. 
type Header struct { - parentHash []common.Hash `json:"parentHash" gencodec:"required"` - uncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` - coinbase common.Address `json:"miner" gencodec:"required"` - evmRoot common.Hash `json:"evmRoot" gencodec:"required"` - utxoRoot common.Hash `json:"utxoRoot" gencodec:"required"` - txHash common.Hash `json:"transactionsRoot" gencodec:"required"` - etxHash common.Hash `json:"extTransactionsRoot" gencodec:"required"` - etxSetHash common.Hash `json:"etxSetHash" gencodec:"required"` - etxRollupHash common.Hash `json:"extRollupRoot" gencodec:"required"` - manifestHash []common.Hash `json:"manifestHash" gencodec:"required"` - receiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` - difficulty *big.Int `json:"difficulty" gencodec:"required"` - parentEntropy []*big.Int `json:"parentEntropy" gencodec:"required"` - parentDeltaS []*big.Int `json:"parentDeltaS" gencodec:"required"` - parentUncledSubDeltaS []*big.Int `json:"parentUncledSubDeltaS" gencodec:"required"` - efficiencyScore uint16 `json:"efficiencyScore" gencodec:"required"` - thresholdCount uint16 `json:"thresholdCount" gencodec:"required"` - expansionNumber uint8 `json:"expansionNumber" gencodec:"required"` - etxEligibleSlices common.Hash `json:"etxEligibleSlices" gencodec:"required"` - primeTerminus common.Hash `json:"primeTerminus" gencodec:"required"` - interlinkRootHash common.Hash `json:"interlinkRootHash" gencodec:"required"` - uncledS *big.Int `json:"uncledLogS" gencodec:"required"` - number []*big.Int `json:"number" gencodec:"required"` - gasLimit uint64 `json:"gasLimit" gencodec:"required"` - gasUsed uint64 `json:"gasUsed" gencodec:"required"` - baseFee *big.Int `json:"baseFeePerGas" gencodec:"required"` - location common.Location `json:"location" gencodec:"required"` - time uint64 `json:"timestamp" gencodec:"required"` - extra []byte `json:"extraData" gencodec:"required"` - mixHash common.Hash `json:"mixHash" gencodec:"required"` - nonce BlockNonce `json:"nonce"` + parentHash []common.Hash `json:"parentHash" gencodec:"required"` + uncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` + coinbase common.Address `json:"miner" gencodec:"required"` + evmRoot common.Hash `json:"evmRoot" gencodec:"required"` + utxoRoot common.Hash `json:"utxoRoot" gencodec:"required"` + txHash common.Hash `json:"transactionsRoot" gencodec:"required"` + etxHash common.Hash `json:"extTransactionsRoot" gencodec:"required"` + etxSetHash common.Hash `json:"etxSetHash" gencodec:"required"` + etxRollupHash common.Hash `json:"extRollupRoot" gencodec:"required"` + manifestHash []common.Hash `json:"manifestHash" gencodec:"required"` + receiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` + difficulty *big.Int `json:"difficulty" gencodec:"required"` + parentEntropy []*big.Int `json:"parentEntropy" gencodec:"required"` + parentDeltaS []*big.Int `json:"parentDeltaS" gencodec:"required"` + parentUncledSubDeltaS []*big.Int `json:"parentUncledSubDeltaS" gencodec:"required"` + efficiencyScore uint16 `json:"efficiencyScore" gencodec:"required"` + thresholdCount uint16 `json:"thresholdCount" gencodec:"required"` + expansionNumber uint8 `json:"expansionNumber" gencodec:"required"` + etxEligibleSlices common.Hash `json:"etxEligibleSlices" gencodec:"required"` + primeTerminus common.Hash `json:"primeTerminus" gencodec:"required"` + interlinkRootHash common.Hash `json:"interlinkRootHash" gencodec:"required"` + uncledS *big.Int `json:"uncledLogS" gencodec:"required"` + number []*big.Int 
`json:"number" gencodec:"required"` + gasLimit uint64 `json:"gasLimit" gencodec:"required"` + gasUsed uint64 `json:"gasUsed" gencodec:"required"` + baseFee *big.Int `json:"baseFeePerGas" gencodec:"required"` + extra []byte `json:"extraData" gencodec:"required"` + mixHash common.Hash `json:"mixHash" gencodec:"required"` + nonce BlockNonce `json:"nonce"` // caches - hash atomic.Value - sealHash atomic.Value - PowHash atomic.Value - PowDigest atomic.Value + hash atomic.Value + sealHash atomic.Value } // field type overrides for gencodec @@ -143,19 +143,19 @@ type headerMarshaling struct { } // Construct an empty header -func EmptyHeader() *Header { +func EmptyHeader(nodeCtx int) *WorkObject { h := &Header{} - h.parentHash = make([]common.Hash, common.HierarchyDepth) + wo := &WorkObject{woHeader: &WorkObjectHeader{}, woBody: &WorkObjectBody{}, tx: &Transaction{}} + h.parentHash = make([]common.Hash, common.HierarchyDepth-1) h.manifestHash = make([]common.Hash, common.HierarchyDepth) h.parentEntropy = make([]*big.Int, common.HierarchyDepth) h.parentDeltaS = make([]*big.Int, common.HierarchyDepth) h.parentUncledSubDeltaS = make([]*big.Int, common.HierarchyDepth) - h.number = make([]*big.Int, common.HierarchyDepth) + h.number = make([]*big.Int, common.HierarchyDepth-1) h.difficulty = big.NewInt(0) h.uncledS = big.NewInt(0) h.evmRoot = EmptyRootHash h.utxoRoot = EmptyRootHash - h.mixHash = EmptyRootHash h.txHash = EmptyRootHash h.etxHash = EmptyRootHash h.etxSetHash = EmptyEtxSetHash @@ -175,9 +175,25 @@ func EmptyHeader() *Header { h.parentEntropy[i] = big.NewInt(0) h.parentDeltaS[i] = big.NewInt(0) h.parentUncledSubDeltaS[i] = big.NewInt(0) + } + for i := 0; i < common.HierarchyDepth-1; i++ { + h.parentHash[i] = EmptyRootHash h.number[i] = big.NewInt(0) } - return h + wo.woHeader.SetHeaderHash(EmptyRootHash) + wo.woHeader.SetParentHash(EmptyRootHash) + wo.woHeader.SetNumber(big.NewInt(0)) + wo.woHeader.SetDifficulty(big.NewInt(0)) + wo.woHeader.SetTxHash(EmptyRootHash) + wo.woHeader.SetLocation(common.Location{}) + wo.woHeader.SetNonce(EncodeNonce(0)) + wo.woHeader.SetTime(0) + wo.woBody.SetHeader(h) + wo.woBody.SetUncles([]*WorkObjectHeader{}) + wo.woBody.SetTransactions([]*Transaction{}) + wo.woBody.SetExtTransactions([]*Transaction{}) + wo.woBody.SetManifest(BlockManifest{}) + return NewWorkObjectWithHeader(wo, &Transaction{}, nodeCtx, BlockObject) } // ProtoEncode serializes h into the Quai Proto Header format @@ -203,8 +219,6 @@ func (h *Header) ProtoEncode() (*ProtoHeader, error) { efficiencyScore := uint64(h.EfficiencyScore()) thresholdCount := uint64(h.ThresholdCount()) expansionNumber := uint64(h.ExpansionNumber()) - time := h.Time() - nonce := h.Nonce().Uint64() protoHeader := &ProtoHeader{ UncleHash: &uncleHash, @@ -219,7 +233,6 @@ func (h *Header) ProtoEncode() (*ProtoHeader, error) { PrimeTerminus: &primeTerminus, InterlinkRootHash: &interlinkRootHash, EtxEligibleSlices: &etxEligibleSlices, - Difficulty: h.Difficulty().Bytes(), UncledS: h.UncledS().Bytes(), GasLimit: &gasLimit, GasUsed: &gasUsed, @@ -227,15 +240,11 @@ func (h *Header) ProtoEncode() (*ProtoHeader, error) { ThresholdCount: &thresholdCount, ExpansionNumber: &expansionNumber, BaseFee: h.BaseFee().Bytes(), - Location: h.Location().ProtoEncode(), - Time: &time, Extra: h.Extra(), MixHash: &mixHash, - Nonce: &nonce, } for i := 0; i < common.HierarchyDepth; i++ { - protoHeader.ParentHash = append(protoHeader.ParentHash, h.ParentHash(i).ProtoEncode()) protoHeader.ManifestHash = append(protoHeader.ManifestHash, 
h.ManifestHash(i).ProtoEncode()) if h.ParentEntropy(i) != nil { protoHeader.ParentEntropy = append(protoHeader.ParentEntropy, h.ParentEntropy(i).Bytes()) @@ -246,15 +255,19 @@ func (h *Header) ProtoEncode() (*ProtoHeader, error) { if h.ParentUncledSubDeltaS(i) != nil { protoHeader.ParentUncledSubDeltaS = append(protoHeader.ParentUncledSubDeltaS, h.ParentUncledSubDeltaS(i).Bytes()) } + } + for i := 0; i < common.HierarchyDepth-1; i++ { + protoHeader.ParentHash = append(protoHeader.ParentHash, h.ParentHash(i).ProtoEncode()) if h.Number(i) != nil { protoHeader.Number = append(protoHeader.Number, h.Number(i).Bytes()) } } + return protoHeader, nil } // ProtoDecode deserializes the ProtoHeader into the Header format -func (h *Header) ProtoDecode(protoHeader *ProtoHeader) error { +func (h *Header) ProtoDecode(protoHeader *ProtoHeader, location common.Location) error { if protoHeader.ParentHash == nil { return errors.New("missing required field 'ParentHash' in Header") } @@ -294,15 +307,9 @@ func (h *Header) ProtoDecode(protoHeader *ProtoHeader) error { if protoHeader.InterlinkRootHash == nil { return errors.New("missing required field 'InterlinkRootHash' in Header") } - if protoHeader.Difficulty == nil { - return errors.New("missing required field 'Difficulty' in Header") - } if protoHeader.BaseFee == nil { return errors.New("missing required field 'BaseFee' in Header") } - if protoHeader.MixHash == nil { - return errors.New("missing required field 'MixHash' in Header") - } if protoHeader.ParentEntropy == nil { return errors.New("missing required field 'ParentEntropy' in Header") } @@ -318,12 +325,6 @@ func (h *Header) ProtoDecode(protoHeader *ProtoHeader) error { if protoHeader.Number == nil { return errors.New("missing required field 'Number' in Header") } - if protoHeader.Location == nil { - return errors.New("missing required field 'Location' in Header") - } - if protoHeader.MixHash == nil { - return errors.New("missing required field 'MixHash' in Header") - } if protoHeader.EfficiencyScore == nil { return errors.New("missing required field 'EfficiencyScore' in Header") } @@ -341,24 +342,26 @@ func (h *Header) ProtoDecode(protoHeader *ProtoHeader) error { } // Initialize the array fields before setting - h.parentHash = make([]common.Hash, common.HierarchyDepth) + h.parentHash = make([]common.Hash, common.HierarchyDepth-1) h.manifestHash = make([]common.Hash, common.HierarchyDepth) h.parentEntropy = make([]*big.Int, common.HierarchyDepth) h.parentDeltaS = make([]*big.Int, common.HierarchyDepth) h.parentUncledSubDeltaS = make([]*big.Int, common.HierarchyDepth) - h.number = make([]*big.Int, common.HierarchyDepth) + h.number = make([]*big.Int, common.HierarchyDepth-1) for i := 0; i < common.HierarchyDepth; i++ { - h.SetParentHash(common.BytesToHash(protoHeader.GetParentHash()[i].GetValue()), i) h.SetManifestHash(common.BytesToHash(protoHeader.GetManifestHash()[i].GetValue()), i) h.SetParentEntropy(new(big.Int).SetBytes(protoHeader.GetParentEntropy()[i]), i) h.SetParentDeltaS(new(big.Int).SetBytes(protoHeader.GetParentDeltaS()[i]), i) h.SetParentUncledSubDeltaS(new(big.Int).SetBytes(protoHeader.GetParentUncledSubDeltaS()[i]), i) + } + for i := 0; i < common.HierarchyDepth-1; i++ { h.SetNumber(new(big.Int).SetBytes(protoHeader.GetNumber()[i]), i) + h.SetParentHash(common.BytesToHash(protoHeader.GetParentHash()[i].GetValue()), i) } h.SetUncleHash(common.BytesToHash(protoHeader.GetUncleHash().GetValue())) - h.SetCoinbase(common.BytesToAddress(protoHeader.GetCoinbase(), 
protoHeader.GetLocation().GetValue())) + h.SetCoinbase(common.BytesToAddress(protoHeader.GetCoinbase(), location)) h.SetEVMRoot(common.BytesToHash(protoHeader.GetEvmRoot().GetValue())) h.SetUTXORoot(common.BytesToHash(protoHeader.GetUtxoRoot().GetValue())) h.SetTxHash(common.BytesToHash(protoHeader.GetTxHash().GetValue())) @@ -368,16 +371,11 @@ func (h *Header) ProtoDecode(protoHeader *ProtoHeader) error { h.SetEtxRollupHash(common.BytesToHash(protoHeader.GetEtxRollupHash().GetValue())) h.SetPrimeTerminus(common.BytesToHash(protoHeader.GetPrimeTerminus().GetValue())) h.SetInterlinkRootHash(common.BytesToHash(protoHeader.GetInterlinkRootHash().GetValue())) - h.SetDifficulty(new(big.Int).SetBytes(protoHeader.GetDifficulty())) h.SetUncledS(new(big.Int).SetBytes(protoHeader.GetUncledS())) h.SetGasLimit(protoHeader.GetGasLimit()) h.SetGasUsed(protoHeader.GetGasUsed()) h.SetBaseFee(new(big.Int).SetBytes(protoHeader.GetBaseFee())) - h.SetTime(protoHeader.GetTime()) h.SetExtra(protoHeader.GetExtra()) - h.SetMixHash(common.BytesToHash(protoHeader.GetMixHash().GetValue())) - h.SetNonce(uint64ToByteArr(protoHeader.GetNonce())) - h.SetLocation(protoHeader.GetLocation().GetValue()) h.SetEfficiencyScore(uint16(protoHeader.GetEfficiencyScore())) h.SetThresholdCount(uint16(protoHeader.GetThresholdCount())) h.SetExpansionNumber(uint8(protoHeader.GetExpansionNumber())) @@ -398,7 +396,6 @@ func (h *Header) RPCMarshalHeader() map[string]interface{} { result := map[string]interface{}{ "hash": h.Hash(), "parentHash": h.ParentHashArray(), - "difficulty": (*hexutil.Big)(h.Difficulty()), "uncledS": (*hexutil.Big)(h.UncledS()), "nonce": h.Nonce(), "sha3Uncles": h.UncleHash(), @@ -407,7 +404,6 @@ func (h *Header) RPCMarshalHeader() map[string]interface{} { "miner": h.Coinbase(), "extraData": hexutil.Bytes(h.Extra()), "size": hexutil.Uint64(h.Size()), - "timestamp": hexutil.Uint64(h.Time()), "transactionsRoot": h.TxHash(), "receiptsRoot": h.ReceiptHash(), "extTransactionsRoot": h.EtxHash(), @@ -418,7 +414,6 @@ func (h *Header) RPCMarshalHeader() map[string]interface{} { "manifestHash": h.ManifestHashArray(), "gasLimit": hexutil.Uint(h.GasLimit()), "gasUsed": hexutil.Uint(h.GasUsed()), - "location": hexutil.Bytes(h.Location()), "mixHash": h.MixHash(), "efficiencyScore": hexutil.Uint64(h.EfficiencyScore()), "thresholdCount": hexutil.Uint64(h.ThresholdCount()), @@ -432,11 +427,14 @@ func (h *Header) RPCMarshalHeader() map[string]interface{} { parentUncledS := make([]*hexutil.Big, common.HierarchyDepth) parentUncledSubDeltaS := make([]*hexutil.Big, common.HierarchyDepth) for i := 0; i < common.HierarchyDepth; i++ { - number[i] = (*hexutil.Big)(h.Number(i)) parentEntropy[i] = (*hexutil.Big)(h.ParentEntropy(i)) parentDeltaS[i] = (*hexutil.Big)(h.ParentDeltaS(i)) parentUncledSubDeltaS[i] = (*hexutil.Big)(h.ParentUncledSubDeltaS(i)) } + for i := 0; i < common.HierarchyDepth-1; i++ { + number[i] = (*hexutil.Big)(h.Number(i)) + } + result["number"] = number result["parentEntropy"] = parentEntropy result["parentDeltaS"] = parentDeltaS @@ -496,9 +494,6 @@ func (h *Header) ManifestHash(nodeCtx int) common.Hash { func (h *Header) ReceiptHash() common.Hash { return h.receiptHash } -func (h *Header) Difficulty() *big.Int { - return h.difficulty -} func (h *Header) Number(nodeCtx int) *big.Int { return h.number[nodeCtx] } @@ -526,8 +521,6 @@ func (h *Header) EtxEligibleSlices() common.Hash { func (h *Header) BaseFee() *big.Int { return h.baseFee } -func (h *Header) Location() common.Location { return h.location } -func (h *Header) 
Time() uint64 { return h.time } func (h *Header) Extra() []byte { return common.CopyBytes(h.extra) } func (h *Header) MixHash() common.Hash { return h.mixHash } func (h *Header) PrimeTerminus() common.Hash { return h.primeTerminus } @@ -585,6 +578,11 @@ func (h *Header) SetPrimeTerminus(val common.Hash) { h.sealHash = atomic.Value{} // clear sealHash cache h.primeTerminus = val } +func (h *Header) SetUncledS(val *big.Int) { + h.hash = atomic.Value{} // clear hash cache + h.sealHash = atomic.Value{} // clear sealHash cache + h.uncledS = val +} func (h *Header) SetInterlinkRootHash(val common.Hash) { h.hash = atomic.Value{} // clear hash cache h.sealHash = atomic.Value{} // clear sealHash cache @@ -619,16 +617,6 @@ func (h *Header) SetReceiptHash(val common.Hash) { h.sealHash = atomic.Value{} // clear sealHash cache h.receiptHash = val } -func (h *Header) SetDifficulty(val *big.Int) { - h.hash = atomic.Value{} // clear hash cache - h.sealHash = atomic.Value{} // clear sealHash cache - h.difficulty = new(big.Int).Set(val) -} -func (h *Header) SetUncledS(val *big.Int) { - h.hash = atomic.Value{} // clear hash cache - h.sealHash = atomic.Value{} // clear sealHash cache - h.uncledS = new(big.Int).Set(val) -} func (h *Header) SetNumber(val *big.Int, nodeCtx int) { h.hash = atomic.Value{} // clear hash cache h.sealHash = atomic.Value{} // clear sealHash cache @@ -669,30 +657,12 @@ func (h *Header) SetBaseFee(val *big.Int) { h.sealHash = atomic.Value{} // clear sealHash cache h.baseFee = new(big.Int).Set(val) } -func (h *Header) SetLocation(val common.Location) { - h.hash = atomic.Value{} // clear hash cache - h.sealHash = atomic.Value{} // clear sealHash cache - h.location = val -} -func (h *Header) SetTime(val uint64) { - h.hash = atomic.Value{} // clear hash cache - h.sealHash = atomic.Value{} // clear sealHash cache - h.time = val -} func (h *Header) SetExtra(val []byte) { h.hash = atomic.Value{} // clear hash cache h.sealHash = atomic.Value{} // clear sealHash cache h.extra = make([]byte, len(val)) copy(h.extra, val) } -func (h *Header) SetMixHash(val common.Hash) { - h.hash = atomic.Value{} // clear hash cache - h.mixHash = val -} -func (h *Header) SetNonce(val BlockNonce) { - h.hash = atomic.Value{} // clear hash cache, but NOT sealHash - h.nonce = val -} // Array accessors func (h *Header) ParentHashArray() []common.Hash { return h.parentHash } @@ -720,7 +690,6 @@ func (h *Header) SealEncode() *ProtoHeader { expansionNumber := uint64(h.ExpansionNumber()) gasLimit := h.GasLimit() gasUsed := h.GasUsed() - time := h.Time() protoSealData := &ProtoHeader{ UncleHash: &uncleHash, @@ -732,7 +701,6 @@ func (h *Header) SealEncode() *ProtoHeader { EtxSetHash: &etxSetHash, EtxRollupHash: &etxRollupHash, ReceiptHash: &receiptHash, - Difficulty: h.Difficulty().Bytes(), GasLimit: &gasLimit, GasUsed: &gasUsed, BaseFee: h.BaseFee().Bytes(), @@ -743,13 +711,10 @@ func (h *Header) SealEncode() *ProtoHeader { EfficiencyScore: &efficiencyScore, ThresholdCount: &thresholdCount, ExpansionNumber: &expansionNumber, - Location: h.Location().ProtoEncode(), - Time: &time, Extra: h.Extra(), } for i := 0; i < common.HierarchyDepth; i++ { - protoSealData.ParentHash = append(protoSealData.ParentHash, h.ParentHash(i).ProtoEncode()) protoSealData.ManifestHash = append(protoSealData.ManifestHash, h.ManifestHash(i).ProtoEncode()) if h.ParentEntropy(i) != nil { protoSealData.ParentEntropy = append(protoSealData.ParentEntropy, h.ParentEntropy(i).Bytes()) @@ -757,15 +722,19 @@ func (h *Header) SealEncode() *ProtoHeader { if 
h.ParentDeltaS(i) != nil { protoSealData.ParentDeltaS = append(protoSealData.ParentDeltaS, h.ParentDeltaS(i).Bytes()) } + + } + for i := 0; i < common.HierarchyDepth-1; i++ { if h.Number(i) != nil { protoSealData.Number = append(protoSealData.Number, h.Number(i).Bytes()) } + protoSealData.ParentHash = append(protoSealData.ParentHash, h.ParentHash(i).ProtoEncode()) } return protoSealData } // SealHash returns the hash of a block prior to it being sealed. -func (h *Header) SealHash() (hash common.Hash) { +func (h *Header) Hash() (hash common.Hash) { hasherMu.Lock() defer hasherMu.Unlock() hasher.Reset() @@ -779,21 +748,6 @@ func (h *Header) SealHash() (hash common.Hash) { return hash } -// Hash returns the nonce'd hash of the header. This is just the Blake3 hash of -// SealHash suffixed with a nonce. -func (h *Header) Hash() (hash common.Hash) { - sealHash := h.SealHash().Bytes() - hasherMu.Lock() - defer hasherMu.Unlock() - hasher.Reset() - var hData [40]byte - copy(hData[:], h.Nonce().Bytes()) - copy(hData[len(h.nonce):], sealHash) - sum := blake3.Sum256(hData[:]) - hash.SetBytes(sum[:]) - return hash -} - // totalBitLen returns the cumulative BitLen for each element in a big.Int slice. func totalBitLen(array []*big.Int) int { bitLen := 0 @@ -810,7 +764,7 @@ var headerSize = common.StorageSize(reflect.TypeOf(Header{}).Size()) // Size returns the approximate memory used by all internal contents. It is used // to approximate and limit the memory consumption of various caches. func (h *Header) Size() common.StorageSize { - return headerSize + common.StorageSize(len(h.extra)+(h.difficulty.BitLen()+totalBitLen(h.number))/8) + return headerSize + common.StorageSize(len(h.extra)+totalBitLen(h.number)/8) } // SanityCheck checks a few basic things -- these checks are way beyond what @@ -824,9 +778,6 @@ func (h *Header) SanityCheck() error { if h.manifestHash == nil || len(h.manifestHash) != common.HierarchyDepth { return fmt.Errorf("field cannot be `nil`: manifestHash") } - if h.difficulty == nil { - return fmt.Errorf("field cannot be `nil`: difficulty") - } if h.number == nil || len(h.number) != common.HierarchyDepth { return fmt.Errorf("field cannot be `nil`: number") } @@ -842,7 +793,7 @@ func (h *Header) SanityCheck() error { if bfLen := h.baseFee.BitLen(); bfLen > 256 { return fmt.Errorf("too large base fee: bitlen %d", bfLen) } - for i := 0; i < common.HierarchyDepth; i++ { + for i := 0; i < common.HierarchyDepth-1; i++ { if h.number == nil { return fmt.Errorf("field cannot be `nil`: number[%d]", i) } @@ -850,9 +801,6 @@ func (h *Header) SanityCheck() error { return fmt.Errorf("too large block number[%d]: bitlen %d", i, h.number[i].BitLen()) } } - if diffLen := h.difficulty.BitLen(); diffLen > 80 { - return fmt.Errorf("too large block difficulty: bitlen %d", diffLen) - } if eLen := len(h.extra); eLen > 100*1024 { return fmt.Errorf("too large block extradata: size %d", eLen) } @@ -895,202 +843,25 @@ func (h *Header) EmptyReceipts() bool { return h.ReceiptHash() == EmptyRootHash } -// Body is a simple (mutable, non-safe) data container for storing and moving -// a block's data contents (transactions and uncles) together. 
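One pattern repeats through EmptyHeader, ProtoEncode/ProtoDecode, and SealEncode above, and again in CopyHeader and the JSON methods further down: the per-context entropy and manifest slices keep common.HierarchyDepth entries, while parentHash and number shrink to HierarchyDepth-1, since the remaining context's parent hash and number now sit on the WorkObjectHeader. Schematically (a sketch with hypothetical input slices, not a literal excerpt from the patch):

```go
// Sketch of the recurring two-loop shape; setter names are the ones used in this file.
func fillHeader(h *types.Header,
	manifestHashes, parentHashes []common.Hash,
	parentEntropy, parentDeltaS, parentUncledSubDeltaS, numbers []*big.Int) {
	for i := 0; i < common.HierarchyDepth; i++ {
		h.SetManifestHash(manifestHashes[i], i)
		h.SetParentEntropy(parentEntropy[i], i)
		h.SetParentDeltaS(parentDeltaS[i], i)
		h.SetParentUncledSubDeltaS(parentUncledSubDeltaS[i], i)
	}
	for i := 0; i < common.HierarchyDepth-1; i++ {
		h.SetParentHash(parentHashes[i], i) // the remaining context's parent and number
		h.SetNumber(numbers[i], i)          // now live on the WorkObjectHeader
	}
}
```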
-type Body struct { - Transactions Transactions - Uncles []*Header - ExtTransactions Transactions - SubManifest BlockManifest - InterlinkHashes common.Hashes -} - -// ProtoEncode serializes b into the Quai Proto Body format -func (b *Body) ProtoEncode() (*ProtoBody, error) { - protoTransactions, err := b.Transactions.ProtoEncode() - if err != nil { - return nil, err - } - protoExtTransactions, err := b.ExtTransactions.ProtoEncode() - if err != nil { - return nil, err - } - protoUncles := &ProtoHeaders{} - for _, unc := range b.Uncles { - protoUncle, err := unc.ProtoEncode() - if err != nil { - return nil, err - } - protoUncles.Headers = append(protoUncles.Headers, protoUncle) - } - protoManifest, err := b.SubManifest.ProtoEncode() - if err != nil { - return nil, err - } - protoInterlinkHashes := b.InterlinkHashes.ProtoEncode() - - return &ProtoBody{ - Txs: protoTransactions, - Uncles: protoUncles, - Etxs: protoExtTransactions, - Manifest: protoManifest, - InterlinkHashes: protoInterlinkHashes, - }, nil -} - -// ProtoDecode deserializes the ProtoBody into the Body format -func (b *Body) ProtoDecode(protoBody *ProtoBody, location common.Location) error { - if protoBody.Txs == nil { - return errors.New("missing required field 'Txs' in Body") - } - if protoBody.Uncles == nil { - return errors.New("missing required field 'Uncles' in Body") - } - if protoBody.Etxs == nil { - return errors.New("missing required field 'Etxs' in Body") - } - if protoBody.Manifest == nil { - return errors.New("missing required field 'Manifest' in Body") - } - if protoBody.InterlinkHashes == nil { - return errors.New("missing required field 'InterlinkHashes' in Body") - } - - b.Transactions = Transactions{} - err := b.Transactions.ProtoDecode(protoBody.GetTxs(), location) - if err != nil { - return err - } - b.ExtTransactions = Transactions{} - err = b.ExtTransactions.ProtoDecode(protoBody.GetEtxs(), location) - if err != nil { - return err - } - b.SubManifest = BlockManifest{} - err = b.SubManifest.ProtoDecode(protoBody.GetManifest()) - if err != nil { - return err - } - b.Uncles = make([]*Header, len(protoBody.GetUncles().GetHeaders())) - for i, protoUncle := range protoBody.GetUncles().GetHeaders() { - uncle := &Header{} - err = uncle.ProtoDecode(protoUncle) - if err != nil { - return err - } - b.Uncles[i] = uncle - } - b.InterlinkHashes = common.Hashes{} - b.InterlinkHashes.ProtoDecode(protoBody.GetInterlinkHashes()) - - return nil -} - -func (b *Body) QuaiTransactions() []*Transaction { - quaiTxs := make([]*Transaction, 0) - for _, t := range b.Transactions { - if t.Type() != QiTxType { - quaiTxs = append(quaiTxs, t) - } - } - return quaiTxs -} - -// Block represents an entire block in the Quai blockchain. -type Block struct { - header *Header - uncles []*Header - transactions Transactions - extTransactions Transactions - subManifest BlockManifest - interlinkHashes common.Hashes - - // caches - size atomic.Value - appendTime atomic.Value - - // These fields are used by package eth to track - // inter-peer block relay. 
- ReceivedAt time.Time - ReceivedFrom interface{} -} - -func NewBlock(header *Header, txs []*Transaction, uncles []*Header, etxs []*Transaction, subManifest BlockManifest, receipts []*Receipt, hasher TrieHasher, nodeCtx int) *Block { - b := &Block{header: CopyHeader(header)} - - // TODO: panic if len(txs) != len(receipts) - if len(txs) == 0 { - b.header.SetTxHash(EmptyRootHash) - } else { - b.header.SetTxHash(DeriveSha(Transactions(txs), hasher)) - b.transactions = make(Transactions, len(txs)) - copy(b.transactions, txs) - } - - if len(receipts) == 0 { - b.header.SetReceiptHash(EmptyRootHash) - } else { - b.header.SetReceiptHash(DeriveSha(Receipts(receipts), hasher)) - } - - if len(uncles) == 0 { - b.header.SetUncleHash(EmptyUncleHash) - } else { - b.header.SetUncleHash(CalcUncleHash(uncles)) - b.uncles = make([]*Header, len(uncles)) - for i := range uncles { - b.uncles[i] = CopyHeader(uncles[i]) - } - } - - if len(etxs) == 0 { - b.header.SetEtxHash(EmptyRootHash) - } else { - b.header.SetEtxHash(DeriveSha(Transactions(etxs), hasher)) - b.extTransactions = make(Transactions, len(etxs)) - copy(b.extTransactions, etxs) - } - - // Since the subordinate's manifest lives in our body, we still need to check - // that the manifest matches the subordinate's manifest hash, but we do not set - // the subordinate's manifest hash. - subManifestHash := EmptyRootHash - if len(subManifest) != 0 { - subManifestHash = DeriveSha(subManifest, hasher) - b.subManifest = make(BlockManifest, len(subManifest)) - copy(b.subManifest, subManifest) - } - if nodeCtx < common.ZONE_CTX && subManifestHash != b.Header().ManifestHash(nodeCtx+1) { - log.Global.Error("attempted to build block with invalid subordinate manifest") - return nil - } - - return b -} - -// NewBlockWithHeader creates a block with the given header data. The -// header data is copied, changes to header and to the field values -// will not affect the block. -func NewBlockWithHeader(header *Header) *Block { - return &Block{header: CopyHeader(header)} -} - // CopyHeader creates a deep copy of a block header to prevent side effects from // modifying a header variable. 
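Copying splits the same way: CopyHeader (next) keeps only the fields still owned by Header, while CopyWorkObject, used in the StateAtBlock and PendingHeader hunks, copies the wrapper as a whole. A sketch of the two granularities (accessor names are taken from elsewhere in this patch; CopyWorkObject's exact field handling is assumed):

```go
// Sketch only -- demonstrates the two copy granularities.
func copyBoth(wo *types.WorkObject) (*types.WorkObject, *types.Header) {
	cpyWo := types.CopyWorkObject(wo)              // whole work object: woHeader + woBody
	cpyHdr := types.CopyHeader(wo.Body().Header()) // inner header only: no nonce, time,
	                                               // location, difficulty, or mixHash anymore
	return cpyWo, cpyHdr
}
```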
func CopyHeader(h *Header) *Header { cpy := *h - cpy.parentHash = make([]common.Hash, common.HierarchyDepth) + cpy.parentHash = make([]common.Hash, common.HierarchyDepth-1) cpy.manifestHash = make([]common.Hash, common.HierarchyDepth) cpy.parentEntropy = make([]*big.Int, common.HierarchyDepth) cpy.parentDeltaS = make([]*big.Int, common.HierarchyDepth) cpy.parentUncledSubDeltaS = make([]*big.Int, common.HierarchyDepth) cpy.number = make([]*big.Int, common.HierarchyDepth) + cpy.number = make([]*big.Int, common.HierarchyDepth-1) for i := 0; i < common.HierarchyDepth; i++ { - cpy.SetParentHash(h.ParentHash(i), i) cpy.SetManifestHash(h.ManifestHash(i), i) cpy.SetParentEntropy(h.ParentEntropy(i), i) cpy.SetParentDeltaS(h.ParentDeltaS(i), i) cpy.SetParentUncledSubDeltaS(h.ParentUncledSubDeltaS(i), i) + } + for i := 0; i < common.HierarchyDepth-1; i++ { + cpy.SetParentHash(h.ParentHash(i), i) cpy.SetNumber(h.Number(i), i) } cpy.SetUncledS(h.UncledS()) @@ -1108,7 +879,6 @@ func CopyHeader(h *Header) *Header { cpy.extra = make([]byte, len(h.extra)) copy(cpy.extra, h.extra) } - cpy.SetDifficulty(h.Difficulty()) cpy.SetGasLimit(h.GasLimit()) cpy.SetGasUsed(h.GasUsed()) cpy.SetEfficiencyScore(h.EfficiencyScore()) @@ -1116,230 +886,34 @@ func CopyHeader(h *Header) *Header { cpy.SetExpansionNumber(h.ExpansionNumber()) cpy.SetEtxEligibleSlices(h.EtxEligibleSlices()) cpy.SetBaseFee(h.BaseFee()) - cpy.SetLocation(h.location) - cpy.SetTime(h.time) - cpy.SetNonce(h.nonce) return &cpy } -// ProtoEncode serializes h into the Quai Proto Block format -func (b *Block) ProtoEncode() (*ProtoBlock, error) { - protoHeader, err := b.header.ProtoEncode() - if err != nil { - return nil, err - } - protoBody, err := b.Body().ProtoEncode() - if err != nil { - return nil, err - } - protoBlock := &ProtoBlock{ - Header: protoHeader, - Body: protoBody, - } - return protoBlock, nil -} - -// ProtoEncode deserializes th ProtoHeader into the Header format -func (b *Block) ProtoDecode(protoBlock *ProtoBlock, location common.Location) error { - b.header = &Header{} - err := b.header.ProtoDecode(protoBlock.GetHeader()) - if err != nil { - return err - } - body := &Body{} - err = body.ProtoDecode(protoBlock.GetBody(), location) - if err != nil { - return err - } - b.transactions = body.Transactions - b.extTransactions = body.ExtTransactions - b.uncles = body.Uncles - b.subManifest = body.SubManifest - b.interlinkHashes = body.InterlinkHashes - return nil -} - -// Wrapped header accessors -func (b *Block) ParentHash(nodeCtx int) common.Hash { return b.header.ParentHash(nodeCtx) } -func (b *Block) UncleHash() common.Hash { return b.header.UncleHash() } -func (b *Block) Coinbase() common.Address { return b.header.Coinbase() } -func (b *Block) EVMRoot() common.Hash { return b.header.EVMRoot() } -func (b *Block) UTXORoot() common.Hash { return b.header.UTXORoot() } -func (b *Block) TxHash() common.Hash { return b.header.TxHash() } -func (b *Block) EtxHash() common.Hash { return b.header.EtxHash() } -func (b *Block) EtxSetHash() common.Hash { return b.header.EtxSetHash() } -func (b *Block) EtxRollupHash() common.Hash { return b.header.EtxRollupHash() } -func (b *Block) ManifestHash(nodeCtx int) common.Hash { return b.header.ManifestHash(nodeCtx) } -func (b *Block) ReceiptHash() common.Hash { return b.header.ReceiptHash() } -func (b *Block) Difficulty(nodeCtx int) *big.Int { return b.header.Difficulty() } -func (b *Block) ParentEntropy(nodeCtx int) *big.Int { return b.header.ParentEntropy(nodeCtx) } -func (b *Block) ParentDeltaS(nodeCtx int) 
*big.Int { return b.header.ParentDeltaS(nodeCtx) } -func (b *Block) Number(nodeCtx int) *big.Int { return b.header.Number(nodeCtx) } -func (b *Block) NumberU64(nodeCtx int) uint64 { return b.header.NumberU64(nodeCtx) } -func (b *Block) GasLimit() uint64 { return b.header.GasLimit() } -func (b *Block) GasUsed() uint64 { return b.header.GasUsed() } -func (b *Block) BaseFee() *big.Int { return b.header.BaseFee() } -func (b *Block) Location() common.Location { return b.header.Location() } -func (b *Block) Time() uint64 { return b.header.Time() } -func (b *Block) Extra() []byte { return b.header.Extra() } -func (b *Block) Nonce() BlockNonce { return b.header.Nonce() } -func (b *Block) NonceU64() uint64 { return b.header.NonceU64() } - -// TODO: copies - -func (b *Block) Uncles() []*Header { return b.uncles } -func (b *Block) Transactions() Transactions { return b.transactions } -func (b *Block) Transaction(hash common.Hash) *Transaction { - for _, transaction := range b.Transactions() { - if transaction.Hash() == hash { - return transaction - } - } - return nil -} -func (b *Block) ExtTransactions() Transactions { return b.extTransactions } -func (b *Block) ExtTransaction(hash common.Hash) *Transaction { - for _, transaction := range b.ExtTransactions() { - if transaction.Hash() == hash { - return transaction - } - } - return nil -} -func (b *Block) SubManifest() BlockManifest { return b.subManifest } -func (b *Block) InterlinkHashes() common.Hashes { return b.interlinkHashes } - -func (b *Block) Header() *Header { return b.header } - -func (b *Block) QiTransactions() []*Transaction { - // TODO: cache the UTXO loop - qiTxs := make([]*Transaction, 0) - for _, t := range b.Transactions() { - if t.Type() == QiTxType { - qiTxs = append(qiTxs, t) - } - } - return qiTxs -} - -func (b *Block) QuaiTransactions() []*Transaction { - quaiTxs := make([]*Transaction, 0) - for _, t := range b.Transactions() { - if t.Type() != QiTxType && (t.To() == nil || t.To().IsInQuaiLedgerScope()) { - quaiTxs = append(quaiTxs, t) - } - } - return quaiTxs -} - -// Body returns the non-header content of the block. -func (b *Block) Body() *Body { - return &Body{b.transactions, b.uncles, b.extTransactions, b.subManifest, b.interlinkHashes} -} - -// Size returns the true RLP encoded storage size of the block, either by encoding -// and returning it, or returning a previsouly cached value. -func (b *Block) Size() common.StorageSize { - if size := b.size.Load(); size != nil { - return size.(common.StorageSize) - } - c := writeCounter(0) - rlp.Encode(&c, b) - b.size.Store(common.StorageSize(c)) - return common.StorageSize(c) -} - -// SanityCheck can be used to prevent that unbounded fields are -// stuffed with junk data to add processing overhead -func (b *Block) SanityCheck() error { - return b.header.SanityCheck() -} - -type writeCounter common.StorageSize - -func (c *writeCounter) Write(b []byte) (int, error) { - *c += writeCounter(len(b)) - return len(b), nil -} - -func CalcUncleHash(uncles []*Header) common.Hash { - if len(uncles) == 0 { - return EmptyUncleHash - } - return RlpHash(uncles) -} - -// WithSeal returns a new block with the data from b but the header replaced with -// the sealed one. 
-func (b *Block) WithSeal(header *Header) *Block { - return &Block{ - header: CopyHeader(header), - transactions: b.transactions, - uncles: b.uncles, - extTransactions: b.extTransactions, - subManifest: b.subManifest, - } -} - -// WithBody returns a new block with the given transaction and uncle contents, for a single context -func (b *Block) WithBody(transactions []*Transaction, uncles []*Header, extTransactions []*Transaction, subManifest BlockManifest, interlinkHashes common.Hashes) *Block { - block := &Block{ - header: CopyHeader(b.header), - transactions: make([]*Transaction, len(transactions)), - uncles: make([]*Header, len(uncles)), - extTransactions: make([]*Transaction, len(extTransactions)), - subManifest: make(BlockManifest, len(subManifest)), - interlinkHashes: make(common.Hashes, len(interlinkHashes)), - } - copy(block.transactions, transactions) - copy(block.extTransactions, extTransactions) - copy(block.subManifest, subManifest) - copy(block.interlinkHashes, interlinkHashes) - for i := range uncles { - block.uncles[i] = CopyHeader(uncles[i]) - } - return block -} - -// Hash returns the keccak256 hash of b's header. -// The hash is computed on the first call and cached thereafter. -func (b *Block) Hash() common.Hash { - return b.header.Hash() -} - -// GetAppendTime returns the appendTime of the block -// The appendTime is computed on the first call and cached thereafter. -func (b *Block) GetAppendTime() time.Duration { - if appendTime := b.appendTime.Load(); appendTime != nil { - if val, ok := appendTime.(time.Duration); ok { - return val - } - } - return -1 -} - -func (b *Block) SetAppendTime(appendTime time.Duration) { - b.appendTime.Store(appendTime) -} - -type Blocks []*Block - // PendingHeader stores the header and termini value associated with the header. 
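The pending-header plumbing changes in step (next hunk): PendingHeader now stores the whole work object, keeps Header() as a convenience that reaches through woBody, and gains a WorkObject() accessor. Roughly, under those assumptions:

```go
// Sketch only; termini construction is elided.
func wrapPending(wo *types.WorkObject, termini types.Termini) *types.WorkObject {
	ph := types.NewPendingHeader(wo, termini) // copies wo internally per this patch
	_ = ph.Header()                           // convenience accessor into woBody
	return ph.WorkObject()                    // the full work object back out
}
```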
type PendingHeader struct { - header *Header `json:"header"` - termini Termini `json:"termini"` + wo *WorkObject `json:"wo"` + termini Termini `json:"termini"` } // accessor methods for pending header func (ph PendingHeader) Header() *Header { - return ph.header + return ph.wo.woBody.header +} + +func (ph PendingHeader) WorkObject() *WorkObject { + return ph.wo } + func (ph PendingHeader) Termini() Termini { return ph.termini } -func (ph *PendingHeader) SetHeader(header *Header) { - ph.header = CopyHeader(header) +func (ph *PendingHeader) SetHeader(header *WorkObject) { + ph.wo = header +} + +func (ph *PendingHeader) SetWorkObject(wo *WorkObject) { + ph.wo = wo } func (ph *PendingHeader) SetTermini(termini Termini) { @@ -1352,37 +926,37 @@ func EmptyPendingHeader() PendingHeader { return pendingHeader } -func NewPendingHeader(header *Header, termini Termini) PendingHeader { +func NewPendingHeader(wo *WorkObject, termini Termini) PendingHeader { emptyPh := EmptyPendingHeader() - emptyPh.SetHeader(header) + emptyPh.wo = CopyWorkObject(wo) emptyPh.SetTermini(termini) return emptyPh } func CopyPendingHeader(ph *PendingHeader) *PendingHeader { cpy := *ph - cpy.SetHeader(CopyHeader(ph.Header())) + cpy.SetHeader(CopyWorkObject(ph.wo)) cpy.SetTermini(CopyTermini(ph.Termini())) return &cpy } // ProtoEncode serializes h into the Quai Proto PendingHeader format func (ph PendingHeader) ProtoEncode() (*ProtoPendingHeader, error) { - protoHeader, err := ph.Header().ProtoEncode() + protoWorkObject, err := ph.WorkObject().ProtoEncode(PEtxObject) if err != nil { return nil, err } protoTermini := ph.Termini().ProtoEncode() return &ProtoPendingHeader{ - Header: protoHeader, + Wo: protoWorkObject, Termini: protoTermini, }, nil } // ProtoEncode deserializes the ProtoHeader into the Header format -func (ph *PendingHeader) ProtoDecode(protoPendingHeader *ProtoPendingHeader) error { - ph.header = &Header{} - err := ph.header.ProtoDecode(protoPendingHeader.GetHeader()) +func (ph *PendingHeader) ProtoDecode(protoPendingHeader *ProtoPendingHeader, location common.Location) error { + ph.wo = &WorkObject{} + err := ph.wo.ProtoDecode(protoPendingHeader.GetWo(), location, PEtxObject) if err != nil { return err } @@ -1396,7 +970,7 @@ func (ph *PendingHeader) ProtoDecode(protoPendingHeader *ProtoPendingHeader) err // "external" pending header encoding. used for rlp type extPendingHeader struct { - Header *Header + Wo *WorkObject Termini Termini } @@ -1408,24 +982,6 @@ func (t Termini) RPCMarshalTermini() map[string]interface{} { return result } -// DecodeRLP decodes the Quai RLP encoding into pending header format. -func (p *PendingHeader) DecodeRLP(s *rlp.Stream) error { - var eb extPendingHeader - if err := s.Decode(&eb); err != nil { - return err - } - p.header, p.termini = eb.Header, eb.Termini - return nil -} - -// EncodeRLP serializes b into the Quai RLP format. -func (p PendingHeader) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, extPendingHeader{ - Header: p.header, - Termini: p.termini, - }) -} - // Termini stores the dom terminus (i.e the previous dom block) and // subTermini(i.e the dom blocks that have occured in the subordinate chains) type Termini struct { diff --git a/core/types/etx_set.go b/core/types/etx_set.go index bed2ffeeb5..7f5a10a464 100644 --- a/core/types/etx_set.go +++ b/core/types/etx_set.go @@ -46,7 +46,7 @@ func (set *EtxSet) ProtoEncode() *ProtoEtxSet { } // ProtoDecode decodes the EtxSet from protobuf format. 
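A related theme runs through the decode paths below: because Header no longer carries its Location, PendingEtxs, PendingEtxsRollup, and Header.ProtoDecode all take the location as an argument, while EtxSet.ProtoDecode drops its now-unused location parameter. For example (a sketch; the proto message and location are assumed to arrive from the wire):

```go
// Sketch only -- the helper name is hypothetical.
func decodePendingEtxs(msg *types.ProtoPendingEtxs, loc common.Location) (*types.PendingEtxs, error) {
	p := new(types.PendingEtxs)
	// The zone location is no longer read off p.Header; the caller supplies it.
	if err := p.ProtoDecode(msg, loc); err != nil {
		return nil, err
	}
	return p, nil
}
```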
-func (set *EtxSet) ProtoDecode(protoSet *ProtoEtxSet, location common.Location) error { +func (set *EtxSet) ProtoDecode(protoSet *ProtoEtxSet) error { set.ETXHashes = protoSet.GetEtxHashes() return nil } diff --git a/core/types/external_tx.go b/core/types/external_tx.go index 2dc4c39987..95123e3481 100644 --- a/core/types/external_tx.go +++ b/core/types/external_tx.go @@ -31,7 +31,7 @@ type ExternalTx struct { // PendingEtxsRollup is Header and EtxRollups of that header that should // be forward propagated type PendingEtxsRollup struct { - Header *Header `json:"header" gencodec:"required"` + Header *WorkObject `json:"header" gencodec:"required"` EtxsRollup Transactions `json:"etxsrollup" gencodec:"required"` } @@ -45,7 +45,7 @@ func (p *PendingEtxsRollup) IsValid(hasher TrieHasher) bool { // ProtoEncode encodes the PendingEtxsRollup to protobuf format. func (p *PendingEtxsRollup) ProtoEncode() (*ProtoPendingEtxsRollup, error) { - header, err := p.Header.ProtoEncode() + header, err := p.Header.ProtoEncode(PEtxObject) if err != nil { return nil, err } @@ -64,8 +64,8 @@ func (p *PendingEtxsRollup) ProtoDecode(protoPendingEtxsRollup *ProtoPendingEtxs if protoPendingEtxsRollup.Header == nil { return errors.New("header is nil in ProtoDecode") } - p.Header = new(Header) - err := p.Header.ProtoDecode(protoPendingEtxsRollup.GetHeader()) + p.Header = new(WorkObject) + err := p.Header.ProtoDecode(protoPendingEtxsRollup.GetHeader(), location, PEtxObject) if err != nil { return err } @@ -86,7 +86,7 @@ func (p *PendingEtxsRollup) ProtoDecode(protoPendingEtxsRollup *ProtoPendingEtxs // itself, so the Etxs list will just contain the ETXs emitted directly in that // zone block (a.k.a. a singleton). type PendingEtxs struct { - Header *Header `json:"header" gencodec:"required"` + Header *WorkObject `json:"header" gencodec:"required"` Etxs Transactions `json:"etxs" gencodec:"required"` } @@ -100,7 +100,7 @@ func (p *PendingEtxs) IsValid(hasher TrieHasher) bool { // ProtoEncode encodes the PendingEtxs to protobuf format. func (p *PendingEtxs) ProtoEncode() (*ProtoPendingEtxs, error) { - header, err := p.Header.ProtoEncode() + header, err := p.Header.ProtoEncode(PEtxObject) if err != nil { return nil, err } @@ -115,17 +115,17 @@ func (p *PendingEtxs) ProtoEncode() (*ProtoPendingEtxs, error) { } // ProtoDecode decodes the protobuf to a PendingEtxs representation. -func (p *PendingEtxs) ProtoDecode(protoPendingEtxs *ProtoPendingEtxs) error { +func (p *PendingEtxs) ProtoDecode(protoPendingEtxs *ProtoPendingEtxs, location common.Location) error { if protoPendingEtxs.Header == nil { return errors.New("header is nil in ProtoDecode") } - p.Header = new(Header) - err := p.Header.ProtoDecode(protoPendingEtxs.GetHeader()) + p.Header = new(WorkObject) + err := p.Header.ProtoDecode(protoPendingEtxs.GetHeader(), location, PEtxObject) if err != nil { return err } p.Etxs = Transactions{} - err = p.Etxs.ProtoDecode(protoPendingEtxs.GetEtxs(), p.Header.Location()) + err = p.Etxs.ProtoDecode(protoPendingEtxs.GetEtxs(), location) if err != nil { return err } diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go index 0116e540d7..6286beeae6 100644 --- a/core/types/gen_header_json.go +++ b/core/types/gen_header_json.go @@ -16,51 +16,49 @@ var _ = (*headerMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (h Header) MarshalJSON() ([]byte, error) { var enc struct { - ParentHash []common.Hash `json:"parentHash" gencodec:"required"` - UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase common.Address `json:"miner" gencodec:"required"` - EVMRoot common.Hash `json:"evmRoot" gencodec:"required"` - UTXORoot common.Hash `json:"utxoRoot" gencodec:"required"` - TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` - EtxHash common.Hash `json:"extTransactionsRoot" gencodec:"required"` - EtxSetHash common.Hash `json:"etxSetHash" gencodec:"required"` - EtxRollupHash common.Hash `json:"extRollupRoot" gencodec:"required"` - ManifestHash []common.Hash `json:"manifestHash" gencodec:"required"` - ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` - Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` - ParentEntropy []*hexutil.Big `json:"parentEntropy" gencodec:"required"` - ParentDeltaS []*hexutil.Big `json:"parentDeltaS" gencodec:"required"` - ParentUncledSubDeltaS []*hexutil.Big `json:"parentUncledSubDeltaS" gencodec:"required"` - PrimeTerminus common.Hash `json:"primeTerminus" gencodec:"required"` - InterlinkRootHash common.Hash `json:"interlinkRootHash" gencodec:"required"` - UncledS *hexutil.Big `json:"uncledS" gencodec:"required"` - Number []*hexutil.Big `json:"number" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - EfficiencyScore hexutil.Uint64 `json:"efficiencyScore" gencodec:"required"` - ThresholdCount hexutil.Uint64 `json:"thresholdCount" gencodec:"required"` - ExpansionNumber hexutil.Uint64 `json:"expansionNumber" gencodec:"required"` - EtxEligibleSlices common.Hash `json:"etxEligibleSlices" gencodec:"required"` - BaseFee *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - Location hexutil.Bytes `json:"location" gencodec:"required"` - Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` - Extra hexutil.Bytes `json:"extraData" gencodec:"required"` - MixHash common.Hash `json:"mixHash" gencodec:"required"` - Nonce BlockNonce `json:"nonce"` - Hash common.Hash `json:"hash"` + ParentHash []common.Hash `json:"parentHash" gencodec:"required"` + UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` + Coinbase common.Address `json:"miner" gencodec:"required"` + EVMRoot common.Hash `json:"evmRoot" gencodec:"required"` + UTXORoot common.Hash `json:"utxoRoot" gencodec:"required"` + TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` + EtxHash common.Hash `json:"extTransactionsRoot" gencodec:"required"` + EtxSetHash common.Hash `json:"etxSetHash" gencodec:"required"` + EtxRollupHash common.Hash `json:"extRollupRoot" gencodec:"required"` + ManifestHash []common.Hash `json:"manifestHash" gencodec:"required"` + PrimeTerminus common.Hash `json:"primeTerminus" gencodec:"required"` + InterlinkRootHash common.Hash `json:"interlinkRootHash" gencodec:"required"` + ParentEntropy []*hexutil.Big `json:"parentEntropy" gencodec:"required"` + ParentDeltaS []*hexutil.Big `json:"parentDeltaS" gencodec:"required"` + ParentUncledSubDeltaS []*hexutil.Big `json:"parentUncledSubDeltaS" gencodec:"required"` + EfficiencyScore hexutil.Uint64 `json:"efficiencyScore" gencodec:"required"` + ThresholdCount hexutil.Uint64 `json:"thresholdCount" gencodec:"required"` + ExpansionNumber hexutil.Uint64 `json:"expansionNumber" gencodec:"required"` + EtxEligibleSlices common.Hash 
`json:"etxEligibleSlices" gencodec:"required"` + UncledS *hexutil.Big `json:"uncledS" gencodec:"required"` + Number []*hexutil.Big `json:"number" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + BaseFee *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData" gencodec:"required"` + Nonce BlockNonce `json:"nonce"` } // Initialize the enc struct enc.ParentEntropy = make([]*hexutil.Big, common.HierarchyDepth) enc.ParentDeltaS = make([]*hexutil.Big, common.HierarchyDepth) enc.ParentUncledSubDeltaS = make([]*hexutil.Big, common.HierarchyDepth) - enc.Number = make([]*hexutil.Big, common.HierarchyDepth) + enc.ParentHash = make([]common.Hash, common.HierarchyDepth-1) + enc.Number = make([]*hexutil.Big, common.HierarchyDepth-1) - copy(enc.ParentHash, h.ParentHashArray()) copy(enc.ManifestHash, h.ManifestHashArray()) for i := 0; i < common.HierarchyDepth; i++ { enc.ParentEntropy[i] = (*hexutil.Big)(h.ParentEntropy(i)) enc.ParentDeltaS[i] = (*hexutil.Big)(h.ParentDeltaS(i)) enc.ParentUncledSubDeltaS[i] = (*hexutil.Big)(h.ParentUncledSubDeltaS(i)) + } + for i :=0 ; i< common.HierarchyDepth-1; i++ { + enc.ParentHash[i] = h.ParentHash(i) enc.Number[i] = (*hexutil.Big)(h.Number(i)) } enc.UncleHash = h.UncleHash() @@ -74,7 +72,6 @@ func (h Header) MarshalJSON() ([]byte, error) { enc.ReceiptHash = h.ReceiptHash() enc.PrimeTerminus = h.PrimeTerminus() enc.InterlinkRootHash = h.InterlinkRootHash() - enc.Difficulty = (*hexutil.Big)(h.Difficulty()) enc.UncledS = (*hexutil.Big)(h.UncledS()) enc.GasLimit = hexutil.Uint64(h.GasLimit()) enc.GasUsed = hexutil.Uint64(h.GasUsed()) @@ -83,12 +80,7 @@ func (h Header) MarshalJSON() ([]byte, error) { enc.ExpansionNumber = hexutil.Uint64(h.ExpansionNumber()) enc.EtxEligibleSlices = h.EtxEligibleSlices() enc.BaseFee = (*hexutil.Big)(h.BaseFee()) - enc.Location = hexutil.Bytes(h.Location()) - enc.Time = hexutil.Uint64(h.Time()) enc.Extra = hexutil.Bytes(h.Extra()) - enc.MixHash = h.MixHash() - enc.Nonce = h.Nonce() - enc.Hash = h.Hash() raw, err := json.Marshal(&enc) return raw, err } @@ -96,37 +88,33 @@ func (h Header) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals from JSON. 
func (h *Header) UnmarshalJSON(input []byte) error { var dec struct { - ParentHash []common.Hash `json:"parentHash" gencodec:"required"` - UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase *common.AddressBytes `json:"miner" gencodec:"required"` - EVMRoot *common.Hash `json:"evmRoot" gencodec:"required"` - UTXORoot *common.Hash `json:"utxoRoot" gencodec:"required"` - TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"` - ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"` - EtxHash *common.Hash `json:"extTransactionsRoot" gencodec:"required"` - EtxSetHash *common.Hash `json:"etxSetHash" gencodec:"required"` - EtxRollupHash *common.Hash `json:"extRollupRoot" gencodec:"required"` - ManifestHash []common.Hash `json:"manifestHash" gencodec:"required"` - PrimeTerminus *common.Hash `json:"primeTerminus" gencodec:"required"` - InterlinkRootHash *common.Hash `json:"interlinkRootHash" gencodec:"required"` - Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` - ParentEntropy []*hexutil.Big `json:"parentEntropy" gencodec:"required"` - ParentDeltaS []*hexutil.Big `json:"parentDeltaS" gencodec:"required"` - ParentUncledSubDeltaS []*hexutil.Big `json:"parentUncledSubDeltaS" gencodec:"required"` - EfficiencyScore *hexutil.Uint64 `json:"efficiencyScore" gencodec:"required"` - ThresholdCount *hexutil.Uint64 `json:"thresholdCount" gencodec:"required"` - ExpansionNumber *hexutil.Uint64 `json:"expansionNumber" gencodec:"required"` - EtxEligibleSlices *common.Hash `json:"etxEligibleSlices" gencodec:"required"` - UncledS *hexutil.Big `json:"uncledS" gencodec:"required"` - Number []*hexutil.Big `json:"number" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - BaseFee *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - Location hexutil.Bytes `json:"location" gencodec:"required"` - Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` - Extra hexutil.Bytes `json:"extraData" gencodec:"required"` - MixHash *common.Hash `json:"MixHash" gencodec:"required"` - Nonce BlockNonce `json:"nonce"` + ParentHash []common.Hash `json:"parentHash" gencodec:"required"` + UncleHash *common.Hash `json:"sha3Uncles" gencodec:"required"` + Coinbase *common.AddressBytes `json:"miner" gencodec:"required"` + EVMRoot *common.Hash `json:"evmRoot" gencodec:"required"` + UTXORoot *common.Hash `json:"utxoRoot" gencodec:"required"` + TxHash *common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptHash *common.Hash `json:"receiptsRoot" gencodec:"required"` + EtxHash *common.Hash `json:"extTransactionsRoot" gencodec:"required"` + EtxSetHash *common.Hash `json:"etxSetHash" gencodec:"required"` + EtxRollupHash *common.Hash `json:"extRollupRoot" gencodec:"required"` + ManifestHash []common.Hash `json:"manifestHash" gencodec:"required"` + PrimeTerminus *common.Hash `json:"primeTerminus" gencodec:"required"` + InterlinkRootHash *common.Hash `json:"interlinkRootHash" gencodec:"required"` + ParentEntropy []*hexutil.Big `json:"parentEntropy" gencodec:"required"` + ParentDeltaS []*hexutil.Big `json:"parentDeltaS" gencodec:"required"` + ParentUncledSubDeltaS []*hexutil.Big `json:"parentUncledSubDeltaS" gencodec:"required"` + EfficiencyScore *hexutil.Uint64 `json:"efficiencyScore" gencodec:"required"` + ThresholdCount *hexutil.Uint64 `json:"thresholdCount" gencodec:"required"` + ExpansionNumber *hexutil.Uint64 `json:"expansionNumber" gencodec:"required"` + EtxEligibleSlices 
*common.Hash `json:"etxEligibleSlices" gencodec:"required"` + UncledS *hexutil.Big `json:"uncledS" gencodec:"required"` + Number []*hexutil.Big `json:"number" gencodec:"required"` + GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + BaseFee *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData" gencodec:"required"` + Nonce BlockNonce `json:"nonce"` } if err := json.Unmarshal(input, &dec); err != nil { return err @@ -170,9 +158,6 @@ func (h *Header) UnmarshalJSON(input []byte) error { if dec.InterlinkRootHash == nil { return errors.New("missing required field 'interlinkRootHash' for Header") } - if dec.Difficulty == nil { - return errors.New("missing required field 'difficulty' for Header") - } if dec.ParentEntropy == nil { return errors.New("missing required field 'parentEntropy' for Header") } @@ -209,19 +194,15 @@ func (h *Header) UnmarshalJSON(input []byte) error { if dec.Extra == nil { return errors.New("missing required field 'extraData' for Header") } - if dec.MixHash == nil { - return errors.New("missing required field 'mixHash' for Header") - } // Initialize the header - h.parentHash = make([]common.Hash, common.HierarchyDepth) + h.parentHash = make([]common.Hash, common.HierarchyDepth-1) h.manifestHash = make([]common.Hash, common.HierarchyDepth) h.parentEntropy = make([]*big.Int, common.HierarchyDepth) h.parentDeltaS = make([]*big.Int, common.HierarchyDepth) h.parentUncledSubDeltaS = make([]*big.Int, common.HierarchyDepth) - h.number = make([]*big.Int, common.HierarchyDepth) + h.number = make([]*big.Int, common.HierarchyDepth-1) for i := 0; i < common.HierarchyDepth; i++ { - h.SetParentHash(dec.ParentHash[i], i) h.SetManifestHash(dec.ManifestHash[i], i) if dec.ParentEntropy[i] == nil { return errors.New("missing required field 'parentEntropy' for Header") @@ -235,17 +216,18 @@ func (h *Header) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'parentUncledDeltaS' for Header") } h.SetParentUncledSubDeltaS((*big.Int)(dec.ParentUncledSubDeltaS[i]), i) + } + + for i := 0; i < common.HierarchyDepth-1; i++ { + h.SetParentHash(dec.ParentHash[i], i) if dec.Number[i] == nil { return errors.New("missing required field 'number' for Header") } h.SetNumber((*big.Int)(dec.Number[i]), i) } + h.SetUncleHash(*dec.UncleHash) - if len(dec.Location) > 0 { - h.location = make([]byte, len(dec.Location)) - copy(h.location, dec.Location) - } - coinbase := common.Bytes20ToAddress(*dec.Coinbase, h.location) + coinbase := common.Bytes20ToAddress(*dec.Coinbase, []byte{0,0}) h.SetCoinbase(coinbase) h.SetEVMRoot(*dec.EVMRoot) h.SetUTXORoot(*dec.UTXORoot) @@ -256,7 +238,6 @@ func (h *Header) UnmarshalJSON(input []byte) error { h.SetEtxRollupHash(*dec.EtxRollupHash) h.SetPrimeTerminus(*dec.PrimeTerminus) h.SetInterlinkRootHash(*dec.InterlinkRootHash) - h.SetDifficulty((*big.Int)(dec.Difficulty)) h.SetUncledS((*big.Int)(dec.UncledS)) h.SetGasLimit(uint64(*dec.GasLimit)) h.SetGasUsed(uint64(*dec.GasUsed)) @@ -265,10 +246,7 @@ func (h *Header) UnmarshalJSON(input []byte) error { h.SetExpansionNumber(uint8(*dec.ExpansionNumber)) h.SetEtxEligibleSlices(*dec.EtxEligibleSlices) h.SetBaseFee((*big.Int)(dec.BaseFee)) - h.SetTime(uint64(dec.Time)) h.SetExtra(dec.Extra) - h.SetMixHash(*dec.MixHash) - h.SetNonce(dec.Nonce) return nil } @@ -301,3 +279,140 @@ func (t *Termini) UnmarshalJSON(input []byte) error { t.SetSubTermini(dec.SubTermini) return nil } + +func (wh 
*WorkObjectHeader) MarshalJSON() ([]byte, error) {
+	var enc struct {
+		HeaderHash common.Hash    `json:"headerHash" gencodec:"required"`
+		ParentHash common.Hash    `json:"parentHash" gencodec:"required"`
+		Number     *hexutil.Big   `json:"number" gencodec:"required"`
+		Difficulty *hexutil.Big   `json:"difficulty" gencodec:"required"`
+		TxHash     common.Hash    `json:"txHash" gencodec:"required"`
+		Location   hexutil.Bytes  `json:"location" gencodec:"required"`
+		MixHash    common.Hash    `json:"mixHash" gencodec:"required"`
+		Time       hexutil.Uint64 `json:"time" gencodec:"required"`
+		Nonce      BlockNonce     `json:"nonce" gencodec:"required"`
+	}
+
+	enc.HeaderHash = wh.HeaderHash()
+	enc.ParentHash = wh.ParentHash() // previously never set, so parentHash always encoded as the zero hash
+	enc.Difficulty = (*hexutil.Big)(wh.Difficulty())
+	enc.Number = (*hexutil.Big)(wh.Number())
+	enc.TxHash = wh.TxHash()
+	enc.Location = hexutil.Bytes(wh.Location())
+	enc.MixHash = wh.MixHash()
+	enc.Time = hexutil.Uint64(wh.Time())
+	enc.Nonce = wh.Nonce()
+
+	raw, err := json.Marshal(&enc)
+	return raw, err
+}
+
+func (wh *WorkObjectHeader) UnmarshalJSON(input []byte) error {
+	var dec struct {
+		HeaderHash common.Hash    `json:"headerHash" gencodec:"required"`
+		ParentHash common.Hash    `json:"parentHash" gencodec:"required"`
+		Number     *hexutil.Big   `json:"number" gencodec:"required"`
+		Difficulty *hexutil.Big   `json:"difficulty" gencodec:"required"`
+		TxHash     common.Hash    `json:"txHash" gencodec:"required"`
+		Location   hexutil.Bytes  `json:"location" gencodec:"required"`
+		MixHash    common.Hash    `json:"mixHash" gencodec:"required"`
+		Time       hexutil.Uint64 `json:"time" gencodec:"required"`
+		Nonce      BlockNonce     `json:"nonce" gencodec:"required"`
+	}
+
+	err := json.Unmarshal(input, &dec)
+	if err != nil {
+		return err
+	}
+
+	wh.SetHeaderHash(dec.HeaderHash)
+	wh.SetParentHash(dec.ParentHash)
+	wh.SetNumber((*big.Int)(dec.Number))
+	wh.SetDifficulty((*big.Int)(dec.Difficulty))
+	wh.SetTxHash(dec.TxHash)
+	if len(dec.Location) > 0 {
+		wh.location = make([]byte, len(dec.Location))
+		copy(wh.location, dec.Location)
+	}
+	wh.SetMixHash(dec.MixHash)
+	wh.SetTime(uint64(dec.Time))
+	wh.SetNonce(dec.Nonce)
+	return nil
+}
+
+func (wb *WorkObjectBody) MarshalJSON() ([]byte, error) {
+	var enc struct {
+		Header          *Header             `json:"header" gencodec:"required"`
+		Transactions    Transactions        `json:"transactions" gencodec:"required"`
+		ExtTransactions Transactions        `json:"extTransactions" gencodec:"required"`
+		Uncles          []*WorkObjectHeader `json:"uncles" gencodec:"required"`
+		Manifest        BlockManifest       `json:"manifest" gencodec:"required"`
+		InterlinkHashes common.Hashes       `json:"interlinkHashes" gencodec:"required"`
+	}
+
+	enc.Header = wb.Header()
+	enc.Transactions = wb.Transactions()
+	enc.ExtTransactions = wb.ExtTransactions()
+	enc.Uncles = wb.Uncles()
+	enc.Manifest = wb.Manifest()
+	enc.InterlinkHashes = wb.InterlinkHashes()
+
+	raw, err := json.Marshal(&enc)
+	return raw, err
+}
+
+func (wb *WorkObjectBody) UnmarshalJSON(input []byte) error {
+	var dec struct {
+		Header          *Header             `json:"header" gencodec:"required"`
+		Transactions    Transactions        `json:"transactions" gencodec:"required"`
+		ExtTransactions Transactions        `json:"extTransactions" gencodec:"required"`
+		Uncles          []*WorkObjectHeader `json:"uncles" gencodec:"required"`
+		Manifest        BlockManifest       `json:"manifest" gencodec:"required"`
+		InterlinkHashes common.Hashes       `json:"interlinkHashes" gencodec:"required"`
+	}
+
+	err := json.Unmarshal(input, &dec)
+	if err != nil {
+		return err
+	}
+
+	wb.SetHeader(dec.Header)
+	wb.SetTransactions(dec.Transactions)
+	wb.SetExtTransactions(dec.ExtTransactions)
+	wb.SetUncles(dec.Uncles)
wb.SetManifest(dec.Manifest) + wb.SetInterlinkHashes(dec.InterlinkHashes) + return nil +} + +func (wo *WorkObject) MarshalJSON() ([]byte, error) { + var enc struct { + WoHeader *WorkObjectHeader `json:"woHeader" gencoden:"required"` + WoBody *WorkObjectBody `json:"woBody" gencoden:"required"` + Tx *Transaction `json:"tx" gencoden:"required"` + } + + enc.WoHeader = wo.WorkObjectHeader() + enc.WoBody = wo.Body() + enc.Tx = wo.Tx() + + raw, err := json.Marshal(&enc) + return raw, err +} + +func (wo *WorkObject) UnmarshalJSON(input []byte) error { + var dec struct { + WoHeader *WorkObjectHeader `json:"woHeader" gencoden:"required"` + WoBody *WorkObjectBody `json:"woBody" gencoden:"required"` + Tx *Transaction `json:"tx" gencoden:"required"` + } + + err := json.Unmarshal(input, &dec) + if err != nil { + return err + } + + wo.SetWorkObjectHeader(dec.WoHeader) + wo.SetBody(dec.WoBody) + wo.SetTx(dec.Tx) + return nil +} \ No newline at end of file diff --git a/core/types/proto_block.pb.go b/core/types/proto_block.pb.go index bc87c9a22d..94133d4326 100644 --- a/core/types/proto_block.pb.go +++ b/core/types/proto_block.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.30.0 +// protoc v4.25.1 // source: core/types/proto_block.proto package types @@ -21,141 +21,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This file defines all the ProtoBuf definitions related to core -type ProtoBlock struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Header *ProtoHeader `protobuf:"bytes,1,opt,name=header,proto3,oneof" json:"header,omitempty"` - Body *ProtoBody `protobuf:"bytes,2,opt,name=body,proto3,oneof" json:"body,omitempty"` -} - -func (x *ProtoBlock) Reset() { - *x = ProtoBlock{} - if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProtoBlock) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProtoBlock) ProtoMessage() {} - -func (x *ProtoBlock) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProtoBlock.ProtoReflect.Descriptor instead. 
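Illustrative sketch (not part of the patch): the WorkObjectHeader JSON methods added above are symmetric, so a header built through the setters they rely on should round-trip through encoding/json. The zero-value construction and the SetNumber/SetDifficulty/SetTime and Number/Difficulty getters are taken from the diff above; exact helpers in the repo may differ.

package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/dominant-strategies/go-quai/core/types"
)

func main() {
	// Populate a work object header the same way UnmarshalJSON does: via setters.
	wh := &types.WorkObjectHeader{}
	wh.SetNumber(big.NewInt(100))
	wh.SetDifficulty(big.NewInt(1000000))
	wh.SetTime(1700000000)

	raw, err := json.Marshal(wh) // dispatches to the MarshalJSON added in this diff
	if err != nil {
		panic(err)
	}

	var decoded types.WorkObjectHeader
	if err := json.Unmarshal(raw, &decoded); err != nil { // dispatches to UnmarshalJSON
		panic(err)
	}
	fmt.Println(decoded.Number(), decoded.Difficulty(), string(raw))
}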
-func (*ProtoBlock) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{0} -} - -func (x *ProtoBlock) GetHeader() *ProtoHeader { - if x != nil { - return x.Header - } - return nil -} - -func (x *ProtoBlock) GetBody() *ProtoBody { - if x != nil { - return x.Body - } - return nil -} - -type ProtoBody struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Txs *ProtoTransactions `protobuf:"bytes,1,opt,name=txs,proto3,oneof" json:"txs,omitempty"` - Uncles *ProtoHeaders `protobuf:"bytes,2,opt,name=uncles,proto3,oneof" json:"uncles,omitempty"` - Etxs *ProtoTransactions `protobuf:"bytes,3,opt,name=etxs,proto3,oneof" json:"etxs,omitempty"` - Manifest *ProtoManifest `protobuf:"bytes,4,opt,name=manifest,proto3,oneof" json:"manifest,omitempty"` - InterlinkHashes *common.ProtoHashes `protobuf:"bytes,5,opt,name=interlink_hashes,json=interlinkHashes,proto3,oneof" json:"interlink_hashes,omitempty"` -} - -func (x *ProtoBody) Reset() { - *x = ProtoBody{} - if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ProtoBody) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ProtoBody) ProtoMessage() {} - -func (x *ProtoBody) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ProtoBody.ProtoReflect.Descriptor instead. -func (*ProtoBody) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{1} -} - -func (x *ProtoBody) GetTxs() *ProtoTransactions { - if x != nil { - return x.Txs - } - return nil -} - -func (x *ProtoBody) GetUncles() *ProtoHeaders { - if x != nil { - return x.Uncles - } - return nil -} - -func (x *ProtoBody) GetEtxs() *ProtoTransactions { - if x != nil { - return x.Etxs - } - return nil -} - -func (x *ProtoBody) GetManifest() *ProtoManifest { - if x != nil { - return x.Manifest - } - return nil -} - -func (x *ProtoBody) GetInterlinkHashes() *common.ProtoHashes { - if x != nil { - return x.InterlinkHashes - } - return nil -} - type ProtoHeader struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -180,24 +45,23 @@ type ProtoHeader struct { GasUsed *uint64 `protobuf:"varint,17,opt,name=gas_used,json=gasUsed,proto3,oneof" json:"gas_used,omitempty"` BaseFee []byte `protobuf:"bytes,18,opt,name=base_fee,json=baseFee,proto3,oneof" json:"base_fee,omitempty"` Location *common.ProtoLocation `protobuf:"bytes,19,opt,name=location,proto3,oneof" json:"location,omitempty"` - Time *uint64 `protobuf:"varint,20,opt,name=time,proto3,oneof" json:"time,omitempty"` - Extra []byte `protobuf:"bytes,21,opt,name=extra,proto3,oneof" json:"extra,omitempty"` - MixHash *common.ProtoHash `protobuf:"bytes,22,opt,name=mix_hash,json=mixHash,proto3,oneof" json:"mix_hash,omitempty"` - Nonce *uint64 `protobuf:"varint,23,opt,name=nonce,proto3,oneof" json:"nonce,omitempty"` - UtxoRoot *common.ProtoHash `protobuf:"bytes,24,opt,name=utxo_root,json=utxoRoot,proto3,oneof" json:"utxo_root,omitempty"` - EtxSetHash *common.ProtoHash `protobuf:"bytes,25,opt,name=etx_set_hash,json=etxSetHash,proto3,oneof" json:"etx_set_hash,omitempty"` - EfficiencyScore *uint64 
`protobuf:"varint,26,opt,name=efficiency_score,json=efficiencyScore,proto3,oneof" json:"efficiency_score,omitempty"` - ThresholdCount *uint64 `protobuf:"varint,27,opt,name=threshold_count,json=thresholdCount,proto3,oneof" json:"threshold_count,omitempty"` - ExpansionNumber *uint64 `protobuf:"varint,28,opt,name=expansion_number,json=expansionNumber,proto3,oneof" json:"expansion_number,omitempty"` - EtxEligibleSlices *common.ProtoHash `protobuf:"bytes,29,opt,name=etx_eligible_slices,json=etxEligibleSlices,proto3,oneof" json:"etx_eligible_slices,omitempty"` - PrimeTerminus *common.ProtoHash `protobuf:"bytes,30,opt,name=prime_terminus,json=primeTerminus,proto3,oneof" json:"prime_terminus,omitempty"` - InterlinkRootHash *common.ProtoHash `protobuf:"bytes,31,opt,name=interlink_root_hash,json=interlinkRootHash,proto3,oneof" json:"interlink_root_hash,omitempty"` + Extra []byte `protobuf:"bytes,20,opt,name=extra,proto3,oneof" json:"extra,omitempty"` + MixHash *common.ProtoHash `protobuf:"bytes,21,opt,name=mix_hash,json=mixHash,proto3,oneof" json:"mix_hash,omitempty"` + Nonce *uint64 `protobuf:"varint,22,opt,name=nonce,proto3,oneof" json:"nonce,omitempty"` + UtxoRoot *common.ProtoHash `protobuf:"bytes,23,opt,name=utxo_root,json=utxoRoot,proto3,oneof" json:"utxo_root,omitempty"` + EtxSetHash *common.ProtoHash `protobuf:"bytes,24,opt,name=etx_set_hash,json=etxSetHash,proto3,oneof" json:"etx_set_hash,omitempty"` + EfficiencyScore *uint64 `protobuf:"varint,25,opt,name=efficiency_score,json=efficiencyScore,proto3,oneof" json:"efficiency_score,omitempty"` + ThresholdCount *uint64 `protobuf:"varint,26,opt,name=threshold_count,json=thresholdCount,proto3,oneof" json:"threshold_count,omitempty"` + ExpansionNumber *uint64 `protobuf:"varint,27,opt,name=expansion_number,json=expansionNumber,proto3,oneof" json:"expansion_number,omitempty"` + EtxEligibleSlices *common.ProtoHash `protobuf:"bytes,28,opt,name=etx_eligible_slices,json=etxEligibleSlices,proto3,oneof" json:"etx_eligible_slices,omitempty"` + PrimeTerminus *common.ProtoHash `protobuf:"bytes,29,opt,name=prime_terminus,json=primeTerminus,proto3,oneof" json:"prime_terminus,omitempty"` + InterlinkRootHash *common.ProtoHash `protobuf:"bytes,30,opt,name=interlink_root_hash,json=interlinkRootHash,proto3,oneof" json:"interlink_root_hash,omitempty"` } func (x *ProtoHeader) Reset() { *x = ProtoHeader{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[2] + mi := &file_core_types_proto_block_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -210,7 +74,7 @@ func (x *ProtoHeader) String() string { func (*ProtoHeader) ProtoMessage() {} func (x *ProtoHeader) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[2] + mi := &file_core_types_proto_block_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -223,7 +87,7 @@ func (x *ProtoHeader) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoHeader.ProtoReflect.Descriptor instead. 
func (*ProtoHeader) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{2} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{0} } func (x *ProtoHeader) GetParentHash() []*common.ProtoHash { @@ -359,13 +223,6 @@ func (x *ProtoHeader) GetLocation() *common.ProtoLocation { return nil } -func (x *ProtoHeader) GetTime() uint64 { - if x != nil && x.Time != nil { - return *x.Time - } - return 0 -} - func (x *ProtoHeader) GetExtra() []byte { if x != nil { return x.Extra @@ -472,7 +329,7 @@ type ProtoTransaction struct { func (x *ProtoTransaction) Reset() { *x = ProtoTransaction{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[3] + mi := &file_core_types_proto_block_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -485,7 +342,7 @@ func (x *ProtoTransaction) String() string { func (*ProtoTransaction) ProtoMessage() {} func (x *ProtoTransaction) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[3] + mi := &file_core_types_proto_block_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -498,7 +355,7 @@ func (x *ProtoTransaction) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoTransaction.ProtoReflect.Descriptor instead. func (*ProtoTransaction) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{3} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{1} } func (x *ProtoTransaction) GetType() uint64 { @@ -645,7 +502,7 @@ type ProtoTransactions struct { func (x *ProtoTransactions) Reset() { *x = ProtoTransactions{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[4] + mi := &file_core_types_proto_block_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -658,7 +515,7 @@ func (x *ProtoTransactions) String() string { func (*ProtoTransactions) ProtoMessage() {} func (x *ProtoTransactions) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[4] + mi := &file_core_types_proto_block_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -671,7 +528,7 @@ func (x *ProtoTransactions) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoTransactions.ProtoReflect.Descriptor instead. 
func (*ProtoTransactions) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{4} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{2} } func (x *ProtoTransactions) GetTransactions() []*ProtoTransaction { @@ -691,6 +548,100 @@ type ProtoHeaders struct { func (x *ProtoHeaders) Reset() { *x = ProtoHeaders{} + if protoimpl.UnsafeEnabled { + mi := &file_core_types_proto_block_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProtoHeaders) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoHeaders) ProtoMessage() {} + +func (x *ProtoHeaders) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoHeaders.ProtoReflect.Descriptor instead. +func (*ProtoHeaders) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{3} +} + +func (x *ProtoHeaders) GetHeaders() []*ProtoHeader { + if x != nil { + return x.Headers + } + return nil +} + +type ProtoManifest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Manifest []*common.ProtoHash `protobuf:"bytes,1,rep,name=manifest,proto3" json:"manifest,omitempty"` +} + +func (x *ProtoManifest) Reset() { + *x = ProtoManifest{} + if protoimpl.UnsafeEnabled { + mi := &file_core_types_proto_block_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProtoManifest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoManifest) ProtoMessage() {} + +func (x *ProtoManifest) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoManifest.ProtoReflect.Descriptor instead. 
+func (*ProtoManifest) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{4} +} + +func (x *ProtoManifest) GetManifest() []*common.ProtoHash { + if x != nil { + return x.Manifest + } + return nil +} + +type ProtoAccessList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AccessTuples []*ProtoAccessTuple `protobuf:"bytes,1,rep,name=access_tuples,json=accessTuples,proto3" json:"access_tuples,omitempty"` +} + +func (x *ProtoAccessList) Reset() { + *x = ProtoAccessList{} if protoimpl.UnsafeEnabled { mi := &file_core_types_proto_block_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -698,14 +649,224 @@ func (x *ProtoHeaders) Reset() { } } -func (x *ProtoHeaders) String() string { +func (x *ProtoAccessList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoAccessList) ProtoMessage() {} + +func (x *ProtoAccessList) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoAccessList.ProtoReflect.Descriptor instead. +func (*ProtoAccessList) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{5} +} + +func (x *ProtoAccessList) GetAccessTuples() []*ProtoAccessTuple { + if x != nil { + return x.AccessTuples + } + return nil +} + +type ProtoWorkObjectHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + HeaderHash *common.ProtoHash `protobuf:"bytes,1,opt,name=header_hash,json=headerHash,proto3,oneof" json:"header_hash,omitempty"` + ParentHash *common.ProtoHash `protobuf:"bytes,2,opt,name=parent_hash,json=parentHash,proto3,oneof" json:"parent_hash,omitempty"` + Number []byte `protobuf:"bytes,3,opt,name=number,proto3,oneof" json:"number,omitempty"` + Difficulty []byte `protobuf:"bytes,4,opt,name=difficulty,proto3,oneof" json:"difficulty,omitempty"` + TxHash *common.ProtoHash `protobuf:"bytes,5,opt,name=tx_hash,json=txHash,proto3,oneof" json:"tx_hash,omitempty"` + Nonce *uint64 `protobuf:"varint,6,opt,name=nonce,proto3,oneof" json:"nonce,omitempty"` + Location *common.ProtoLocation `protobuf:"bytes,7,opt,name=location,proto3,oneof" json:"location,omitempty"` + MixHash *common.ProtoHash `protobuf:"bytes,8,opt,name=mix_hash,json=mixHash,proto3,oneof" json:"mix_hash,omitempty"` + Time *uint64 `protobuf:"varint,9,opt,name=time,proto3,oneof" json:"time,omitempty"` +} + +func (x *ProtoWorkObjectHeader) Reset() { + *x = ProtoWorkObjectHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_core_types_proto_block_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProtoWorkObjectHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoWorkObjectHeader) ProtoMessage() {} + +func (x *ProtoWorkObjectHeader) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoWorkObjectHeader.ProtoReflect.Descriptor instead. 
+func (*ProtoWorkObjectHeader) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{6} +} + +func (x *ProtoWorkObjectHeader) GetHeaderHash() *common.ProtoHash { + if x != nil { + return x.HeaderHash + } + return nil +} + +func (x *ProtoWorkObjectHeader) GetParentHash() *common.ProtoHash { + if x != nil { + return x.ParentHash + } + return nil +} + +func (x *ProtoWorkObjectHeader) GetNumber() []byte { + if x != nil { + return x.Number + } + return nil +} + +func (x *ProtoWorkObjectHeader) GetDifficulty() []byte { + if x != nil { + return x.Difficulty + } + return nil +} + +func (x *ProtoWorkObjectHeader) GetTxHash() *common.ProtoHash { + if x != nil { + return x.TxHash + } + return nil +} + +func (x *ProtoWorkObjectHeader) GetNonce() uint64 { + if x != nil && x.Nonce != nil { + return *x.Nonce + } + return 0 +} + +func (x *ProtoWorkObjectHeader) GetLocation() *common.ProtoLocation { + if x != nil { + return x.Location + } + return nil +} + +func (x *ProtoWorkObjectHeader) GetMixHash() *common.ProtoHash { + if x != nil { + return x.MixHash + } + return nil +} + +func (x *ProtoWorkObjectHeader) GetTime() uint64 { + if x != nil && x.Time != nil { + return *x.Time + } + return 0 +} + +type ProtoWorkObjectHeaders struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + WoHeaders []*ProtoWorkObjectHeader `protobuf:"bytes,1,rep,name=wo_headers,json=woHeaders,proto3" json:"wo_headers,omitempty"` +} + +func (x *ProtoWorkObjectHeaders) Reset() { + *x = ProtoWorkObjectHeaders{} + if protoimpl.UnsafeEnabled { + mi := &file_core_types_proto_block_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProtoWorkObjectHeaders) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProtoWorkObjectHeaders) ProtoMessage() {} + +func (x *ProtoWorkObjectHeaders) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProtoWorkObjectHeaders.ProtoReflect.Descriptor instead. 
+func (*ProtoWorkObjectHeaders) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{7} +} + +func (x *ProtoWorkObjectHeaders) GetWoHeaders() []*ProtoWorkObjectHeader { + if x != nil { + return x.WoHeaders + } + return nil +} + +type ProtoWorkObjectBody struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Header *ProtoHeader `protobuf:"bytes,1,opt,name=header,proto3,oneof" json:"header,omitempty"` + Transactions *ProtoTransactions `protobuf:"bytes,2,opt,name=transactions,proto3,oneof" json:"transactions,omitempty"` + Uncles *ProtoWorkObjectHeaders `protobuf:"bytes,3,opt,name=uncles,proto3,oneof" json:"uncles,omitempty"` + ExtTransactions *ProtoTransactions `protobuf:"bytes,4,opt,name=ext_transactions,json=extTransactions,proto3,oneof" json:"ext_transactions,omitempty"` + Manifest *ProtoManifest `protobuf:"bytes,5,opt,name=manifest,proto3,oneof" json:"manifest,omitempty"` + InterlinkHashes *common.ProtoHashes `protobuf:"bytes,6,opt,name=interlink_hashes,json=interlinkHashes,proto3,oneof" json:"interlink_hashes,omitempty"` +} + +func (x *ProtoWorkObjectBody) Reset() { + *x = ProtoWorkObjectBody{} + if protoimpl.UnsafeEnabled { + mi := &file_core_types_proto_block_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ProtoWorkObjectBody) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ProtoHeaders) ProtoMessage() {} +func (*ProtoWorkObjectBody) ProtoMessage() {} -func (x *ProtoHeaders) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[5] +func (x *ProtoWorkObjectBody) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -716,43 +877,80 @@ func (x *ProtoHeaders) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ProtoHeaders.ProtoReflect.Descriptor instead. -func (*ProtoHeaders) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{5} +// Deprecated: Use ProtoWorkObjectBody.ProtoReflect.Descriptor instead. 
+func (*ProtoWorkObjectBody) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{8} } -func (x *ProtoHeaders) GetHeaders() []*ProtoHeader { +func (x *ProtoWorkObjectBody) GetHeader() *ProtoHeader { if x != nil { - return x.Headers + return x.Header } return nil } -type ProtoManifest struct { +func (x *ProtoWorkObjectBody) GetTransactions() *ProtoTransactions { + if x != nil { + return x.Transactions + } + return nil +} + +func (x *ProtoWorkObjectBody) GetUncles() *ProtoWorkObjectHeaders { + if x != nil { + return x.Uncles + } + return nil +} + +func (x *ProtoWorkObjectBody) GetExtTransactions() *ProtoTransactions { + if x != nil { + return x.ExtTransactions + } + return nil +} + +func (x *ProtoWorkObjectBody) GetManifest() *ProtoManifest { + if x != nil { + return x.Manifest + } + return nil +} + +func (x *ProtoWorkObjectBody) GetInterlinkHashes() *common.ProtoHashes { + if x != nil { + return x.InterlinkHashes + } + return nil +} + +type ProtoWorkObject struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Manifest []*common.ProtoHash `protobuf:"bytes,1,rep,name=manifest,proto3" json:"manifest,omitempty"` + WoHeader *ProtoWorkObjectHeader `protobuf:"bytes,1,opt,name=wo_header,json=woHeader,proto3,oneof" json:"wo_header,omitempty"` + WoBody *ProtoWorkObjectBody `protobuf:"bytes,2,opt,name=wo_body,json=woBody,proto3,oneof" json:"wo_body,omitempty"` + Tx *ProtoTransaction `protobuf:"bytes,3,opt,name=tx,proto3,oneof" json:"tx,omitempty"` } -func (x *ProtoManifest) Reset() { - *x = ProtoManifest{} +func (x *ProtoWorkObject) Reset() { + *x = ProtoWorkObject{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[6] + mi := &file_core_types_proto_block_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ProtoManifest) String() string { +func (x *ProtoWorkObject) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ProtoManifest) ProtoMessage() {} +func (*ProtoWorkObject) ProtoMessage() {} -func (x *ProtoManifest) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[6] +func (x *ProtoWorkObject) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -763,43 +961,57 @@ func (x *ProtoManifest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ProtoManifest.ProtoReflect.Descriptor instead. -func (*ProtoManifest) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{6} +// Deprecated: Use ProtoWorkObject.ProtoReflect.Descriptor instead. 
+func (*ProtoWorkObject) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{9} } -func (x *ProtoManifest) GetManifest() []*common.ProtoHash { +func (x *ProtoWorkObject) GetWoHeader() *ProtoWorkObjectHeader { if x != nil { - return x.Manifest + return x.WoHeader } return nil } -type ProtoAccessList struct { +func (x *ProtoWorkObject) GetWoBody() *ProtoWorkObjectBody { + if x != nil { + return x.WoBody + } + return nil +} + +func (x *ProtoWorkObject) GetTx() *ProtoTransaction { + if x != nil { + return x.Tx + } + return nil +} + +type ProtoWorkObjects struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - AccessTuples []*ProtoAccessTuple `protobuf:"bytes,1,rep,name=access_tuples,json=accessTuples,proto3" json:"access_tuples,omitempty"` + WorkObjects []*ProtoWorkObject `protobuf:"bytes,1,rep,name=work_objects,json=workObjects,proto3" json:"work_objects,omitempty"` } -func (x *ProtoAccessList) Reset() { - *x = ProtoAccessList{} +func (x *ProtoWorkObjects) Reset() { + *x = ProtoWorkObjects{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[7] + mi := &file_core_types_proto_block_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ProtoAccessList) String() string { +func (x *ProtoWorkObjects) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ProtoAccessList) ProtoMessage() {} +func (*ProtoWorkObjects) ProtoMessage() {} -func (x *ProtoAccessList) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[7] +func (x *ProtoWorkObjects) ProtoReflect() protoreflect.Message { + mi := &file_core_types_proto_block_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -810,14 +1022,14 @@ func (x *ProtoAccessList) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ProtoAccessList.ProtoReflect.Descriptor instead. -func (*ProtoAccessList) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{7} +// Deprecated: Use ProtoWorkObjects.ProtoReflect.Descriptor instead. +func (*ProtoWorkObjects) Descriptor() ([]byte, []int) { + return file_core_types_proto_block_proto_rawDescGZIP(), []int{10} } -func (x *ProtoAccessList) GetAccessTuples() []*ProtoAccessTuple { +func (x *ProtoWorkObjects) GetWorkObjects() []*ProtoWorkObject { if x != nil { - return x.AccessTuples + return x.WorkObjects } return nil } @@ -834,7 +1046,7 @@ type ProtoAccessTuple struct { func (x *ProtoAccessTuple) Reset() { *x = ProtoAccessTuple{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[8] + mi := &file_core_types_proto_block_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -847,7 +1059,7 @@ func (x *ProtoAccessTuple) String() string { func (*ProtoAccessTuple) ProtoMessage() {} func (x *ProtoAccessTuple) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[8] + mi := &file_core_types_proto_block_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -860,7 +1072,7 @@ func (x *ProtoAccessTuple) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoAccessTuple.ProtoReflect.Descriptor instead. 
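Illustrative sketch (not part of the generated file): the new ProtoWorkObject groups a ProtoWorkObjectHeader (header hash, parent hash, number, difficulty, tx hash, nonce, location, mix hash, time), an optional ProtoWorkObjectBody and an optional ProtoTransaction, and it serializes like any other generated message. Field values below are placeholders.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core/types"
)

func main() {
	nonce := uint64(42)
	timestamp := uint64(1700000000)
	wo := &types.ProtoWorkObject{
		WoHeader: &types.ProtoWorkObjectHeader{
			HeaderHash: &common.ProtoHash{},
			ParentHash: &common.ProtoHash{},
			Number:     []byte{0x01},             // placeholder number bytes
			Difficulty: []byte{0x0f, 0x42, 0x40}, // placeholder difficulty bytes
			Nonce:      &nonce,
			Time:       &timestamp,
		},
		WoBody: &types.ProtoWorkObjectBody{},
	}

	raw, err := proto.Marshal(wo) // standard protobuf wire encoding
	if err != nil {
		panic(err)
	}
	fmt.Printf("ProtoWorkObject encodes to %d bytes\n", len(raw))
}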
func (*ProtoAccessTuple) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{8} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{11} } func (x *ProtoAccessTuple) GetAddress() []byte { @@ -894,7 +1106,7 @@ type ProtoReceiptForStorage struct { func (x *ProtoReceiptForStorage) Reset() { *x = ProtoReceiptForStorage{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[9] + mi := &file_core_types_proto_block_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -907,7 +1119,7 @@ func (x *ProtoReceiptForStorage) String() string { func (*ProtoReceiptForStorage) ProtoMessage() {} func (x *ProtoReceiptForStorage) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[9] + mi := &file_core_types_proto_block_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -920,7 +1132,7 @@ func (x *ProtoReceiptForStorage) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoReceiptForStorage.ProtoReflect.Descriptor instead. func (*ProtoReceiptForStorage) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{9} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{12} } func (x *ProtoReceiptForStorage) GetPostStateOrStatus() []byte { @@ -983,7 +1195,7 @@ type ProtoReceiptsForStorage struct { func (x *ProtoReceiptsForStorage) Reset() { *x = ProtoReceiptsForStorage{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[10] + mi := &file_core_types_proto_block_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -996,7 +1208,7 @@ func (x *ProtoReceiptsForStorage) String() string { func (*ProtoReceiptsForStorage) ProtoMessage() {} func (x *ProtoReceiptsForStorage) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[10] + mi := &file_core_types_proto_block_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1009,7 +1221,7 @@ func (x *ProtoReceiptsForStorage) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoReceiptsForStorage.ProtoReflect.Descriptor instead. 
func (*ProtoReceiptsForStorage) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{10} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{13} } func (x *ProtoReceiptsForStorage) GetReceipts() []*ProtoReceiptForStorage { @@ -1032,7 +1244,7 @@ type ProtoLogForStorage struct { func (x *ProtoLogForStorage) Reset() { *x = ProtoLogForStorage{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[11] + mi := &file_core_types_proto_block_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1045,7 +1257,7 @@ func (x *ProtoLogForStorage) String() string { func (*ProtoLogForStorage) ProtoMessage() {} func (x *ProtoLogForStorage) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[11] + mi := &file_core_types_proto_block_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1058,7 +1270,7 @@ func (x *ProtoLogForStorage) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoLogForStorage.ProtoReflect.Descriptor instead. func (*ProtoLogForStorage) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{11} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{14} } func (x *ProtoLogForStorage) GetAddress() *common.ProtoAddress { @@ -1093,7 +1305,7 @@ type ProtoLogsForStorage struct { func (x *ProtoLogsForStorage) Reset() { *x = ProtoLogsForStorage{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[12] + mi := &file_core_types_proto_block_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1106,7 +1318,7 @@ func (x *ProtoLogsForStorage) String() string { func (*ProtoLogsForStorage) ProtoMessage() {} func (x *ProtoLogsForStorage) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[12] + mi := &file_core_types_proto_block_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1119,7 +1331,7 @@ func (x *ProtoLogsForStorage) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoLogsForStorage.ProtoReflect.Descriptor instead. 
func (*ProtoLogsForStorage) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{12} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{15} } func (x *ProtoLogsForStorage) GetLogs() []*ProtoLogForStorage { @@ -1134,14 +1346,14 @@ type ProtoPendingHeader struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Header *ProtoHeader `protobuf:"bytes,1,opt,name=header,proto3,oneof" json:"header,omitempty"` - Termini *ProtoTermini `protobuf:"bytes,2,opt,name=termini,proto3,oneof" json:"termini,omitempty"` + Wo *ProtoWorkObject `protobuf:"bytes,1,opt,name=wo,proto3,oneof" json:"wo,omitempty"` + Termini *ProtoTermini `protobuf:"bytes,2,opt,name=termini,proto3,oneof" json:"termini,omitempty"` } func (x *ProtoPendingHeader) Reset() { *x = ProtoPendingHeader{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[13] + mi := &file_core_types_proto_block_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1154,7 +1366,7 @@ func (x *ProtoPendingHeader) String() string { func (*ProtoPendingHeader) ProtoMessage() {} func (x *ProtoPendingHeader) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[13] + mi := &file_core_types_proto_block_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1167,12 +1379,12 @@ func (x *ProtoPendingHeader) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoPendingHeader.ProtoReflect.Descriptor instead. func (*ProtoPendingHeader) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{13} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{16} } -func (x *ProtoPendingHeader) GetHeader() *ProtoHeader { +func (x *ProtoPendingHeader) GetWo() *ProtoWorkObject { if x != nil { - return x.Header + return x.Wo } return nil } @@ -1196,7 +1408,7 @@ type ProtoTermini struct { func (x *ProtoTermini) Reset() { *x = ProtoTermini{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[14] + mi := &file_core_types_proto_block_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1209,7 +1421,7 @@ func (x *ProtoTermini) String() string { func (*ProtoTermini) ProtoMessage() {} func (x *ProtoTermini) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[14] + mi := &file_core_types_proto_block_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1222,7 +1434,7 @@ func (x *ProtoTermini) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoTermini.ProtoReflect.Descriptor instead. 
func (*ProtoTermini) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{14} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{17} } func (x *ProtoTermini) GetDomTermini() []*common.ProtoHash { @@ -1250,7 +1462,7 @@ type ProtoEtxSet struct { func (x *ProtoEtxSet) Reset() { *x = ProtoEtxSet{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[15] + mi := &file_core_types_proto_block_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1263,7 +1475,7 @@ func (x *ProtoEtxSet) String() string { func (*ProtoEtxSet) ProtoMessage() {} func (x *ProtoEtxSet) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[15] + mi := &file_core_types_proto_block_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1276,7 +1488,7 @@ func (x *ProtoEtxSet) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoEtxSet.ProtoReflect.Descriptor instead. func (*ProtoEtxSet) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{15} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{18} } func (x *ProtoEtxSet) GetEtxHashes() []byte { @@ -1291,14 +1503,14 @@ type ProtoPendingEtxs struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Header *ProtoHeader `protobuf:"bytes,1,opt,name=header,proto3,oneof" json:"header,omitempty"` + Header *ProtoWorkObject `protobuf:"bytes,1,opt,name=header,proto3,oneof" json:"header,omitempty"` Etxs *ProtoTransactions `protobuf:"bytes,2,opt,name=etxs,proto3,oneof" json:"etxs,omitempty"` } func (x *ProtoPendingEtxs) Reset() { *x = ProtoPendingEtxs{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[16] + mi := &file_core_types_proto_block_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1311,7 +1523,7 @@ func (x *ProtoPendingEtxs) String() string { func (*ProtoPendingEtxs) ProtoMessage() {} func (x *ProtoPendingEtxs) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[16] + mi := &file_core_types_proto_block_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1324,10 +1536,10 @@ func (x *ProtoPendingEtxs) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoPendingEtxs.ProtoReflect.Descriptor instead. 
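Similarly illustrative: ProtoPendingHeader now wraps the full work object (field wo) rather than a bare ProtoHeader, so a pending header is assembled around a ProtoWorkObject. Termini contents are omitted in this sketch.

package main

import "github.com/dominant-strategies/go-quai/core/types"

func main() {
	pending := &types.ProtoPendingHeader{
		Wo:      &types.ProtoWorkObject{WoHeader: &types.ProtoWorkObjectHeader{}},
		Termini: &types.ProtoTermini{}, // dom/sub termini left empty here
	}
	_ = pending
}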
func (*ProtoPendingEtxs) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{16} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{19} } -func (x *ProtoPendingEtxs) GetHeader() *ProtoHeader { +func (x *ProtoPendingEtxs) GetHeader() *ProtoWorkObject { if x != nil { return x.Header } @@ -1346,14 +1558,14 @@ type ProtoPendingEtxsRollup struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Header *ProtoHeader `protobuf:"bytes,1,opt,name=header,proto3,oneof" json:"header,omitempty"` + Header *ProtoWorkObject `protobuf:"bytes,1,opt,name=header,proto3,oneof" json:"header,omitempty"` EtxsRollup *ProtoTransactions `protobuf:"bytes,2,opt,name=etxs_rollup,json=etxsRollup,proto3,oneof" json:"etxs_rollup,omitempty"` } func (x *ProtoPendingEtxsRollup) Reset() { *x = ProtoPendingEtxsRollup{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[17] + mi := &file_core_types_proto_block_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1366,7 +1578,7 @@ func (x *ProtoPendingEtxsRollup) String() string { func (*ProtoPendingEtxsRollup) ProtoMessage() {} func (x *ProtoPendingEtxsRollup) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[17] + mi := &file_core_types_proto_block_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1379,10 +1591,10 @@ func (x *ProtoPendingEtxsRollup) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoPendingEtxsRollup.ProtoReflect.Descriptor instead. func (*ProtoPendingEtxsRollup) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{17} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{20} } -func (x *ProtoPendingEtxsRollup) GetHeader() *ProtoHeader { +func (x *ProtoPendingEtxsRollup) GetHeader() *ProtoWorkObject { if x != nil { return x.Header } @@ -1407,7 +1619,7 @@ type ProtoTxIns struct { func (x *ProtoTxIns) Reset() { *x = ProtoTxIns{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[18] + mi := &file_core_types_proto_block_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1420,7 +1632,7 @@ func (x *ProtoTxIns) String() string { func (*ProtoTxIns) ProtoMessage() {} func (x *ProtoTxIns) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[18] + mi := &file_core_types_proto_block_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1433,7 +1645,7 @@ func (x *ProtoTxIns) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoTxIns.ProtoReflect.Descriptor instead. 
func (*ProtoTxIns) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{18} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{21} } func (x *ProtoTxIns) GetTxIns() []*ProtoTxIn { @@ -1454,7 +1666,7 @@ type ProtoTxOuts struct { func (x *ProtoTxOuts) Reset() { *x = ProtoTxOuts{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[19] + mi := &file_core_types_proto_block_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1467,7 +1679,7 @@ func (x *ProtoTxOuts) String() string { func (*ProtoTxOuts) ProtoMessage() {} func (x *ProtoTxOuts) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[19] + mi := &file_core_types_proto_block_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1480,7 +1692,7 @@ func (x *ProtoTxOuts) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoTxOuts.ProtoReflect.Descriptor instead. func (*ProtoTxOuts) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{19} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{22} } func (x *ProtoTxOuts) GetTxOuts() []*ProtoTxOut { @@ -1502,7 +1714,7 @@ type ProtoTxIn struct { func (x *ProtoTxIn) Reset() { *x = ProtoTxIn{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[20] + mi := &file_core_types_proto_block_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1515,7 +1727,7 @@ func (x *ProtoTxIn) String() string { func (*ProtoTxIn) ProtoMessage() {} func (x *ProtoTxIn) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[20] + mi := &file_core_types_proto_block_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1528,7 +1740,7 @@ func (x *ProtoTxIn) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoTxIn.ProtoReflect.Descriptor instead. func (*ProtoTxIn) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{20} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{23} } func (x *ProtoTxIn) GetPreviousOutPoint() *ProtoOutPoint { @@ -1557,7 +1769,7 @@ type ProtoOutPoint struct { func (x *ProtoOutPoint) Reset() { *x = ProtoOutPoint{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[21] + mi := &file_core_types_proto_block_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1570,7 +1782,7 @@ func (x *ProtoOutPoint) String() string { func (*ProtoOutPoint) ProtoMessage() {} func (x *ProtoOutPoint) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[21] + mi := &file_core_types_proto_block_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1583,7 +1795,7 @@ func (x *ProtoOutPoint) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoOutPoint.ProtoReflect.Descriptor instead. 
func (*ProtoOutPoint) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{21} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{24} } func (x *ProtoOutPoint) GetHash() *common.ProtoHash { @@ -1612,7 +1824,7 @@ type ProtoTxOut struct { func (x *ProtoTxOut) Reset() { *x = ProtoTxOut{} if protoimpl.UnsafeEnabled { - mi := &file_core_types_proto_block_proto_msgTypes[22] + mi := &file_core_types_proto_block_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1625,7 +1837,7 @@ func (x *ProtoTxOut) String() string { func (*ProtoTxOut) ProtoMessage() {} func (x *ProtoTxOut) ProtoReflect() protoreflect.Message { - mi := &file_core_types_proto_block_proto_msgTypes[22] + mi := &file_core_types_proto_block_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1638,7 +1850,7 @@ func (x *ProtoTxOut) ProtoReflect() protoreflect.Message { // Deprecated: Use ProtoTxOut.ProtoReflect.Descriptor instead. func (*ProtoTxOut) Descriptor() ([]byte, []int) { - return file_core_types_proto_block_proto_rawDescGZIP(), []int{22} + return file_core_types_proto_block_proto_rawDescGZIP(), []int{25} } func (x *ProtoTxOut) GetDenomination() uint32 { @@ -1662,346 +1874,402 @@ var file_core_types_proto_block_proto_rawDesc = []byte{ 0x74, 0x6f, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x1a, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x7c, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2f, - 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, - 0x29, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x6f, 0x64, 0x79, 0x48, - 0x01, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x22, 0xdb, - 0x02, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2f, 0x0a, 0x03, - 0x74, 0x78, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x48, 0x00, 0x52, 0x03, 0x74, 0x78, 0x73, 0x88, 0x01, 0x01, 0x12, 0x30, 0x0a, - 0x06, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x73, 0x48, 0x01, 0x52, 0x06, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, - 0x31, 0x0a, 0x04, 0x65, 0x74, 0x78, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x02, 0x52, 0x04, 0x65, 0x74, 0x78, 0x73, 0x88, - 0x01, 0x01, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 
0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x48, 0x03, 0x52, 0x08, 0x6d, 0x61, - 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x88, 0x01, 0x01, 0x12, 0x43, 0x0a, 0x10, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x48, 0x04, 0x52, 0x0f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x88, 0x01, 0x01, 0x42, 0x06, - 0x0a, 0x04, 0x5f, 0x74, 0x78, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x75, 0x6e, 0x63, 0x6c, 0x65, - 0x73, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x65, 0x74, 0x78, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6d, - 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0xb1, 0x0e, 0x0a, - 0x0b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x0b, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x48, 0x61, 0x73, 0x68, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x35, 0x0a, 0x0a, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x00, 0x52, 0x09, 0x75, 0x6e, 0x63, 0x6c, 0x65, - 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, - 0x61, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x08, 0x63, 0x6f, 0x69, - 0x6e, 0x62, 0x61, 0x73, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x08, 0x65, 0x76, 0x6d, 0x5f, - 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x02, 0x52, - 0x07, 0x65, 0x76, 0x6d, 0x52, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x07, 0x74, - 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, - 0x03, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x08, - 0x65, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x22, 0x8f, 0x0e, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x32, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x48, 0x61, 0x73, 0x68, 0x12, 0x35, 0x0a, 0x0a, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x00, 0x52, 0x09, 0x75, + 0x6e, 0x63, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x1f, 0x0a, 0x08, 0x63, + 0x6f, 
0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, + 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x08, + 0x65, 0x76, 0x6d, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, - 0x68, 0x48, 0x04, 0x52, 0x07, 0x65, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x3e, 0x0a, 0x0f, 0x65, 0x74, 0x78, 0x5f, 0x72, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x5f, 0x68, 0x61, - 0x73, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x05, 0x52, 0x0d, 0x65, - 0x74, 0x78, 0x52, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x36, 0x0a, 0x0d, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0c, 0x6d, 0x61, 0x6e, 0x69, 0x66, - 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x39, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, - 0x70, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, - 0x48, 0x06, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x48, 0x61, 0x73, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x07, 0x52, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, - 0x75, 0x6c, 0x74, 0x79, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x12, 0x24, - 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x5f, 0x73, - 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x44, 0x65, - 0x6c, 0x74, 0x61, 0x53, 0x12, 0x38, 0x0a, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x75, - 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x5f, - 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x55, - 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x53, 0x75, 0x62, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x53, 0x12, 0x1e, - 0x0a, 0x08, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, - 0x48, 0x08, 0x52, 0x07, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x53, 0x88, 0x01, 0x01, 0x12, 0x16, - 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x48, 0x09, 0x52, 0x08, 0x67, 0x61, 0x73, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, - 0x75, 0x73, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x48, 0x0a, 0x52, 0x07, 0x67, 0x61, - 0x73, 0x55, 0x73, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x62, 0x61, 0x73, 0x65, - 0x5f, 0x66, 0x65, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x0b, 0x52, 0x07, 0x62, 0x61, - 0x73, 0x65, 0x46, 0x65, 0x65, 
0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x0c, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, - 0x12, 0x17, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x48, 0x0d, - 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x78, 0x74, - 0x72, 0x61, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x0e, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x08, 0x6d, 0x69, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x0f, 0x52, 0x07, 0x6d, 0x69, 0x78, - 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x48, 0x10, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x88, - 0x01, 0x01, 0x12, 0x33, 0x0a, 0x09, 0x75, 0x74, 0x78, 0x6f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, - 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x11, 0x52, 0x08, 0x75, 0x74, 0x78, 0x6f, - 0x52, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0c, 0x65, 0x74, 0x78, 0x5f, 0x73, - 0x65, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, - 0x48, 0x12, 0x52, 0x0a, 0x65, 0x74, 0x78, 0x53, 0x65, 0x74, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x2e, 0x0a, 0x10, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x63, 0x79, 0x5f, - 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x04, 0x48, 0x13, 0x52, 0x0f, 0x65, - 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x63, 0x79, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x88, 0x01, - 0x01, 0x12, 0x2c, 0x0a, 0x0f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x48, 0x14, 0x52, 0x0e, 0x74, 0x68, - 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, - 0x2e, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x04, 0x48, 0x15, 0x52, 0x0f, 0x65, 0x78, 0x70, - 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, - 0x46, 0x0a, 0x13, 0x65, 0x74, 0x78, 0x5f, 0x65, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, 0x5f, - 0x73, 0x6c, 0x69, 0x63, 0x65, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, + 0x68, 0x48, 0x02, 0x52, 0x07, 0x65, 0x76, 0x6d, 0x52, 0x6f, 0x6f, 0x74, 0x88, 0x01, 0x01, 0x12, + 0x2f, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, + 0x61, 0x73, 0x68, 0x48, 0x03, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x31, 0x0a, 0x08, 0x65, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x04, 0x52, 0x07, 
0x65, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x0f, 0x65, 0x74, 0x78, 0x5f, 0x72, 0x6f, 0x6c, 0x6c, 0x75, + 0x70, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, - 0x16, 0x52, 0x11, 0x65, 0x74, 0x78, 0x45, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x6c, - 0x69, 0x63, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x3d, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x65, - 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x75, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x05, 0x52, 0x0d, 0x65, 0x74, 0x78, 0x52, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x48, 0x61, 0x73, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x0d, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0c, 0x6d, + 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x39, 0x0a, 0x0c, 0x72, + 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x48, 0x61, 0x73, 0x68, 0x48, 0x06, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x48, + 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, + 0x75, 0x6c, 0x74, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x07, 0x52, 0x0a, 0x64, 0x69, + 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x6f, 0x70, 0x79, 0x18, 0x0b, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x6f, + 0x70, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x65, 0x6c, + 0x74, 0x61, 0x5f, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x53, 0x12, 0x38, 0x0a, 0x19, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x64, 0x65, + 0x6c, 0x74, 0x61, 0x5f, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x15, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x53, 0x75, 0x62, 0x44, 0x65, 0x6c, 0x74, + 0x61, 0x53, 0x12, 0x1e, 0x0a, 0x08, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0c, 0x48, 0x08, 0x52, 0x07, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x53, 0x88, + 0x01, 0x01, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x03, + 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x67, 0x61, + 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x48, 0x09, 0x52, + 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, + 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x48, 0x0a, + 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, + 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x0b, + 0x52, 0x07, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x08, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x15, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x0c, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, 0x14, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x0d, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x88, 0x01, 0x01, 0x12, + 0x31, 0x0a, 0x08, 0x6d, 0x69, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x48, 0x61, 0x73, 0x68, 0x48, 0x0e, 0x52, 0x07, 0x6d, 0x69, 0x78, 0x48, 0x61, 0x73, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, + 0x04, 0x48, 0x0f, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, + 0x09, 0x75, 0x74, 0x78, 0x6f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, + 0x61, 0x73, 0x68, 0x48, 0x10, 0x52, 0x08, 0x75, 0x74, 0x78, 0x6f, 0x52, 0x6f, 0x6f, 0x74, 0x88, + 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0c, 0x65, 0x74, 0x78, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x11, 0x52, 0x0a, 0x65, + 0x74, 0x78, 0x53, 0x65, 0x74, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x10, + 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, + 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x48, 0x12, 0x52, 0x0f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, + 0x65, 0x6e, 0x63, 0x79, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, 0x0f, + 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x1a, 0x20, 0x01, 0x28, 0x04, 0x48, 0x13, 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x2e, 0x0a, 0x10, 0x65, 0x78, + 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x1b, + 0x20, 0x01, 0x28, 0x04, 0x48, 0x14, 0x52, 0x0f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x13, 0x65, 0x74, + 0x78, 0x5f, 0x65, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x6c, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x15, 0x52, 0x11, 0x65, 0x74, + 0x78, 0x45, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x73, 0x88, + 0x01, 0x01, 0x12, 0x3d, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x75, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x16, 0x52, + 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x65, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x75, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x46, 0x0a, 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, + 0x68, 
0x48, 0x17, 0x52, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, + 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x75, 0x6e, + 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x63, 0x6f, 0x69, + 0x6e, 0x62, 0x61, 0x73, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x65, 0x76, 0x6d, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x0b, + 0x0a, 0x09, 0x5f, 0x65, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, + 0x65, 0x74, 0x78, 0x5f, 0x72, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, + 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x42, + 0x0b, 0x0a, 0x09, 0x5f, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x5f, 0x73, 0x42, 0x0c, 0x0a, 0x0a, + 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x67, + 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x62, 0x61, 0x73, 0x65, + 0x5f, 0x66, 0x65, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x42, 0x0b, 0x0a, 0x09, 0x5f, + 0x6d, 0x69, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6e, 0x6f, 0x6e, + 0x63, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x75, 0x74, 0x78, 0x6f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x65, 0x74, 0x78, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x63, 0x79, + 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, + 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, + 0x16, 0x0a, 0x14, 0x5f, 0x65, 0x74, 0x78, 0x5f, 0x65, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, + 0x5f, 0x73, 0x6c, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x72, 0x69, 0x6d, + 0x65, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x75, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, + 0x73, 0x68, 0x22, 0xf7, 0x06, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, + 0x12, 0x13, 0x0a, 0x02, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x02, + 0x74, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x88, 0x01, 0x01, + 0x12, 0x19, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, + 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, 0x12, 0x15, 0x0a, 0x03, 0x67, + 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x48, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x88, + 0x01, 0x01, 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, + 0x48, 0x05, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x5f, 
0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x06, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0b, 0x67, + 0x61, 0x73, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, + 0x48, 0x07, 0x52, 0x09, 0x67, 0x61, 0x73, 0x46, 0x65, 0x65, 0x43, 0x61, 0x70, 0x88, 0x01, 0x01, + 0x12, 0x23, 0x0a, 0x0b, 0x67, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x70, 0x5f, 0x63, 0x61, 0x70, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x08, 0x52, 0x09, 0x67, 0x61, 0x73, 0x54, 0x69, 0x70, 0x43, + 0x61, 0x70, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, + 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, + 0x73, 0x74, 0x48, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, + 0x88, 0x01, 0x01, 0x12, 0x11, 0x0a, 0x01, 0x76, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x0a, + 0x52, 0x01, 0x76, 0x88, 0x01, 0x01, 0x12, 0x11, 0x0a, 0x01, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x0b, 0x52, 0x01, 0x72, 0x88, 0x01, 0x01, 0x12, 0x11, 0x0a, 0x01, 0x73, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x0c, 0x48, 0x0c, 0x52, 0x01, 0x73, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x13, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x78, 0x5f, 0x68, + 0x61, 0x73, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x0d, 0x52, 0x11, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x78, 0x48, 0x61, 0x73, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x65, 0x74, 0x78, 0x5f, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x0e, 0x52, 0x08, 0x65, 0x74, 0x78, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x78, 0x5f, 0x69, 0x6e, 0x73, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x78, 0x49, 0x6e, 0x73, 0x48, 0x0f, 0x52, 0x05, 0x74, 0x78, 0x49, + 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x30, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x48, 0x10, 0x52, 0x06, 0x74, 0x78, + 0x4f, 0x75, 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x11, 0x52, 0x09, 0x73, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x74, + 0x78, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x12, + 0x52, 0x09, 0x65, 0x74, 0x78, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x07, + 0x0a, 0x05, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x74, 0x6f, 0x42, 0x08, + 0x0a, 0x06, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x67, 0x61, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x63, 0x61, 0x70, + 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x67, 0x61, 0x73, 0x5f, 
0x74, 0x69, 0x70, 0x5f, 0x63, 0x61, 0x70, + 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x73, 0x74, + 0x42, 0x04, 0x0a, 0x02, 0x5f, 0x76, 0x42, 0x04, 0x0a, 0x02, 0x5f, 0x72, 0x42, 0x04, 0x0a, 0x02, + 0x5f, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x65, + 0x74, 0x78, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x78, 0x5f, + 0x69, 0x6e, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x42, + 0x0c, 0x0a, 0x0a, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x0d, 0x0a, + 0x0b, 0x5f, 0x65, 0x74, 0x78, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x22, 0x50, 0x0a, 0x11, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3b, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3c, + 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2c, + 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x3e, 0x0a, 0x0d, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, + 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, - 0x73, 0x68, 0x48, 0x17, 0x52, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x65, 0x54, 0x65, 0x72, 0x6d, 0x69, - 0x6e, 0x75, 0x73, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, - 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x18, 0x52, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, - 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x42, 0x0d, - 0x0a, 0x0b, 0x5f, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x0b, 0x0a, - 0x09, 0x5f, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x65, - 0x76, 0x6d, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, 0x78, 0x5f, 0x68, - 0x61, 0x73, 0x68, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x65, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x65, 0x74, 0x78, 0x5f, 0x72, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x5f, - 0x68, 0x61, 0x73, 0x68, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, - 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, - 0x75, 0x6c, 0x74, 0x79, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x64, 0x5f, - 0x73, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, - 0x0b, 0x0a, 0x09, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x42, 
0x0b, 0x0a, 0x09, - 0x5f, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, - 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6d, 0x69, - 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, - 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x75, 0x74, 0x78, 0x6f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x42, 0x0f, - 0x0a, 0x0d, 0x5f, 0x65, 0x74, 0x78, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, - 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x73, - 0x63, 0x6f, 0x72, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, - 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x78, 0x70, - 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x16, 0x0a, - 0x14, 0x5f, 0x65, 0x74, 0x78, 0x5f, 0x65, 0x6c, 0x69, 0x67, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x73, - 0x6c, 0x69, 0x63, 0x65, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x65, 0x5f, - 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x75, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, - 0x22, 0xf7, 0x06, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, 0x12, 0x13, - 0x0a, 0x02, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x02, 0x74, 0x6f, - 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x04, 0x48, 0x02, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x88, 0x01, 0x01, 0x12, 0x19, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x03, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01, 0x01, 0x12, 0x15, 0x0a, 0x03, 0x67, 0x61, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x48, 0x04, 0x52, 0x03, 0x67, 0x61, 0x73, 0x88, 0x01, 0x01, - 0x12, 0x17, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x05, - 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x1e, 0x0a, 0x08, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x06, 0x52, 0x07, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0b, 0x67, 0x61, 0x73, - 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x07, - 0x52, 0x09, 0x67, 0x61, 0x73, 0x46, 0x65, 0x65, 0x43, 0x61, 0x70, 0x88, 0x01, 0x01, 0x12, 0x23, - 0x0a, 0x0b, 0x67, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x70, 0x5f, 0x63, 0x61, 0x70, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0c, 0x48, 0x08, 0x52, 0x09, 0x67, 0x61, 0x73, 0x54, 0x69, 0x70, 0x43, 0x61, 0x70, - 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, - 0x73, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, - 0x48, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x88, 0x01, - 0x01, 0x12, 0x11, 0x0a, 0x01, 0x76, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x0a, 0x52, 0x01, - 0x76, 
0x88, 0x01, 0x01, 0x12, 0x11, 0x0a, 0x01, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x48, - 0x0b, 0x52, 0x01, 0x72, 0x88, 0x01, 0x01, 0x12, 0x11, 0x0a, 0x01, 0x73, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x0c, 0x48, 0x0c, 0x52, 0x01, 0x73, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x13, 0x6f, 0x72, - 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, - 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x0d, 0x52, 0x11, 0x6f, 0x72, - 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x78, 0x48, 0x61, 0x73, 0x68, 0x88, - 0x01, 0x01, 0x12, 0x20, 0x0a, 0x09, 0x65, 0x74, 0x78, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x0e, 0x52, 0x08, 0x65, 0x74, 0x78, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x78, 0x5f, 0x69, 0x6e, 0x73, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x54, 0x78, 0x49, 0x6e, 0x73, 0x48, 0x0f, 0x52, 0x05, 0x74, 0x78, 0x49, 0x6e, 0x73, - 0x88, 0x01, 0x01, 0x12, 0x30, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x18, 0x11, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x54, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x48, 0x10, 0x52, 0x06, 0x74, 0x78, 0x4f, 0x75, - 0x74, 0x73, 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x11, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x74, 0x78, 0x5f, - 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x12, 0x52, 0x09, - 0x65, 0x74, 0x78, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x74, 0x6f, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x67, 0x61, 0x73, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x42, 0x0e, - 0x0a, 0x0c, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x42, 0x0e, - 0x0a, 0x0c, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x70, 0x5f, 0x63, 0x61, 0x70, 0x42, 0x0e, - 0x0a, 0x0c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x42, 0x04, - 0x0a, 0x02, 0x5f, 0x76, 0x42, 0x04, 0x0a, 0x02, 0x5f, 0x72, 0x42, 0x04, 0x0a, 0x02, 0x5f, 0x73, - 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x65, 0x74, 0x78, - 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x78, 0x5f, 0x69, 0x6e, - 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x42, 0x0c, 0x0a, - 0x0a, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, - 0x65, 0x74, 0x78, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x22, 0x50, 0x0a, 0x11, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x3b, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3c, 0x0a, 0x0c, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2c, 0x0a, 0x07, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x73, 0x68, 0x52, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x0f, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x12, + 0x3c, 0x0a, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x75, 0x70, 0x6c, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x52, + 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x22, 0x8e, 0x04, + 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, + 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x37, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x01, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x02, 0x52, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, + 0x75, 0x6c, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x03, 0x52, 0x0a, 0x64, 0x69, + 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x79, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x07, 0x74, + 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, + 0x04, 0x52, 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, + 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x48, 0x05, 0x52, 0x05, 0x6e, + 0x6f, 0x6e, 0x63, 0x65, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x48, 0x06, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, + 0x31, 0x0a, 0x08, 0x6d, 0x69, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x48, 0x61, 0x73, 0x68, 0x48, 0x07, 0x52, 0x07, 0x6d, 0x69, 0x78, 0x48, 0x61, 0x73, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x17, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, + 0x48, 0x08, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x88, 
0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x69, + 0x63, 0x75, 0x6c, 0x74, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x42, 0x0b, 0x0a, 0x09, 0x5f, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6d, 0x69, 0x78, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x55, + 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x0a, 0x77, 0x6f, 0x5f, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x09, 0x77, 0x6f, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xe9, 0x03, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, + 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x2f, 0x0a, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x3e, 0x0a, 0x0d, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6d, - 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, - 0x52, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x0f, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, - 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x52, 0x0c, 0x61, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x73, 0x22, 0x60, 0x0a, 0x10, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, - 0x68, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x22, 0xdf, 0x02, - 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x46, 0x6f, - 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x6f, 0x73, 0x74, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x70, 0x6f, 0x73, 0x74, 0x53, 
0x74, 0x61, 0x74, - 0x65, 0x4f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x6d, - 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x47, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x78, 0x5f, - 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x74, - 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3f, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, - 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x41, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x4c, 0x6f, 0x67, 0x73, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x04, 0x65, 0x74, 0x78, 0x73, 0x18, 0x06, + 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x41, + 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x04, - 0x65, 0x74, 0x78, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x22, - 0x54, 0x0a, 0x17, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, - 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x39, 0x0a, 0x08, 0x72, 0x65, - 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, - 0x74, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x08, 0x72, 0x65, 0x63, - 0x65, 0x69, 0x70, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, - 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x41, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x06, - 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, + 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x01, + 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x3a, 0x0a, 0x06, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, + 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x48, 
0x02, 0x52, 0x06, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, + 0x10, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x48, 0x03, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, + 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x62, 0x6c, 0x6f, 0x63, + 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x48, + 0x04, 0x52, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x88, 0x01, 0x01, 0x12, 0x43, + 0x0a, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x48, 0x05, 0x52, + 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, + 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x0f, + 0x0a, 0x0d, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, + 0x09, 0x0a, 0x07, 0x5f, 0x75, 0x6e, 0x63, 0x6c, 0x65, 0x73, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, + 0x78, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, + 0x0b, 0x0a, 0x09, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x42, 0x13, 0x0a, 0x11, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, + 0x73, 0x22, 0xda, 0x01, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3e, 0x0a, 0x09, 0x77, 0x6f, 0x5f, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x77, 0x6f, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x6f, 0x5f, 0x62, 0x6f, 0x64, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x6f, + 0x64, 0x79, 0x48, 0x01, 0x52, 0x06, 0x77, 0x6f, 0x42, 0x6f, 0x64, 0x79, 0x88, 0x01, 0x01, 0x12, + 0x2c, 0x0a, 0x02, 0x74, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x02, 0x52, 0x02, 0x74, 0x78, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, + 0x0a, 0x5f, 0x77, 0x6f, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x5f, + 0x77, 0x6f, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x74, 0x78, 0x22, 0x4d, + 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x39, 0x0a, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 
0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x22, 0x60, 0x0a, + 0x10, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x75, 0x70, 0x6c, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, + 0x61, 0x73, 0x68, 0x52, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4b, 0x65, 0x79, 0x22, + 0xdf, 0x02, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, + 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x70, 0x6f, + 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x70, 0x6f, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x4f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x63, + 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x07, 0x74, + 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, - 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x44, 0x0a, 0x13, 0x50, + 0x06, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3f, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x67, 0x73, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, - 0x67, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x04, 0x6c, 0x6f, 0x67, - 0x73, 0x22, 0x90, 0x01, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x65, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x07, 0x74, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x65, 
0x72, 0x6d, 0x69, 0x6e, 0x69, 0x48, - 0x01, 0x52, 0x07, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, - 0x07, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x69, 0x22, 0x76, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x69, 0x12, 0x32, 0x0a, 0x0b, 0x64, 0x6f, 0x6d, 0x5f, 0x74, 0x65, 0x72, 0x6d, - 0x69, 0x6e, 0x69, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0a, 0x64, 0x6f, - 0x6d, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x12, 0x32, 0x0a, 0x0b, 0x73, 0x75, 0x62, 0x5f, - 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x67, 0x65, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x2c, 0x0a, 0x04, 0x65, 0x74, 0x78, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x04, 0x65, 0x74, 0x78, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, + 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, + 0x64, 0x22, 0x54, 0x0a, 0x17, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, + 0x74, 0x73, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x63, 0x65, + 0x69, 0x70, 0x74, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x08, 0x72, + 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x4c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x2e, + 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, + 0x0a, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, + 0x68, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x44, 0x0a, + 0x13, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x67, 0x73, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x4c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x04, 0x6c, + 0x6f, 0x67, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x65, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x02, 0x77, 0x6f, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, + 0x52, 0x02, 0x77, 0x6f, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x07, 0x74, 0x65, 0x72, 
0x6d, 0x69, + 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x48, 0x01, 0x52, + 0x07, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x88, 0x01, 0x01, 0x42, 0x05, 0x0a, 0x03, 0x5f, + 0x77, 0x6f, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x22, 0x76, + 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x12, 0x32, + 0x0a, 0x0b, 0x64, 0x6f, 0x6d, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0a, 0x64, 0x6f, 0x6d, 0x54, 0x65, 0x72, 0x6d, 0x69, + 0x6e, 0x69, 0x12, 0x32, 0x0a, 0x0b, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x69, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x52, 0x0a, 0x73, 0x75, 0x62, 0x54, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x22, 0x40, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, + 0x74, 0x78, 0x53, 0x65, 0x74, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09, 0x65, 0x74, 0x78, + 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x74, + 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x10, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x45, 0x74, 0x78, 0x73, 0x12, 0x33, 0x0a, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x88, + 0x01, 0x01, 0x12, 0x31, 0x0a, 0x04, 0x65, 0x74, 0x78, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x01, 0x52, 0x04, 0x65, 0x74, + 0x78, 0x73, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x65, 0x74, 0x78, 0x73, 0x22, 0xa8, 0x01, 0x0a, 0x16, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x45, 0x74, 0x78, 0x73, 0x52, 0x6f, + 0x6c, 0x6c, 0x75, 0x70, 0x12, 0x33, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x06, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x0b, 0x65, 0x74, 0x78, + 0x73, 0x5f, 0x72, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x01, 0x52, 0x0a, 0x65, 0x74, 0x78, 0x73, + 0x52, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x68, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x65, 0x74, 0x78, 0x73, 0x5f, 0x72, 0x6f, + 0x6c, 0x6c, 0x75, 0x70, 0x22, 0x35, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x78, 0x49, + 0x6e, 0x73, 
0x12, 0x27, 0x0a, 0x06, 0x74, 0x78, 0x5f, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x54, 0x78, 0x49, 0x6e, 0x52, 0x05, 0x74, 0x78, 0x49, 0x6e, 0x73, 0x22, 0x39, 0x0a, 0x0b, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x78, + 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x78, 0x4f, 0x75, 0x74, 0x52, 0x06, + 0x74, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x22, 0x95, 0x01, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x54, 0x78, 0x49, 0x6e, 0x12, 0x47, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, + 0x5f, 0x6f, 0x75, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4f, 0x75, + 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, + 0x75, 0x73, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1c, 0x0a, + 0x07, 0x70, 0x75, 0x62, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, + 0x52, 0x06, 0x70, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, + 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6f, 0x75, 0x74, 0x5f, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, 0x75, 0x62, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x69, + 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x2a, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, - 0x52, 0x0a, 0x73, 0x75, 0x62, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x69, 0x22, 0x40, 0x0a, 0x0b, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x45, 0x74, 0x78, 0x53, 0x65, 0x74, 0x12, 0x22, 0x0a, 0x0a, 0x65, - 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, - 0x00, 0x52, 0x09, 0x65, 0x74, 0x78, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x88, 0x01, 0x01, 0x42, - 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x74, 0x78, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x8a, - 0x01, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x45, - 0x74, 0x78, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x04, 0x65, 0x74, 0x78, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x01, 0x52, 0x04, - 0x65, 0x74, 0x78, 0x73, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x65, 0x74, 0x78, 0x73, 0x22, 0xa4, 0x01, 0x0a, 0x16, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x45, 0x74, 0x78, 0x73, - 0x52, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x12, 0x2f, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 
0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x0b, 0x65, 0x74, 0x78, 0x73, 0x5f, - 0x72, 0x6f, 0x6c, 0x6c, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x01, 0x52, 0x0a, 0x65, 0x74, 0x78, 0x73, 0x52, 0x6f, - 0x6c, 0x6c, 0x75, 0x70, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x65, 0x74, 0x78, 0x73, 0x5f, 0x72, 0x6f, 0x6c, 0x6c, - 0x75, 0x70, 0x22, 0x35, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x78, 0x49, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x06, 0x74, 0x78, 0x5f, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x78, - 0x49, 0x6e, 0x52, 0x05, 0x74, 0x78, 0x49, 0x6e, 0x73, 0x22, 0x39, 0x0a, 0x0b, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x54, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x78, 0x5f, 0x6f, - 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x78, 0x4f, 0x75, 0x74, 0x52, 0x06, 0x74, 0x78, - 0x4f, 0x75, 0x74, 0x73, 0x22, 0x95, 0x01, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x78, - 0x49, 0x6e, 0x12, 0x47, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6f, - 0x75, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4f, 0x75, 0x74, 0x50, - 0x6f, 0x69, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, - 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x12, 0x1c, 0x0a, 0x07, 0x70, - 0x75, 0x62, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x06, - 0x70, 0x75, 0x62, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x70, 0x72, - 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6f, 0x75, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, - 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, 0x75, 0x62, 0x5f, 0x6b, 0x65, 0x79, 0x22, 0x69, 0x0a, 0x0d, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4f, 0x75, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2a, 0x0a, - 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x00, - 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, 0x08, 0x0a, - 0x06, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x71, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x54, 0x78, 0x4f, 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0c, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x0c, 0x64, - 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x1d, - 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, - 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, - 0x0d, 0x5f, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, - 0x0a, 0x08, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x6e, - 0x74, 0x2d, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x2d, - 0x71, 0x75, 0x61, 0x69, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x48, 0x00, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x05, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x42, + 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x71, 0x0a, 0x0a, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x54, 0x78, 0x4f, 0x75, 0x74, 0x12, 0x27, 0x0a, 0x0c, 0x64, 0x65, 0x6e, 0x6f, 0x6d, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, + 0x0c, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, + 0x12, 0x1d, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x48, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x88, 0x01, 0x01, 0x42, + 0x0f, 0x0a, 0x0d, 0x5f, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x33, 0x5a, 0x31, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x6d, 0x69, 0x6e, + 0x61, 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x2f, 0x67, + 0x6f, 0x2d, 0x71, 0x75, 0x61, 0x69, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2016,93 +2284,105 @@ func file_core_types_proto_block_proto_rawDescGZIP() []byte { return file_core_types_proto_block_proto_rawDescData } -var file_core_types_proto_block_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_core_types_proto_block_proto_msgTypes = make([]protoimpl.MessageInfo, 26) var file_core_types_proto_block_proto_goTypes = []interface{}{ - (*ProtoBlock)(nil), // 0: block.ProtoBlock - (*ProtoBody)(nil), // 1: block.ProtoBody - (*ProtoHeader)(nil), // 2: block.ProtoHeader - (*ProtoTransaction)(nil), // 3: block.ProtoTransaction - (*ProtoTransactions)(nil), // 4: block.ProtoTransactions - (*ProtoHeaders)(nil), // 5: block.ProtoHeaders - (*ProtoManifest)(nil), // 6: block.ProtoManifest - (*ProtoAccessList)(nil), // 7: block.ProtoAccessList - (*ProtoAccessTuple)(nil), // 8: block.ProtoAccessTuple - (*ProtoReceiptForStorage)(nil), // 9: block.ProtoReceiptForStorage - (*ProtoReceiptsForStorage)(nil), // 10: block.ProtoReceiptsForStorage - (*ProtoLogForStorage)(nil), // 11: block.ProtoLogForStorage - (*ProtoLogsForStorage)(nil), // 12: block.ProtoLogsForStorage - (*ProtoPendingHeader)(nil), // 13: block.ProtoPendingHeader - (*ProtoTermini)(nil), // 14: block.ProtoTermini - (*ProtoEtxSet)(nil), // 15: block.ProtoEtxSet - (*ProtoPendingEtxs)(nil), // 16: block.ProtoPendingEtxs - (*ProtoPendingEtxsRollup)(nil), // 17: block.ProtoPendingEtxsRollup - (*ProtoTxIns)(nil), // 18: block.ProtoTxIns - (*ProtoTxOuts)(nil), // 19: block.ProtoTxOuts - (*ProtoTxIn)(nil), // 20: block.ProtoTxIn - (*ProtoOutPoint)(nil), // 21: block.ProtoOutPoint - (*ProtoTxOut)(nil), // 22: block.ProtoTxOut - 
(*common.ProtoHashes)(nil), // 23: common.ProtoHashes - (*common.ProtoHash)(nil), // 24: common.ProtoHash - (*common.ProtoLocation)(nil), // 25: common.ProtoLocation - (*common.ProtoAddress)(nil), // 26: common.ProtoAddress + (*ProtoHeader)(nil), // 0: block.ProtoHeader + (*ProtoTransaction)(nil), // 1: block.ProtoTransaction + (*ProtoTransactions)(nil), // 2: block.ProtoTransactions + (*ProtoHeaders)(nil), // 3: block.ProtoHeaders + (*ProtoManifest)(nil), // 4: block.ProtoManifest + (*ProtoAccessList)(nil), // 5: block.ProtoAccessList + (*ProtoWorkObjectHeader)(nil), // 6: block.ProtoWorkObjectHeader + (*ProtoWorkObjectHeaders)(nil), // 7: block.ProtoWorkObjectHeaders + (*ProtoWorkObjectBody)(nil), // 8: block.ProtoWorkObjectBody + (*ProtoWorkObject)(nil), // 9: block.ProtoWorkObject + (*ProtoWorkObjects)(nil), // 10: block.ProtoWorkObjects + (*ProtoAccessTuple)(nil), // 11: block.ProtoAccessTuple + (*ProtoReceiptForStorage)(nil), // 12: block.ProtoReceiptForStorage + (*ProtoReceiptsForStorage)(nil), // 13: block.ProtoReceiptsForStorage + (*ProtoLogForStorage)(nil), // 14: block.ProtoLogForStorage + (*ProtoLogsForStorage)(nil), // 15: block.ProtoLogsForStorage + (*ProtoPendingHeader)(nil), // 16: block.ProtoPendingHeader + (*ProtoTermini)(nil), // 17: block.ProtoTermini + (*ProtoEtxSet)(nil), // 18: block.ProtoEtxSet + (*ProtoPendingEtxs)(nil), // 19: block.ProtoPendingEtxs + (*ProtoPendingEtxsRollup)(nil), // 20: block.ProtoPendingEtxsRollup + (*ProtoTxIns)(nil), // 21: block.ProtoTxIns + (*ProtoTxOuts)(nil), // 22: block.ProtoTxOuts + (*ProtoTxIn)(nil), // 23: block.ProtoTxIn + (*ProtoOutPoint)(nil), // 24: block.ProtoOutPoint + (*ProtoTxOut)(nil), // 25: block.ProtoTxOut + (*common.ProtoHash)(nil), // 26: common.ProtoHash + (*common.ProtoLocation)(nil), // 27: common.ProtoLocation + (*common.ProtoHashes)(nil), // 28: common.ProtoHashes + (*common.ProtoAddress)(nil), // 29: common.ProtoAddress } var file_core_types_proto_block_proto_depIdxs = []int32{ - 2, // 0: block.ProtoBlock.header:type_name -> block.ProtoHeader - 1, // 1: block.ProtoBlock.body:type_name -> block.ProtoBody - 4, // 2: block.ProtoBody.txs:type_name -> block.ProtoTransactions - 5, // 3: block.ProtoBody.uncles:type_name -> block.ProtoHeaders - 4, // 4: block.ProtoBody.etxs:type_name -> block.ProtoTransactions - 6, // 5: block.ProtoBody.manifest:type_name -> block.ProtoManifest - 23, // 6: block.ProtoBody.interlink_hashes:type_name -> common.ProtoHashes - 24, // 7: block.ProtoHeader.parent_hash:type_name -> common.ProtoHash - 24, // 8: block.ProtoHeader.uncle_hash:type_name -> common.ProtoHash - 24, // 9: block.ProtoHeader.evm_root:type_name -> common.ProtoHash - 24, // 10: block.ProtoHeader.tx_hash:type_name -> common.ProtoHash - 24, // 11: block.ProtoHeader.etx_hash:type_name -> common.ProtoHash - 24, // 12: block.ProtoHeader.etx_rollup_hash:type_name -> common.ProtoHash - 24, // 13: block.ProtoHeader.manifest_hash:type_name -> common.ProtoHash - 24, // 14: block.ProtoHeader.receipt_hash:type_name -> common.ProtoHash - 25, // 15: block.ProtoHeader.location:type_name -> common.ProtoLocation - 24, // 16: block.ProtoHeader.mix_hash:type_name -> common.ProtoHash - 24, // 17: block.ProtoHeader.utxo_root:type_name -> common.ProtoHash - 24, // 18: block.ProtoHeader.etx_set_hash:type_name -> common.ProtoHash - 24, // 19: block.ProtoHeader.etx_eligible_slices:type_name -> common.ProtoHash - 24, // 20: block.ProtoHeader.prime_terminus:type_name -> common.ProtoHash - 24, // 21: block.ProtoHeader.interlink_root_hash:type_name -> 
common.ProtoHash - 7, // 22: block.ProtoTransaction.access_list:type_name -> block.ProtoAccessList - 24, // 23: block.ProtoTransaction.originating_tx_hash:type_name -> common.ProtoHash - 18, // 24: block.ProtoTransaction.tx_ins:type_name -> block.ProtoTxIns - 19, // 25: block.ProtoTransaction.tx_outs:type_name -> block.ProtoTxOuts - 3, // 26: block.ProtoTransactions.transactions:type_name -> block.ProtoTransaction - 2, // 27: block.ProtoHeaders.headers:type_name -> block.ProtoHeader - 24, // 28: block.ProtoManifest.manifest:type_name -> common.ProtoHash - 8, // 29: block.ProtoAccessList.access_tuples:type_name -> block.ProtoAccessTuple - 24, // 30: block.ProtoAccessTuple.storage_key:type_name -> common.ProtoHash - 24, // 31: block.ProtoReceiptForStorage.tx_hash:type_name -> common.ProtoHash - 26, // 32: block.ProtoReceiptForStorage.contract_address:type_name -> common.ProtoAddress - 12, // 33: block.ProtoReceiptForStorage.logs:type_name -> block.ProtoLogsForStorage - 4, // 34: block.ProtoReceiptForStorage.etxs:type_name -> block.ProtoTransactions - 9, // 35: block.ProtoReceiptsForStorage.receipts:type_name -> block.ProtoReceiptForStorage - 26, // 36: block.ProtoLogForStorage.address:type_name -> common.ProtoAddress - 24, // 37: block.ProtoLogForStorage.topics:type_name -> common.ProtoHash - 11, // 38: block.ProtoLogsForStorage.logs:type_name -> block.ProtoLogForStorage - 2, // 39: block.ProtoPendingHeader.header:type_name -> block.ProtoHeader - 14, // 40: block.ProtoPendingHeader.termini:type_name -> block.ProtoTermini - 24, // 41: block.ProtoTermini.dom_termini:type_name -> common.ProtoHash - 24, // 42: block.ProtoTermini.sub_termini:type_name -> common.ProtoHash - 2, // 43: block.ProtoPendingEtxs.header:type_name -> block.ProtoHeader - 4, // 44: block.ProtoPendingEtxs.etxs:type_name -> block.ProtoTransactions - 2, // 45: block.ProtoPendingEtxsRollup.header:type_name -> block.ProtoHeader - 4, // 46: block.ProtoPendingEtxsRollup.etxs_rollup:type_name -> block.ProtoTransactions - 20, // 47: block.ProtoTxIns.tx_ins:type_name -> block.ProtoTxIn - 22, // 48: block.ProtoTxOuts.tx_outs:type_name -> block.ProtoTxOut - 21, // 49: block.ProtoTxIn.previous_out_point:type_name -> block.ProtoOutPoint - 24, // 50: block.ProtoOutPoint.hash:type_name -> common.ProtoHash - 51, // [51:51] is the sub-list for method output_type - 51, // [51:51] is the sub-list for method input_type - 51, // [51:51] is the sub-list for extension type_name - 51, // [51:51] is the sub-list for extension extendee - 0, // [0:51] is the sub-list for field type_name + 26, // 0: block.ProtoHeader.parent_hash:type_name -> common.ProtoHash + 26, // 1: block.ProtoHeader.uncle_hash:type_name -> common.ProtoHash + 26, // 2: block.ProtoHeader.evm_root:type_name -> common.ProtoHash + 26, // 3: block.ProtoHeader.tx_hash:type_name -> common.ProtoHash + 26, // 4: block.ProtoHeader.etx_hash:type_name -> common.ProtoHash + 26, // 5: block.ProtoHeader.etx_rollup_hash:type_name -> common.ProtoHash + 26, // 6: block.ProtoHeader.manifest_hash:type_name -> common.ProtoHash + 26, // 7: block.ProtoHeader.receipt_hash:type_name -> common.ProtoHash + 27, // 8: block.ProtoHeader.location:type_name -> common.ProtoLocation + 26, // 9: block.ProtoHeader.mix_hash:type_name -> common.ProtoHash + 26, // 10: block.ProtoHeader.utxo_root:type_name -> common.ProtoHash + 26, // 11: block.ProtoHeader.etx_set_hash:type_name -> common.ProtoHash + 26, // 12: block.ProtoHeader.etx_eligible_slices:type_name -> common.ProtoHash + 26, // 13: 
block.ProtoHeader.prime_terminus:type_name -> common.ProtoHash + 26, // 14: block.ProtoHeader.interlink_root_hash:type_name -> common.ProtoHash + 5, // 15: block.ProtoTransaction.access_list:type_name -> block.ProtoAccessList + 26, // 16: block.ProtoTransaction.originating_tx_hash:type_name -> common.ProtoHash + 21, // 17: block.ProtoTransaction.tx_ins:type_name -> block.ProtoTxIns + 22, // 18: block.ProtoTransaction.tx_outs:type_name -> block.ProtoTxOuts + 1, // 19: block.ProtoTransactions.transactions:type_name -> block.ProtoTransaction + 0, // 20: block.ProtoHeaders.headers:type_name -> block.ProtoHeader + 26, // 21: block.ProtoManifest.manifest:type_name -> common.ProtoHash + 11, // 22: block.ProtoAccessList.access_tuples:type_name -> block.ProtoAccessTuple + 26, // 23: block.ProtoWorkObjectHeader.header_hash:type_name -> common.ProtoHash + 26, // 24: block.ProtoWorkObjectHeader.parent_hash:type_name -> common.ProtoHash + 26, // 25: block.ProtoWorkObjectHeader.tx_hash:type_name -> common.ProtoHash + 27, // 26: block.ProtoWorkObjectHeader.location:type_name -> common.ProtoLocation + 26, // 27: block.ProtoWorkObjectHeader.mix_hash:type_name -> common.ProtoHash + 6, // 28: block.ProtoWorkObjectHeaders.wo_headers:type_name -> block.ProtoWorkObjectHeader + 0, // 29: block.ProtoWorkObjectBody.header:type_name -> block.ProtoHeader + 2, // 30: block.ProtoWorkObjectBody.transactions:type_name -> block.ProtoTransactions + 7, // 31: block.ProtoWorkObjectBody.uncles:type_name -> block.ProtoWorkObjectHeaders + 2, // 32: block.ProtoWorkObjectBody.ext_transactions:type_name -> block.ProtoTransactions + 4, // 33: block.ProtoWorkObjectBody.manifest:type_name -> block.ProtoManifest + 28, // 34: block.ProtoWorkObjectBody.interlink_hashes:type_name -> common.ProtoHashes + 6, // 35: block.ProtoWorkObject.wo_header:type_name -> block.ProtoWorkObjectHeader + 8, // 36: block.ProtoWorkObject.wo_body:type_name -> block.ProtoWorkObjectBody + 1, // 37: block.ProtoWorkObject.tx:type_name -> block.ProtoTransaction + 9, // 38: block.ProtoWorkObjects.work_objects:type_name -> block.ProtoWorkObject + 26, // 39: block.ProtoAccessTuple.storage_key:type_name -> common.ProtoHash + 26, // 40: block.ProtoReceiptForStorage.tx_hash:type_name -> common.ProtoHash + 29, // 41: block.ProtoReceiptForStorage.contract_address:type_name -> common.ProtoAddress + 15, // 42: block.ProtoReceiptForStorage.logs:type_name -> block.ProtoLogsForStorage + 2, // 43: block.ProtoReceiptForStorage.etxs:type_name -> block.ProtoTransactions + 12, // 44: block.ProtoReceiptsForStorage.receipts:type_name -> block.ProtoReceiptForStorage + 29, // 45: block.ProtoLogForStorage.address:type_name -> common.ProtoAddress + 26, // 46: block.ProtoLogForStorage.topics:type_name -> common.ProtoHash + 14, // 47: block.ProtoLogsForStorage.logs:type_name -> block.ProtoLogForStorage + 9, // 48: block.ProtoPendingHeader.wo:type_name -> block.ProtoWorkObject + 17, // 49: block.ProtoPendingHeader.termini:type_name -> block.ProtoTermini + 26, // 50: block.ProtoTermini.dom_termini:type_name -> common.ProtoHash + 26, // 51: block.ProtoTermini.sub_termini:type_name -> common.ProtoHash + 9, // 52: block.ProtoPendingEtxs.header:type_name -> block.ProtoWorkObject + 2, // 53: block.ProtoPendingEtxs.etxs:type_name -> block.ProtoTransactions + 9, // 54: block.ProtoPendingEtxsRollup.header:type_name -> block.ProtoWorkObject + 2, // 55: block.ProtoPendingEtxsRollup.etxs_rollup:type_name -> block.ProtoTransactions + 23, // 56: block.ProtoTxIns.tx_ins:type_name -> block.ProtoTxIn + 
25, // 57: block.ProtoTxOuts.tx_outs:type_name -> block.ProtoTxOut + 24, // 58: block.ProtoTxIn.previous_out_point:type_name -> block.ProtoOutPoint + 26, // 59: block.ProtoOutPoint.hash:type_name -> common.ProtoHash + 60, // [60:60] is the sub-list for method output_type + 60, // [60:60] is the sub-list for method input_type + 60, // [60:60] is the sub-list for extension type_name + 60, // [60:60] is the sub-list for extension extendee + 0, // [0:60] is the sub-list for field type_name } func init() { file_core_types_proto_block_proto_init() } @@ -2112,7 +2392,7 @@ func file_core_types_proto_block_proto_init() { } if !protoimpl.UnsafeEnabled { file_core_types_proto_block_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoBlock); i { + switch v := v.(*ProtoHeader); i { case 0: return &v.state case 1: @@ -2124,7 +2404,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoBody); i { + switch v := v.(*ProtoTransaction); i { case 0: return &v.state case 1: @@ -2136,7 +2416,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoHeader); i { + switch v := v.(*ProtoTransactions); i { case 0: return &v.state case 1: @@ -2148,7 +2428,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoTransaction); i { + switch v := v.(*ProtoHeaders); i { case 0: return &v.state case 1: @@ -2160,7 +2440,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoTransactions); i { + switch v := v.(*ProtoManifest); i { case 0: return &v.state case 1: @@ -2172,7 +2452,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoHeaders); i { + switch v := v.(*ProtoAccessList); i { case 0: return &v.state case 1: @@ -2184,7 +2464,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoManifest); i { + switch v := v.(*ProtoWorkObjectHeader); i { case 0: return &v.state case 1: @@ -2196,7 +2476,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoAccessList); i { + switch v := v.(*ProtoWorkObjectHeaders); i { case 0: return &v.state case 1: @@ -2208,7 +2488,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoAccessTuple); i { + switch v := v.(*ProtoWorkObjectBody); i { case 0: return &v.state case 1: @@ -2220,7 +2500,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoReceiptForStorage); i { + switch v := v.(*ProtoWorkObject); i { case 0: return &v.state case 1: @@ -2232,7 +2512,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[10].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*ProtoReceiptsForStorage); i { + switch v := v.(*ProtoWorkObjects); i { case 0: return &v.state case 1: @@ -2244,7 +2524,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoLogForStorage); i { + switch v := v.(*ProtoAccessTuple); i { case 0: return &v.state case 1: @@ -2256,7 +2536,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoLogsForStorage); i { + switch v := v.(*ProtoReceiptForStorage); i { case 0: return &v.state case 1: @@ -2268,7 +2548,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoPendingHeader); i { + switch v := v.(*ProtoReceiptsForStorage); i { case 0: return &v.state case 1: @@ -2280,7 +2560,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoTermini); i { + switch v := v.(*ProtoLogForStorage); i { case 0: return &v.state case 1: @@ -2292,7 +2572,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoEtxSet); i { + switch v := v.(*ProtoLogsForStorage); i { case 0: return &v.state case 1: @@ -2304,7 +2584,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoPendingEtxs); i { + switch v := v.(*ProtoPendingHeader); i { case 0: return &v.state case 1: @@ -2316,7 +2596,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoPendingEtxsRollup); i { + switch v := v.(*ProtoTermini); i { case 0: return &v.state case 1: @@ -2328,7 +2608,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoTxIns); i { + switch v := v.(*ProtoEtxSet); i { case 0: return &v.state case 1: @@ -2340,7 +2620,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoTxOuts); i { + switch v := v.(*ProtoPendingEtxs); i { case 0: return &v.state case 1: @@ -2352,7 +2632,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoTxIn); i { + switch v := v.(*ProtoPendingEtxsRollup); i { case 0: return &v.state case 1: @@ -2364,7 +2644,7 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProtoOutPoint); i { + switch v := v.(*ProtoTxIns); i { case 0: return &v.state case 1: @@ -2376,6 +2656,42 @@ func file_core_types_proto_block_proto_init() { } } file_core_types_proto_block_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProtoTxOuts); i { + case 0: + return &v.state + 
case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_core_types_proto_block_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProtoTxIn); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_core_types_proto_block_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProtoOutPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_core_types_proto_block_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProtoTxOut); i { case 0: return &v.state @@ -2390,22 +2706,23 @@ func file_core_types_proto_block_proto_init() { } file_core_types_proto_block_proto_msgTypes[0].OneofWrappers = []interface{}{} file_core_types_proto_block_proto_msgTypes[1].OneofWrappers = []interface{}{} - file_core_types_proto_block_proto_msgTypes[2].OneofWrappers = []interface{}{} - file_core_types_proto_block_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_core_types_proto_block_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_core_types_proto_block_proto_msgTypes[15].OneofWrappers = []interface{}{} + file_core_types_proto_block_proto_msgTypes[6].OneofWrappers = []interface{}{} + file_core_types_proto_block_proto_msgTypes[8].OneofWrappers = []interface{}{} + file_core_types_proto_block_proto_msgTypes[9].OneofWrappers = []interface{}{} file_core_types_proto_block_proto_msgTypes[16].OneofWrappers = []interface{}{} - file_core_types_proto_block_proto_msgTypes[17].OneofWrappers = []interface{}{} + file_core_types_proto_block_proto_msgTypes[18].OneofWrappers = []interface{}{} + file_core_types_proto_block_proto_msgTypes[19].OneofWrappers = []interface{}{} file_core_types_proto_block_proto_msgTypes[20].OneofWrappers = []interface{}{} - file_core_types_proto_block_proto_msgTypes[21].OneofWrappers = []interface{}{} - file_core_types_proto_block_proto_msgTypes[22].OneofWrappers = []interface{}{} + file_core_types_proto_block_proto_msgTypes[23].OneofWrappers = []interface{}{} + file_core_types_proto_block_proto_msgTypes[24].OneofWrappers = []interface{}{} + file_core_types_proto_block_proto_msgTypes[25].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_core_types_proto_block_proto_rawDesc, NumEnums: 0, - NumMessages: 23, + NumMessages: 26, NumExtensions: 0, NumServices: 0, }, diff --git a/core/types/proto_block.proto b/core/types/proto_block.proto index cc098eb37b..0c29643573 100644 --- a/core/types/proto_block.proto +++ b/core/types/proto_block.proto @@ -5,20 +5,6 @@ option go_package = "github.com/dominant-strategies/go-quai/core/types"; import "common/proto_common.proto"; -// This file defines all the ProtoBuf definitions related to core -message ProtoBlock { - optional ProtoHeader header = 1; - optional ProtoBody body = 2; -} - -message ProtoBody { - optional ProtoTransactions txs = 1; - optional ProtoHeaders uncles = 2; - optional ProtoTransactions etxs = 3; - optional ProtoManifest manifest = 4; - optional common.ProtoHashes interlink_hashes = 5; -} - message ProtoHeader { repeated common.ProtoHash parent_hash = 1; optional common.ProtoHash uncle_hash = 2; @@ -39,18 +25,17 @@ message ProtoHeader { optional uint64 gas_used = 17; optional bytes base_fee = 
18; optional common.ProtoLocation location = 19; - optional uint64 time = 20; - optional bytes extra = 21; - optional common.ProtoHash mix_hash = 22; - optional uint64 nonce = 23; - optional common.ProtoHash utxo_root = 24; - optional common.ProtoHash etx_set_hash = 25; - optional uint64 efficiency_score = 26; - optional uint64 threshold_count = 27; - optional uint64 expansion_number = 28; - optional common.ProtoHash etx_eligible_slices = 29; - optional common.ProtoHash prime_terminus = 30; - optional common.ProtoHash interlink_root_hash = 31; + optional bytes extra = 20; + optional common.ProtoHash mix_hash = 21; + optional uint64 nonce = 22; + optional common.ProtoHash utxo_root = 23; + optional common.ProtoHash etx_set_hash = 24; + optional uint64 efficiency_score = 25; + optional uint64 threshold_count = 26; + optional uint64 expansion_number = 27; + optional common.ProtoHash etx_eligible_slices = 28; + optional common.ProtoHash prime_terminus = 29; + optional common.ProtoHash interlink_root_hash = 30; } message ProtoTransaction { @@ -83,6 +68,39 @@ message ProtoManifest { repeated common.ProtoHash manifest = 1; } message ProtoAccessList { repeated ProtoAccessTuple access_tuples = 1; } +message ProtoWorkObjectHeader { + optional common.ProtoHash header_hash = 1; + optional common.ProtoHash parent_hash = 2; + optional bytes number = 3; + optional bytes difficulty = 4; + optional common.ProtoHash tx_hash = 5; + optional uint64 nonce = 6; + optional common.ProtoLocation location = 7; + optional common.ProtoHash mix_hash = 8; + optional uint64 time = 9; +} + +message ProtoWorkObjectHeaders { + repeated ProtoWorkObjectHeader wo_headers = 1; +} + +message ProtoWorkObjectBody { + optional ProtoHeader header = 1; + optional ProtoTransactions transactions = 2; + optional ProtoWorkObjectHeaders uncles = 3; + optional ProtoTransactions ext_transactions = 4; + optional ProtoManifest manifest = 5; + optional common.ProtoHashes interlink_hashes = 6; +} + +message ProtoWorkObject { + optional ProtoWorkObjectHeader wo_header = 1; + optional ProtoWorkObjectBody wo_body = 2; + optional ProtoTransaction tx = 3; +} + +message ProtoWorkObjects { repeated ProtoWorkObject work_objects = 1; } + message ProtoAccessTuple { bytes address = 1; repeated common.ProtoHash storage_key = 2; @@ -111,7 +129,7 @@ message ProtoLogForStorage { message ProtoLogsForStorage { repeated ProtoLogForStorage logs = 1; } message ProtoPendingHeader { - optional ProtoHeader header = 1; + optional ProtoWorkObject wo = 1; optional ProtoTermini termini = 2; } @@ -123,12 +141,12 @@ message ProtoTermini { message ProtoEtxSet { optional bytes etx_hashes = 1; } message ProtoPendingEtxs { - optional ProtoHeader header = 1; + optional ProtoWorkObject header = 1; optional ProtoTransactions etxs = 2; } message ProtoPendingEtxsRollup { - optional ProtoHeader header = 1; + optional ProtoWorkObject header = 1; optional ProtoTransactions etxs_rollup = 2; } diff --git a/core/types/transaction.go b/core/types/transaction.go index 96a7a68b1d..a6515ba5a2 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -28,6 +28,7 @@ import ( "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/common/math" + "github.com/dominant-strategies/go-quai/log" "google.golang.org/protobuf/proto" "github.com/dominant-strategies/go-quai/crypto" @@ -108,11 +109,10 @@ type TxData interface { // ProtoEncode serializes tx into the Quai Proto Transaction format func (tx 
*Transaction) ProtoEncode() (*ProtoTransaction, error) { + protoTx := &ProtoTransaction{} if tx == nil { - return nil, errors.New("transaction input to ProtoEncode is nil") + return protoTx, nil } - protoTx := &ProtoTransaction{} - // Encoding common fields to all the tx types txType := uint64(tx.Type()) protoTx.Type = &txType @@ -640,6 +640,7 @@ func (tx *Transaction) Hash(location ...byte) (h common.Hash) { } else { from, err := Sender(NewSigner(tx.ChainId(), common.Location{0, 0}), tx) // location not important when performing ecrecover if err != nil { + log.Global.Error("err", err) panic("failed to get transaction sender!") } location := *from.Location() diff --git a/core/types/wo.go b/core/types/wo.go new file mode 100644 index 0000000000..efce5a32ef --- /dev/null +++ b/core/types/wo.go @@ -0,0 +1,979 @@ +package types + +import ( + "errors" + "math/big" + "sync/atomic" + "time" + + "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/common/hexutil" + "github.com/dominant-strategies/go-quai/log" + "google.golang.org/protobuf/proto" + "lukechampine.com/blake3" +) + +type WorkObject struct { + woHeader *WorkObjectHeader + woBody *WorkObjectBody + tx *Transaction + + // caches + size atomic.Value + appendTime atomic.Value + + // These fields are used to track + // inter-peer block relay. + ReceivedAt time.Time + ReceivedFrom interface{} +} + +type WorkObjectHeader struct { + headerHash common.Hash + parentHash common.Hash + number *big.Int + difficulty *big.Int + txHash common.Hash + location common.Location + mixHash common.Hash + time uint64 + nonce BlockNonce + + PowHash atomic.Value + PowDigest atomic.Value +} + +type WorkObjects []*WorkObject + +// Work object types +const ( + BlockObject = iota + TxObject + PEtxObject + PhObject +) + +func (wo *WorkObject) Hash() common.Hash { + return wo.WorkObjectHeader().Hash() +} + +func (wo *WorkObject) SealHash() common.Hash { + return wo.WorkObjectHeader().SealHash() +} + +func (wo *WorkObject) IsUncle() bool { + if wo.WorkObjectHeader() != nil && + wo.Body() == nil { + return true + } + return false +} + +//////////////////////////////////////////////////////////// +/////////////////// Work Object Getters /////////////// +//////////////////////////////////////////////////////////// + +func (wo *WorkObject) WorkObjectHeader() *WorkObjectHeader { + return wo.woHeader +} + +func (wo *WorkObject) Body() *WorkObjectBody { + return wo.woBody +} + +func (wo *WorkObject) Tx() *Transaction { + return wo.tx +} + +//////////////////////////////////////////////////////////// +/////////////////// Work Object Setters /////////////// +//////////////////////////////////////////////////////////// + +func (wo *WorkObject) SetWorkObjectHeader(header *WorkObjectHeader) { + wo.woHeader = header +} + +func (wo *WorkObject) SetBody(body *WorkObjectBody) { + wo.woBody = body +} + +func (wo *WorkObject) SetTx(tx *Transaction) { + wo.tx = tx +} + +func (wo *WorkObject) SetAppendTime(appendTime time.Duration) { + wo.appendTime.Store(appendTime) +} + +//////////////////////////////////////////////////////////// +/////////////////// Work Object Generic Getters /////////////// +//////////////////////////////////////////////////////////// + +// GetAppendTime returns the appendTime of the block +// The appendTime is computed on the first call and cached thereafter. 
+func (wo *WorkObject) GetAppendTime() time.Duration { + if appendTime := wo.appendTime.Load(); appendTime != nil { + if val, ok := appendTime.(time.Duration); ok { + return val + } + } + return -1 +} + +// Size returns the true RLP encoded storage size of the block, either by encoding +// and returning it, or returning a previsouly cached value. +func (wo *WorkObject) Size() common.StorageSize { + return common.StorageSize(0) +} + +func (wo *WorkObject) HeaderHash() common.Hash { + return wo.WorkObjectHeader().HeaderHash() +} + +func (wo *WorkObject) Difficulty() *big.Int { + return wo.WorkObjectHeader().Difficulty() +} + +func (wo *WorkObject) TxHash() common.Hash { + return wo.WorkObjectHeader().TxHash() +} + +func (wo *WorkObject) MixHash() common.Hash { + return wo.WorkObjectHeader().MixHash() +} + +func (wo *WorkObject) Nonce() BlockNonce { + return wo.WorkObjectHeader().Nonce() +} + +func (wo *WorkObject) Location() common.Location { + return wo.WorkObjectHeader().Location() +} + +func (wo *WorkObject) Time() uint64 { + return wo.WorkObjectHeader().Time() +} + +func (wo *WorkObject) Header() *Header { + return wo.Body().Header() +} + +func (wo *WorkObject) ParentHash(nodeCtx int) common.Hash { + if nodeCtx == common.ZONE_CTX { + return wo.WorkObjectHeader().ParentHash() + } else { + return wo.Body().Header().ParentHash(nodeCtx) + } +} + +func (wo *WorkObject) Number(nodeCtx int) *big.Int { + if nodeCtx == common.ZONE_CTX { + return wo.WorkObjectHeader().Number() + } else { + return wo.Body().Header().Number(nodeCtx) + } +} + +func (wo *WorkObject) NumberU64(nodeCtx int) uint64 { + if nodeCtx == common.ZONE_CTX { + return wo.WorkObjectHeader().NumberU64() + } else { + return wo.Body().Header().NumberU64(nodeCtx) + } +} + +func (wo *WorkObject) NonceU64() uint64 { + return wo.WorkObjectHeader().Nonce().Uint64() +} + +func (wo *WorkObject) UncledS() *big.Int { + return wo.Header().UncledS() +} + +func (wo *WorkObject) EVMRoot() common.Hash { + return wo.Header().EVMRoot() +} + +func (wo *WorkObject) ParentEntropy(nodeCtx int) *big.Int { + return wo.Header().ParentEntropy(nodeCtx) +} + +func (wo *WorkObject) EtxRollupHash() common.Hash { + return wo.Header().EtxRollupHash() +} + +func (wo *WorkObject) EtxSetHash() common.Hash { + return wo.Header().EtxSetHash() +} + +func (wo *WorkObject) BaseFee() *big.Int { + return wo.Header().BaseFee() +} + +func (wo *WorkObject) GasUsed() uint64 { + return wo.Header().GasUsed() +} + +func (wo *WorkObject) GasLimit() uint64 { + return wo.Header().GasLimit() +} + +func (wo *WorkObject) Coinbase() common.Address { + return wo.Header().Coinbase() +} + +func (wo *WorkObject) ManifestHash(nodeCtx int) common.Hash { + return wo.Header().ManifestHash(nodeCtx) +} + +func (wo *WorkObject) ParentDeltaS(nodeCtx int) *big.Int { + return wo.Header().ParentDeltaS(nodeCtx) +} + +func (wo *WorkObject) ParentUncledSubDeltaS(nodeCtx int) *big.Int { + return wo.Header().ParentUncledSubDeltaS(nodeCtx) +} + +func (wo *WorkObject) UncleHash() common.Hash { + return wo.Header().UncleHash() +} + +func (wo *WorkObject) EtxHash() common.Hash { + return wo.Header().EtxHash() +} + +func (wo *WorkObject) ReceiptHash() common.Hash { + return wo.Header().ReceiptHash() +} + +func (wo *WorkObject) Extra() []byte { + return wo.Header().Extra() +} + +func (wo *WorkObject) UTXORoot() common.Hash { + return wo.Header().UTXORoot() +} + +func (wo *WorkObject) EfficiencyScore() uint16 { + return wo.Header().EfficiencyScore() +} + +func (wo *WorkObject) ThresholdCount() uint16 { + return 
wo.Header().ThresholdCount() +} + +func (wo *WorkObject) ExpansionNumber() uint8 { + return wo.Header().ExpansionNumber() +} + +func (wo *WorkObject) EtxEligibleSlices() common.Hash { + return wo.Header().EtxEligibleSlices() +} + +func (wo *WorkObject) InterlinkRootHash() common.Hash { + return wo.Header().InterlinkRootHash() +} + +func (wo *WorkObject) PrimeTerminus() common.Hash { + return wo.Header().PrimeTerminus() +} +func (wo *WorkObject) Transactions() Transactions { + return wo.Body().Transactions() +} + +func (wo *WorkObject) ExtTransactions() Transactions { + return wo.Body().ExtTransactions() +} + +func (wo *WorkObject) Uncles() []*WorkObjectHeader { + return wo.Body().Uncles() +} + +func (wo *WorkObject) Manifest() BlockManifest { + return wo.Body().Manifest() +} + +func (wo *WorkObject) InterlinkHashes() common.Hashes { + return wo.Body().InterlinkHashes() +} + +func (wo *WorkObject) QiTransactions() []*Transaction { + return wo.Body().QiTransactions() +} + +func (wo *WorkObject) QuaiTransactions() []*Transaction { + return wo.Body().QuaiTransactions() +} + +func (wo *WorkObject) NumberArray() []*big.Int { + numArray := make([]*big.Int, common.HierarchyDepth) + for i := 0; i < common.HierarchyDepth; i++ { + numArray[i] = wo.Number(i) + } + return numArray +} + +func (wo *WorkObject) SetMixHash(mixHash common.Hash) { + wo.woHeader.mixHash = mixHash +} + +//////////////////////////////////////////////////////////// +/////////////////// Work Object Generic Setters /////////////// +//////////////////////////////////////////////////////////// + +func (wo *WorkObject) SetParentHash(val common.Hash, nodeCtx int) { + if nodeCtx == common.ZONE_CTX { + wo.WorkObjectHeader().SetParentHash(val) + } else { + wo.Body().Header().SetParentHash(val, nodeCtx) + } +} + +func (wo *WorkObject) SetNumber(val *big.Int, nodeCtx int) { + if nodeCtx == common.ZONE_CTX { + wo.WorkObjectHeader().SetNumber(val) + } else { + wo.Body().Header().SetNumber(val, nodeCtx) + } +} + +//////////////////////////////////////////////////////////// +/////////////////// Work Object Header Getters /////////////// +//////////////////////////////////////////////////////////// + +func (wh *WorkObjectHeader) HeaderHash() common.Hash { + return wh.headerHash +} + +func (wh *WorkObjectHeader) ParentHash() common.Hash { + return wh.parentHash +} + +func (wh *WorkObjectHeader) Number() *big.Int { + return wh.number +} + +func (wh *WorkObjectHeader) NumberU64() uint64 { + return wh.number.Uint64() +} + +func (wh *WorkObjectHeader) Difficulty() *big.Int { + return wh.difficulty +} + +func (wh *WorkObjectHeader) TxHash() common.Hash { + return wh.txHash +} + +func (wh *WorkObjectHeader) Location() common.Location { + return wh.location +} + +func (wh *WorkObjectHeader) MixHash() common.Hash { + return wh.mixHash +} + +func (wh *WorkObjectHeader) Nonce() BlockNonce { + return wh.nonce +} + +func (wh *WorkObjectHeader) NonceU64() uint64 { + return wh.nonce.Uint64() +} + +func (wh *WorkObjectHeader) Time() uint64 { + return wh.time +} + +//////////////////////////////////////////////////////////// +/////////////////// Work Object Header Setters /////////////// +//////////////////////////////////////////////////////////// + +func (wh *WorkObjectHeader) SetHeaderHash(headerHash common.Hash) { + wh.headerHash = headerHash +} + +func (wh *WorkObjectHeader) SetParentHash(parentHash common.Hash) { + wh.parentHash = parentHash +} + +func (wh *WorkObjectHeader) SetNumber(number *big.Int) { + wh.number = number +} + +func (wh 
*WorkObjectHeader) SetDifficulty(difficulty *big.Int) { + wh.difficulty = difficulty +} + +func (wh *WorkObjectHeader) SetTxHash(txHash common.Hash) { + wh.txHash = txHash +} + +func (wh *WorkObjectHeader) SetLocation(location common.Location) { + wh.location = location +} + +func (wh *WorkObjectHeader) SetMixHash(mixHash common.Hash) { + wh.mixHash = mixHash +} + +func (wh *WorkObjectHeader) SetNonce(nonce BlockNonce) { + wh.nonce = nonce +} + +func (wh *WorkObjectHeader) SetTime(val uint64) { + wh.time = val +} + +type WorkObjectBody struct { + header *Header + transactions Transactions + extTransactions Transactions + uncles []*WorkObjectHeader + manifest BlockManifest + interlinkHashes common.Hashes +} + +//////////////////////////////////////////////////////////// +/////////////////// Work Object Body Setters /////////////// +//////////////////////////////////////////////////////////// + +func (wb *WorkObjectBody) SetHeader(header *Header) { + wb.header = header +} + +func (wb *WorkObjectBody) SetTransactions(transactions []*Transaction) { + wb.transactions = transactions +} + +func (wb *WorkObjectBody) SetExtTransactions(transactions []*Transaction) { + wb.extTransactions = transactions +} + +func (wb *WorkObjectBody) SetUncles(uncles []*WorkObjectHeader) { + wb.uncles = uncles +} + +func (wb *WorkObjectBody) SetManifest(manifest BlockManifest) { + wb.manifest = manifest +} + +func (wb *WorkObjectBody) SetInterlinkHashes(interlinkHashes common.Hashes) { + wb.interlinkHashes = interlinkHashes +} + +func (wb *WorkObjectBody) CopyTransactions(transactions []*Transaction) { + wb.transactions = transactions +} + +func (wb *WorkObjectBody) CopyExtTransactions(transactions []*Transaction) { + wb.extTransactions = transactions +} + +func (wb *WorkObjectBody) CopyUncles(uncles []*WorkObjectHeader) { + wb.uncles = uncles +} + +func (wb *WorkObjectBody) CopyManifest(manifest BlockManifest) { + wb.manifest = manifest +} + +func (wb *WorkObjectBody) CopyInterlinkHashes(interlinkHashes common.Hashes) { + wb.interlinkHashes = interlinkHashes +} + +//////////////////////////////////////////////////////////// +/////////////////// Work Object Body Getters /////////////// +//////////////////////////////////////////////////////////// + +func (wb *WorkObjectBody) Header() *Header { + return wb.header +} + +func (wb *WorkObjectBody) Transactions() []*Transaction { + return wb.transactions +} + +func (wb *WorkObjectBody) ExtTransactions() []*Transaction { + return wb.extTransactions +} + +func (wb *WorkObjectBody) Uncles() []*WorkObjectHeader { + return wb.uncles +} + +func (wb *WorkObjectBody) Manifest() BlockManifest { + return wb.manifest +} + +func (wb *WorkObjectBody) InterlinkHashes() common.Hashes { + return wb.interlinkHashes +} + +func (wb *WorkObjectBody) QiTransactions() []*Transaction { + // TODO: cache the UTXO loop + qiTxs := make([]*Transaction, 0) + for _, t := range wb.Transactions() { + if t.Type() == QiTxType { + qiTxs = append(qiTxs, t) + } + } + return qiTxs +} + +func (wb *WorkObjectBody) QuaiTransactions() []*Transaction { + quaiTxs := make([]*Transaction, 0) + for _, t := range wb.Transactions() { + if t.Type() != QiTxType { + quaiTxs = append(quaiTxs, t) + } + } + return quaiTxs +} + +func CalcUncleHash(uncles []*WorkObjectHeader) common.Hash { + if len(uncles) == 0 { + return EmptyUncleHash + } + return RlpHash(uncles) +} + +//////////////////////////////////////////////////////////// +/////////////////// New Object Creation Methods //////////// 
+//////////////////////////////////////////////////////////// + +func NewWorkObject(woHeader *WorkObjectHeader, woBody *WorkObjectBody, tx *Transaction, woType int) *WorkObject { + switch woType { + default: + return &WorkObject{ + woHeader: woHeader, + woBody: woBody, + tx: tx, + } + } +} + +func NewWorkObjectWithHeaderAndTx(header *WorkObjectHeader, tx *Transaction) *WorkObject { + return &WorkObject{woHeader: CopyWorkObjectHeader(header), tx: tx} +} + +func (wo *WorkObject) WithBody(header *Header, txs []*Transaction, etxs []*Transaction, uncles []*WorkObjectHeader, manifest BlockManifest, interlinkHashes common.Hashes) *WorkObject { + woBody := &WorkObjectBody{ + header: CopyHeader(header), + transactions: make([]*Transaction, len(txs)), + uncles: make([]*WorkObjectHeader, len(uncles)), + extTransactions: make([]*Transaction, len(etxs)), + manifest: make(BlockManifest, len(manifest)), + interlinkHashes: make(common.Hashes, len(interlinkHashes)), + } + copy(woBody.transactions, txs) + copy(woBody.uncles, uncles) + copy(woBody.extTransactions, etxs) + copy(woBody.manifest, manifest) + copy(woBody.interlinkHashes, interlinkHashes) + for i := range uncles { + woBody.uncles[i] = CopyWorkObjectHeader(uncles[i]) + } + + newWo := &WorkObject{ + woHeader: CopyWorkObjectHeader(wo.woHeader), + woBody: woBody, + tx: wo.tx, + } + return newWo +} + +func NewWorkObjectBody(header *Header, txs []*Transaction, etxs []*Transaction, uncles []*WorkObjectHeader, manifest BlockManifest, receipts []*Receipt, hasher TrieHasher, nodeCtx int) *WorkObjectBody { + b := &WorkObjectBody{} + b.SetHeader(CopyHeader(header)) + + if len(txs) == 0 { + b.Header().SetTxHash(EmptyRootHash) + } else { + b.Header().SetTxHash(DeriveSha(Transactions(txs), hasher)) + b.transactions = make(Transactions, len(txs)) + copy(b.transactions, txs) + } + + if len(receipts) == 0 { + b.Header().SetReceiptHash(EmptyRootHash) + } else { + b.Header().SetReceiptHash(DeriveSha(Receipts(receipts), hasher)) + } + + if len(uncles) == 0 { + b.Header().SetUncleHash(EmptyUncleHash) + } else { + b.Header().SetUncleHash(CalcUncleHash(uncles)) + b.uncles = make([]*WorkObjectHeader, len(uncles)) + for i := range uncles { + b.uncles[i] = CopyWorkObjectHeader(uncles[i]) + } + } + + if len(etxs) == 0 { + b.Header().SetEtxHash(EmptyRootHash) + } else { + b.Header().SetEtxHash(DeriveSha(Transactions(etxs), hasher)) + b.extTransactions = make(Transactions, len(etxs)) + copy(b.extTransactions, etxs) + } + + // Since the subordinate's manifest lives in our body, we still need to check + // that the manifest matches the subordinate's manifest hash, but we do not set + // the subordinate's manifest hash. 
+ subManifestHash := EmptyRootHash + if len(manifest) != 0 { + subManifestHash = DeriveSha(manifest, hasher) + b.manifest = make(BlockManifest, len(manifest)) + copy(b.manifest, manifest) + } + if nodeCtx < common.ZONE_CTX && subManifestHash != b.Header().ManifestHash(nodeCtx+1) { + log.Global.Error("attempted to build block with invalid subordinate manifest") + return nil + } + + return b +} + +func NewWorkObjectWithHeader(header *WorkObject, tx *Transaction, nodeCtx int, woType int) *WorkObject { + woHeader := NewWorkObjectHeader(header.Hash(), header.ParentHash(common.ZONE_CTX), header.Number(common.ZONE_CTX), header.woHeader.difficulty, header.woHeader.txHash, header.woHeader.nonce, header.woHeader.time, header.Location()) + woBody := NewWorkObjectBody(header.Body().Header(), nil, nil, nil, nil, nil, nil, nodeCtx) + return NewWorkObject(woHeader, woBody, tx, woType) +} + +func CopyWorkObject(wo *WorkObject) *WorkObject { + newWo := &WorkObject{ + woHeader: CopyWorkObjectHeader(wo.woHeader), + woBody: CopyWorkObjectBody(wo.woBody), + tx: wo.tx, + } + return newWo +} +func (wo *WorkObject) RPCMarshalWorkObject() map[string]interface{} { + result := map[string]interface{}{ + "woHeader": wo.woHeader.RPCMarshalWorkObjectHeader(), + } + if wo.woBody != nil { + result["woBody"] = wo.woBody.RPCMarshalWorkObjectBody() + } + if wo.tx != nil { + result["tx"] = wo.tx + } + return result +} + +func (wo *WorkObject) ProtoEncode(woType int) (*ProtoWorkObject, error) { + switch woType { + case PEtxObject: + header, err := wo.woHeader.ProtoEncode() + if err != nil { + return nil, err + } + bodyHeader, err := wo.woBody.header.ProtoEncode() + if err != nil { + return nil, errors.New("error encoding work object body header") + } + return &ProtoWorkObject{ + WoHeader: header, + WoBody: &ProtoWorkObjectBody{Header: bodyHeader}, + }, nil + default: + header, err := wo.woHeader.ProtoEncode() + if err != nil { + return nil, err + } + body, err := wo.woBody.ProtoEncode() + if err != nil { + return nil, err + } + if wo.tx == nil { + return &ProtoWorkObject{ + WoHeader: header, + WoBody: body, + }, nil + } else { + tx, err := wo.tx.ProtoEncode() + if err != nil { + return nil, err + } + return &ProtoWorkObject{ + WoHeader: header, + WoBody: body, + Tx: tx, + }, nil + } + } +} + +func (wo *WorkObject) ProtoDecode(data *ProtoWorkObject, location common.Location, woType int) error { + switch woType { + case PEtxObject: + wo.woHeader = new(WorkObjectHeader) + err := wo.woHeader.ProtoDecode(data.GetWoHeader()) + if err != nil { + return err + } + wo.woBody = new(WorkObjectBody) + bodyHeader := new(Header) + bodyHeader.ProtoDecode(data.GetWoBody().Header, location) + wo.woBody.SetHeader(bodyHeader) + default: + wo.woHeader = new(WorkObjectHeader) + err := wo.woHeader.ProtoDecode(data.GetWoHeader()) + if err != nil { + return err + } + wo.woBody = new(WorkObjectBody) + err = wo.woBody.ProtoDecode(data.GetWoBody(), location) + if err != nil { + return err + } + if data.Tx != nil { + wo.tx = new(Transaction) + err = wo.tx.ProtoDecode(data.GetTx(), location) + if err != nil { + return err + } + } + } + return nil +} + +func NewWorkObjectHeader(headerHash common.Hash, parentHash common.Hash, number *big.Int, difficulty *big.Int, txHash common.Hash, nonce BlockNonce, time uint64, location common.Location) *WorkObjectHeader { + return &WorkObjectHeader{ + headerHash: headerHash, + parentHash: parentHash, + number: number, + difficulty: difficulty, + txHash: txHash, + nonce: nonce, + time: time, + location: location, + } +} + 
+func CopyWorkObjectHeader(wh *WorkObjectHeader) *WorkObjectHeader { + cpy := *wh + cpy.SetHeaderHash(wh.HeaderHash()) + cpy.SetParentHash(wh.ParentHash()) + cpy.SetNumber(new(big.Int).Set(wh.Number())) + cpy.SetDifficulty(new(big.Int).Set(wh.Difficulty())) + cpy.SetTxHash(wh.TxHash()) + cpy.SetNonce(wh.Nonce()) + cpy.SetMixHash(wh.MixHash()) + cpy.SetLocation(wh.Location()) + cpy.SetTime(wh.Time()) + return &cpy +} + +func (wh *WorkObjectHeader) RPCMarshalWorkObjectHeader() map[string]interface{} { + result := map[string]interface{}{ + "headerHash": wh.HeaderHash(), + "parentHash": wh.ParentHash(), + "number": (*hexutil.Big)(wh.Number()), + "difficulty": (*hexutil.Big)(wh.Difficulty()), + "nonce": wh.Nonce(), + "location": hexutil.Bytes(wh.Location()), + "txHash": wh.TxHash(), + "time": hexutil.Uint64(wh.Time()), + "mixHash": wh.MixHash(), + } + return result +} + +func (wh *WorkObjectHeader) Hash() (hash common.Hash) { + sealHash := wh.SealHash().Bytes() + hasherMu.Lock() + defer hasherMu.Unlock() + hasher.Reset() + var hData [40]byte + copy(hData[:], wh.Nonce().Bytes()) + copy(hData[len(wh.nonce):], sealHash) + sum := blake3.Sum256(hData[:]) + hash.SetBytes(sum[:]) + return hash +} + +func (wh *WorkObjectHeader) SealHash() (hash common.Hash) { + hasherMu.Lock() + defer hasherMu.Unlock() + hasher.Reset() + protoSealData := wh.SealEncode() + data, err := proto.Marshal(protoSealData) + if err != nil { + log.Global.Error("Failed to marshal seal data ", "err", err) + } + sum := blake3.Sum256(data[:]) + hash.SetBytes(sum[:]) + return hash +} + +func (wh *WorkObjectHeader) SealEncode() *ProtoWorkObjectHeader { + hash := common.ProtoHash{Value: wh.HeaderHash().Bytes()} + parentHash := common.ProtoHash{Value: wh.ParentHash().Bytes()} + txHash := common.ProtoHash{Value: wh.TxHash().Bytes()} + number := wh.Number().Bytes() + difficulty := wh.Difficulty().Bytes() + location := wh.Location().ProtoEncode() + time := wh.Time() + + return &ProtoWorkObjectHeader{ + HeaderHash: &hash, + ParentHash: &parentHash, + Number: number, + Difficulty: difficulty, + TxHash: &txHash, + Location: location, + Time: &time, + } +} + +func (wh *WorkObjectHeader) ProtoEncode() (*ProtoWorkObjectHeader, error) { + hash := common.ProtoHash{Value: wh.HeaderHash().Bytes()} + parentHash := common.ProtoHash{Value: wh.ParentHash().Bytes()} + txHash := common.ProtoHash{Value: wh.TxHash().Bytes()} + number := wh.Number().Bytes() + difficulty := wh.Difficulty().Bytes() + location := wh.Location().ProtoEncode() + nonce := wh.Nonce().Uint64() + mixHash := common.ProtoHash{Value: wh.MixHash().Bytes()} + + return &ProtoWorkObjectHeader{ + HeaderHash: &hash, + ParentHash: &parentHash, + Number: number, + Difficulty: difficulty, + TxHash: &txHash, + Location: location, + Nonce: &nonce, + MixHash: &mixHash, + Time: &wh.time, + }, nil +} + +func (wh *WorkObjectHeader) ProtoDecode(data *ProtoWorkObjectHeader) error { + if data.HeaderHash == nil || data.ParentHash == nil || data.Number == nil || data.Difficulty == nil || data.TxHash == nil || data.Nonce == nil || data.Location == nil { + err := errors.New("failed to decode work object header") + log.Global.WithField("err", err).Warn() + return err + } + wh.SetHeaderHash(common.BytesToHash(data.GetHeaderHash().Value)) + wh.SetParentHash(common.BytesToHash(data.GetParentHash().Value)) + wh.SetNumber(new(big.Int).SetBytes(data.GetNumber())) + wh.SetDifficulty(new(big.Int).SetBytes(data.Difficulty)) + wh.SetTxHash(common.BytesToHash(data.GetTxHash().Value)) + 
wh.SetNonce(uint64ToByteArr(data.GetNonce())) + wh.SetLocation(data.GetLocation().GetValue()) + wh.SetMixHash(common.BytesToHash(data.GetMixHash().Value)) + wh.SetTime(data.GetTime()) + + return nil +} + +func CopyWorkObjectBody(wb *WorkObjectBody) *WorkObjectBody { + cpy := &WorkObjectBody{header: CopyHeader(wb.header)} + cpy.SetTransactions(wb.Transactions()) + cpy.SetExtTransactions(wb.ExtTransactions()) + cpy.SetUncles(wb.Uncles()) + cpy.SetManifest(wb.Manifest()) + cpy.SetInterlinkHashes(wb.InterlinkHashes()) + + return cpy +} + +func (wb *WorkObjectBody) ProtoEncode() (*ProtoWorkObjectBody, error) { + header, err := wb.header.ProtoEncode() + if err != nil { + return nil, err + } + + protoTransactions, err := wb.transactions.ProtoEncode() + if err != nil { + return nil, err + } + + protoExtTransactions, err := wb.extTransactions.ProtoEncode() + if err != nil { + return nil, err + } + + protoUncles := &ProtoWorkObjectHeaders{} + for _, unc := range wb.uncles { + protoUncle, err := unc.ProtoEncode() + if err != nil { + return nil, err + } + protoUncles.WoHeaders = append(protoUncles.WoHeaders, protoUncle) + } + + protoManifest, err := wb.manifest.ProtoEncode() + if err != nil { + return nil, err + } + + protoInterlinkHashes := wb.interlinkHashes.ProtoEncode() + + return &ProtoWorkObjectBody{ + Header: header, + Transactions: protoTransactions, + ExtTransactions: protoExtTransactions, + Uncles: protoUncles, + Manifest: protoManifest, + InterlinkHashes: protoInterlinkHashes, + }, nil +} + +func (wb *WorkObjectBody) ProtoDecode(data *ProtoWorkObjectBody, location common.Location) error { + wb.header = &Header{} + err := wb.header.ProtoDecode(data.GetHeader(), location) + if err != nil { + return err + } + wb.transactions = Transactions{} + err = wb.transactions.ProtoDecode(data.GetTransactions(), location) + if err != nil { + return err + } + wb.extTransactions = Transactions{} + err = wb.extTransactions.ProtoDecode(data.GetExtTransactions(), location) + if err != nil { + return err + } + wb.uncles = make([]*WorkObjectHeader, len(data.GetUncles().GetWoHeaders())) + for i, protoUncle := range data.GetUncles().GetWoHeaders() { + uncle := &WorkObjectHeader{} + err = uncle.ProtoDecode(protoUncle) + if err != nil { + return err + } + wb.uncles[i] = uncle + } + wb.manifest = BlockManifest{} + err = wb.manifest.ProtoDecode(data.GetManifest()) + if err != nil { + return err + } + wb.interlinkHashes = common.Hashes{} + wb.interlinkHashes.ProtoDecode(data.GetInterlinkHashes()) + + return nil +} + +func (wb *WorkObjectBody) RPCMarshalWorkObjectBody() map[string]interface{} { + result := map[string]interface{}{ + "header": wb.header.RPCMarshalHeader(), + "transactions": wb.Transactions(), + "extTransactions": wb.ExtTransactions(), + "manifest": wb.Manifest(), + "interlinkHashes": wb.InterlinkHashes(), + } + + workedUncles := make([]map[string]interface{}, len(wb.Uncles())) + for i, uncle := range wb.Uncles() { + workedUncles[i] = uncle.RPCMarshalWorkObjectHeader() + } + result["uncles"] = workedUncles + + return result +} diff --git a/core/worker.go b/core/worker.go index 744bacea51..95164b523c 100644 --- a/core/worker.go +++ b/core/worker.go @@ -68,14 +68,14 @@ type environment struct { etxRLimit int // Remaining number of cross-region ETXs that can be included etxPLimit int // Remaining number of cross-prime ETXs that can be included - header *types.Header + wo *types.WorkObject txs []*types.Transaction etxs []*types.Transaction utxoFees *big.Int subManifest types.BlockManifest receipts 
[]*types.Receipt uncleMu sync.RWMutex - uncles map[common.Hash]*types.Header + uncles map[common.Hash]*types.WorkObjectHeader } // copy creates a deep copy of environment. @@ -90,7 +90,7 @@ func (env *environment) copy(processingState bool, nodeCtx int) *environment { coinbase: env.coinbase, etxRLimit: env.etxRLimit, etxPLimit: env.etxPLimit, - header: types.CopyHeader(env.header), + wo: types.CopyWorkObject(env.wo), receipts: copyReceipts(env.receipts), utxoFees: new(big.Int).Set(env.utxoFees), } @@ -106,22 +106,22 @@ func (env *environment) copy(processingState bool, nodeCtx int) *environment { copy(cpy.etxs, env.etxs) env.uncleMu.Lock() - cpy.uncles = make(map[common.Hash]*types.Header) + cpy.uncles = make(map[common.Hash]*types.WorkObjectHeader) for hash, uncle := range env.uncles { cpy.uncles[hash] = uncle } env.uncleMu.Unlock() return cpy } else { - return &environment{header: types.CopyHeader(env.header)} + return &environment{wo: types.CopyWorkObject(env.wo)} } } // unclelist returns the contained uncles as the list format. -func (env *environment) unclelist() []*types.Header { +func (env *environment) unclelist() []*types.WorkObjectHeader { env.uncleMu.RLock() defer env.uncleMu.RUnlock() - var uncles []*types.Header + var uncles []*types.WorkObjectHeader for _, uncle := range env.uncles { uncles = append(uncles, uncle) } @@ -142,7 +142,7 @@ func (env *environment) discard() { type task struct { receipts []*types.Receipt state *state.StateDB - block *types.Block + block *types.WorkObject createdAt time.Time } @@ -193,7 +193,7 @@ type worker struct { // Channels taskCh chan *task - resultCh chan *types.Block + resultCh chan *types.WorkObject exitCh chan struct{} resubmitIntervalCh chan time.Duration resubmitAdjustCh chan *intervalAdjust @@ -205,8 +205,8 @@ type worker struct { wg sync.WaitGroup - localUncles map[common.Hash]*types.Block // A set of side blocks generated locally as the possible uncle blocks. - remoteUncles map[common.Hash]*types.Block // A set of side blocks as the possible uncle blocks. + localUncles map[common.Hash]*types.WorkObjectHeader // A set of side blocks generated locally as the possible uncle blocks. + remoteUncles map[common.Hash]*types.WorkObjectHeader // A set of side blocks as the possible uncle blocks. uncleMu sync.RWMutex mu sync.RWMutex // The lock used to protect the coinbase and extra fields @@ -218,7 +218,7 @@ type worker struct { pendingBlockBody *lru.Cache snapshotMu sync.RWMutex // The lock used to protect the snapshots below - snapshotBlock *types.Block + snapshotBlock *types.WorkObject headerPrints *expireLru.Cache @@ -234,7 +234,7 @@ type worker struct { noempty uint32 // External functions - isLocalBlock func(header *types.Header) bool // Function used to determine whether the specified block is mined by local miner. + isLocalBlock func(header *types.WorkObject) bool // Function used to determine whether the specified block is mined by local miner. // Test hooks newTaskHook func(*task) // Method to call upon receiving a new sealing task. 
@@ -265,7 +265,7 @@ func (ra *RollingAverage) Average() time.Duration { return ra.sum / time.Duration(len(ra.durations)) } -func newWorker(config *Config, chainConfig *params.ChainConfig, db ethdb.Database, engine consensus.Engine, headerchain *HeaderChain, txPool *TxPool, isLocalBlock func(header *types.Header) bool, init bool, processingState bool, logger *log.Logger) *worker { +func newWorker(config *Config, chainConfig *params.ChainConfig, db ethdb.Database, engine consensus.Engine, headerchain *HeaderChain, txPool *TxPool, isLocalBlock func(header *types.WorkObject) bool, init bool, processingState bool, logger *log.Logger) *worker { worker := &worker{ config: config, chainConfig: chainConfig, @@ -275,12 +275,12 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, db ethdb.Databas coinbase: config.Etherbase, isLocalBlock: isLocalBlock, workerDb: db, - localUncles: make(map[common.Hash]*types.Block), - remoteUncles: make(map[common.Hash]*types.Block), + localUncles: make(map[common.Hash]*types.WorkObjectHeader), + remoteUncles: make(map[common.Hash]*types.WorkObjectHeader), chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), chainSideCh: make(chan ChainSideEvent, chainSideChanSize), taskCh: make(chan *task), - resultCh: make(chan *types.Block, resultQueueSize), + resultCh: make(chan *types.WorkObject, resultQueueSize), exitCh: make(chan struct{}), interrupt: make(chan struct{}), resubmitIntervalCh: make(chan time.Duration), @@ -357,7 +357,7 @@ func (w *worker) enablePreseal() { } // pending returns the pending state and corresponding block. -func (w *worker) pending() *types.Block { +func (w *worker) pending() *types.WorkObject { // return a snapshot to avoid contention on currentMu mutex w.snapshotMu.RLock() defer w.snapshotMu.RUnlock() @@ -365,7 +365,7 @@ func (w *worker) pending() *types.Block { } // pendingBlock returns pending block. -func (w *worker) pendingBlock() *types.Block { +func (w *worker) pendingBlock() *types.WorkObject { // return a snapshot to avoid contention on currentMu mutex w.snapshotMu.RLock() defer w.snapshotMu.RUnlock() @@ -373,7 +373,7 @@ func (w *worker) pendingBlock() *types.Block { } // pendingBlockAndReceipts returns pending block and corresponding receipts. 
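
Reviewer note (sketch, not part of the patch): pending() and pendingBlock() now hand back the cached *types.WorkObject snapshot under snapshotMu instead of a *types.Block, so RPC readers never contend with the sealing loop. A stand-alone illustration of that snapshot-accessor pattern, with a placeholder workObject type:

package main

import (
	"fmt"
	"sync"
)

type workObject struct{ number uint64 } // placeholder for *types.WorkObject

type worker struct {
	snapshotMu    sync.RWMutex
	snapshotBlock *workObject
}

// pending returns the last snapshot; callers must treat it as read-only.
func (w *worker) pending() *workObject {
	w.snapshotMu.RLock()
	defer w.snapshotMu.RUnlock()
	return w.snapshotBlock
}

// updateSnapshot installs a freshly assembled work object.
func (w *worker) updateSnapshot(wo *workObject) {
	w.snapshotMu.Lock()
	defer w.snapshotMu.Unlock()
	w.snapshotBlock = wo
}

func main() {
	w := &worker{}
	w.updateSnapshot(&workObject{number: 42})
	fmt.Println(w.pending().number) // 42
}
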
-func (w *worker) pendingBlockAndReceipts() (*types.Block, types.Receipts) { +func (w *worker) pendingBlockAndReceipts() (*types.WorkObject, types.Receipts) { // return a snapshot to avoid contention on currentMu mutex w.snapshotMu.RLock() defer w.snapshotMu.RUnlock() @@ -413,9 +413,9 @@ func (w *worker) LoadPendingBlockBody() { pendingBlockBodykeys := rawdb.ReadPbBodyKeys(w.workerDb) for _, key := range pendingBlockBodykeys { if key == types.EmptyBodyHash { - w.pendingBlockBody.Add(key, &types.Body{}) + w.pendingBlockBody.Add(key, &types.WorkObject{}) } else { - w.pendingBlockBody.Add(key, rawdb.ReadPbCacheBody(w.workerDb, key, w.hc.NodeLocation())) + w.pendingBlockBody.Add(key, rawdb.ReadPbCacheBody(w.workerDb, key)) } // Remove the entry from the database so that body is not accumulated over multiple stops rawdb.DeletePbCacheBody(w.workerDb, key) @@ -432,7 +432,7 @@ func (w *worker) StorePendingBlockBody() { if value, exist := pendingBlockBody.Peek(key); exist { pendingBlockBodyKeys = append(pendingBlockBodyKeys, key.(common.Hash)) if key.(common.Hash) != types.EmptyBodyHash { - rawdb.WritePbCacheBody(w.workerDb, key.(common.Hash), value.(*types.Body)) + rawdb.WritePbCacheBody(w.workerDb, key.(common.Hash), value.(*types.WorkObject)) } } } @@ -455,8 +455,8 @@ func (w *worker) asyncStateLoop() { w.interrupt = make(chan struct{}) return default: - block := head.Block - header, err := w.GeneratePendingHeader(block, true) + wo := head.Block + header, err := w.GeneratePendingHeader(wo, true) if err != nil { w.logger.WithField("err", err).Error("Error generating pending header") return @@ -470,30 +470,30 @@ func (w *worker) asyncStateLoop() { go func() { if side.ResetUncles { w.uncleMu.Lock() - w.localUncles = make(map[common.Hash]*types.Block) - w.remoteUncles = make(map[common.Hash]*types.Block) + w.localUncles = make(map[common.Hash]*types.WorkObjectHeader) + w.remoteUncles = make(map[common.Hash]*types.WorkObjectHeader) w.uncleMu.Unlock() } - for _, block := range side.Blocks { + for _, wo := range side.Blocks { // Short circuit for duplicate side blocks w.uncleMu.RLock() - if _, exists := w.localUncles[block.Hash()]; exists { + if _, exists := w.localUncles[wo.Hash()]; exists { w.uncleMu.RUnlock() continue } - if _, exists := w.remoteUncles[block.Hash()]; exists { + if _, exists := w.remoteUncles[wo.Hash()]; exists { w.uncleMu.RUnlock() continue } w.uncleMu.RUnlock() - if w.isLocalBlock != nil && w.isLocalBlock(block.Header()) { + if w.isLocalBlock != nil && w.isLocalBlock(wo) { w.uncleMu.Lock() - w.localUncles[block.Hash()] = block + w.localUncles[wo.Hash()] = wo.WorkObjectHeader() w.uncleMu.Unlock() } else { w.uncleMu.Lock() - w.remoteUncles[block.Hash()] = block + w.remoteUncles[wo.Hash()] = wo.WorkObjectHeader() w.uncleMu.Unlock() } } @@ -509,7 +509,7 @@ func (w *worker) asyncStateLoop() { } // GeneratePendingBlock generates pending block given a commited block. 
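
Reviewer note (illustrative sketch): the side-block handler in asyncStateLoop now keeps only the WorkObjectHeader of each candidate uncle, split into local and remote sets and de-duplicated by hash before insertion. A self-contained version of that flow with placeholder types:

package main

import (
	"fmt"
	"sync"
)

type woHeader struct{ hash string } // placeholder for *types.WorkObjectHeader

type uncleTracker struct {
	mu     sync.RWMutex
	local  map[string]*woHeader
	remote map[string]*woHeader
}

func (t *uncleTracker) add(h *woHeader, isLocal bool) {
	t.mu.RLock()
	_, seenLocal := t.local[h.hash]
	_, seenRemote := t.remote[h.hash]
	t.mu.RUnlock()
	if seenLocal || seenRemote { // short circuit duplicate side blocks
		return
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	if isLocal {
		t.local[h.hash] = h
	} else {
		t.remote[h.hash] = h
	}
}

func main() {
	t := &uncleTracker{local: map[string]*woHeader{}, remote: map[string]*woHeader{}}
	t.add(&woHeader{hash: "0x01"}, true)
	t.add(&woHeader{hash: "0x01"}, false) // duplicate, ignored
	fmt.Println(len(t.local), len(t.remote)) // 1 0
}
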
-func (w *worker) GeneratePendingHeader(block *types.Block, fill bool) (*types.Header, error) { +func (w *worker) GeneratePendingHeader(block *types.WorkObject, fill bool) (*types.WorkObject, error) { nodeCtx := w.hc.NodeCtx() w.interruptAsyncPhGen() @@ -549,7 +549,7 @@ func (w *worker) GeneratePendingHeader(block *types.Block, fill bool) (*types.He work.txs = append(work.txs, types.NewTx(&types.QiTx{})) // placeholder } // Fill pending transactions from the txpool - w.adjustGasLimit(nil, work, block) + w.adjustGasLimit(work, block) work.utxoFees = big.NewInt(0) start := time.Now() etxSet := w.fillTransactions(interrupt, work, block, fill) @@ -563,12 +563,12 @@ func (w *worker) GeneratePendingHeader(block *types.Block, fill bool) (*types.He } // Set the etx set commitment in the header if etxSet != nil { - work.header.SetEtxSetHash(etxSet.Hash()) + work.wo.Header().SetEtxSetHash(etxSet.Hash()) } else { - work.header.SetEtxSetHash(types.EmptyEtxSetHash) + work.wo.Header().SetEtxSetHash(types.EmptyEtxSetHash) } if coinbase.IsInQiLedgerScope() { - coinbaseTx, err := createCoinbaseTxWithFees(work.header, work.utxoFees, work.state) + coinbaseTx, err := createCoinbaseTxWithFees(work.wo, work.utxoFees, work.state) if err != nil { return nil, err } @@ -577,26 +577,26 @@ func (w *worker) GeneratePendingHeader(block *types.Block, fill bool) (*types.He } // Create a local environment copy, avoid the data race with snapshot state. - newBlock, err := w.FinalizeAssemble(w.hc, work.header, block, work.state, work.txs, work.unclelist(), work.etxs, work.subManifest, work.receipts) + newWo, err := w.FinalizeAssemble(w.hc, work.wo, block, work.state, work.txs, work.unclelist(), work.etxs, work.subManifest, work.receipts) if err != nil { return nil, err } - work.header = newBlock.Header() + work.wo = newWo - w.printPendingHeaderInfo(work, newBlock, start) + w.printPendingHeaderInfo(work, newWo, start) - return work.header, nil + return newWo, nil } // printPendingHeaderInfo logs the pending header information -func (w *worker) printPendingHeaderInfo(work *environment, block *types.Block, start time.Time) { +func (w *worker) printPendingHeaderInfo(work *environment, block *types.WorkObject, start time.Time) { work.uncleMu.RLock() - if w.CurrentInfo(block.Header()) { + if w.CurrentInfo(block) { w.logger.WithFields(log.Fields{ "number": block.Number(w.hc.NodeCtx()), "parent": block.ParentHash(w.hc.NodeCtx()), - "sealhash": block.Header().SealHash(), + "sealhash": block.SealHash(), "uncles": len(work.uncles), "txs": len(work.txs), "etxs": len(block.ExtTransactions()), @@ -610,7 +610,7 @@ func (w *worker) printPendingHeaderInfo(work *environment, block *types.Block, s w.logger.WithFields(log.Fields{ "number": block.Number(w.hc.NodeCtx()), "parent": block.ParentHash(w.hc.NodeCtx()), - "sealhash": block.Header().SealHash(), + "sealhash": block.SealHash(), "uncles": len(work.uncles), "txs": len(work.txs), "etxs": len(block.ExtTransactions()), @@ -642,7 +642,7 @@ func (w *worker) eventExitLoop() { } // makeEnv creates a new environment for the sealing block. -func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase common.Address) (*environment, error) { +func (w *worker) makeEnv(parent *types.WorkObject, proposedWo *types.WorkObject, coinbase common.Address) (*environment, error) { // Retrieve the parent state to execute on top and start a prefetcher for // the miner to speed block sealing up a bit. 
evmRoot := parent.EVMRoot() @@ -666,29 +666,37 @@ func (w *worker) makeEnv(parent *types.Block, header *types.Header, coinbase com } // Note the passed coinbase may be different with header.Coinbase. env := &environment{ - signer: types.MakeSigner(w.chainConfig, header.Number(w.hc.NodeCtx())), + signer: types.MakeSigner(w.chainConfig, proposedWo.Number(w.hc.NodeCtx())), state: state, coinbase: coinbase, ancestors: mapset.NewSet(), family: mapset.NewSet(), - header: header, - uncles: make(map[common.Hash]*types.Header), + wo: proposedWo, + uncles: make(map[common.Hash]*types.WorkObjectHeader), etxRLimit: etxRLimit, etxPLimit: etxPLimit, } + // when 08 is processed ancestors contain 07 (quick block) + for _, ancestor := range w.hc.GetBlocksFromHash(parent.Header().Hash(), 7) { + for _, uncle := range ancestor.Uncles() { + env.family.Add(uncle.Hash()) + } + env.family.Add(ancestor.Hash()) + env.ancestors.Add(ancestor.Hash()) + } // Keep track of transactions which return errors so they can be removed env.tcount = 0 return env, nil } // commitUncle adds the given block to uncle block set, returns error if failed to add. -func (w *worker) commitUncle(env *environment, uncle *types.Header) error { +func (w *worker) commitUncle(env *environment, uncle *types.WorkObjectHeader) error { env.uncleMu.Lock() defer env.uncleMu.Unlock() hash := uncle.Hash() // when 08 is processed ancestors contain 07 (quick block) - for _, ancestor := range w.hc.GetBlocksFromHash(env.header.ParentHash(common.ZONE_CTX), 7) { + for _, ancestor := range w.hc.GetBlocksFromHash(env.wo.ParentHash(common.ZONE_CTX), 7) { for _, uncle := range ancestor.Uncles() { env.family.Add(uncle.Hash()) } @@ -699,10 +707,10 @@ func (w *worker) commitUncle(env *environment, uncle *types.Header) error { if _, exist := env.uncles[hash]; exist { return errors.New("uncle not unique") } - if env.header.ParentHash(w.hc.NodeCtx()) == uncle.ParentHash(w.hc.NodeCtx()) { + if env.wo.ParentHash(w.hc.NodeCtx()) == uncle.ParentHash() { return errors.New("uncle is sibling") } - if !env.ancestors.Contains(uncle.ParentHash(w.hc.NodeCtx())) { + if !env.ancestors.Contains(uncle.ParentHash()) { return errors.New("uncle's parent unknown") } if env.family.Contains(hash) { @@ -712,7 +720,7 @@ func (w *worker) commitUncle(env *environment, uncle *types.Header) error { return nil } -func (w *worker) commitTransaction(env *environment, parent *types.Header, tx *types.Transaction) ([]*types.Log, error) { +func (w *worker) commitTransaction(env *environment, parent *types.WorkObject, tx *types.Transaction) ([]*types.Log, error) { if tx != nil { if tx.Type() == types.ExternalTxType && tx.To().IsInQiLedgerScope() { if err := env.gasPool.SubGas(params.CallValueTransferGas); err != nil { @@ -722,21 +730,21 @@ func (w *worker) commitTransaction(env *environment, parent *types.Header, tx *t return nil, fmt.Errorf("tx %032x emits UTXO with value greater than max denomination", tx.Hash()) } env.state.CreateUTXO(tx.OriginatingTxHash(), tx.ETXIndex(), types.NewUtxoEntry(types.NewTxOut(uint8(tx.Value().Int64()), tx.To().Bytes()))) - gasUsed := env.header.GasUsed() + gasUsed := env.wo.Header().GasUsed() gasUsed += params.CallValueTransferGas - env.header.SetGasUsed(gasUsed) + env.wo.Header().SetGasUsed(gasUsed) env.txs = append(env.txs, tx) return []*types.Log{}, nil // need to make sure this does not modify receipt hash } snap := env.state.Snapshot() // retrieve the gas used int and pass in the reference to the ApplyTransaction - gasUsed := env.header.GasUsed() - receipt, err := 
ApplyTransaction(w.chainConfig, parent, w.hc, &env.coinbase, env.gasPool, env.state, env.header, tx, &gasUsed, *w.hc.bc.processor.GetVMConfig(), &env.etxRLimit, &env.etxPLimit, w.logger) + gasUsed := env.wo.GasUsed() + receipt, err := ApplyTransaction(w.chainConfig, parent, w.hc, &env.coinbase, env.gasPool, env.state, env.wo, tx, &gasUsed, *w.hc.bc.processor.GetVMConfig(), &env.etxRLimit, &env.etxPLimit, w.logger) if err != nil { w.logger.WithFields(log.Fields{ "err": err, "tx": tx.Hash().Hex(), - "block": env.header.Number, + "block": env.wo.Number(w.hc.NodeCtx()), "gasUsed": gasUsed, }).Debug("Error playing transaction in worker") env.state.RevertToSnapshot(snap) @@ -748,7 +756,7 @@ func (w *worker) commitTransaction(env *environment, parent *types.Header, tx *t // once the gasUsed pointer is updated in the ApplyTransaction it has to be set back to the env.Header.GasUsed // This extra step is needed because previously the GasUsed was a public method and direct update of the value // was possible. - env.header.SetGasUsed(gasUsed) + env.wo.Header().SetGasUsed(gasUsed) env.txs = append(env.txs, tx) env.receipts = append(env.receipts, receipt) return receipt.Logs, nil @@ -756,8 +764,8 @@ func (w *worker) commitTransaction(env *environment, parent *types.Header, tx *t return nil, errors.New("error finding transaction") } -func (w *worker) commitTransactions(env *environment, parent *types.Header, etxs []*types.Transaction, txs *types.TransactionsByPriceAndNonce, etxSet *types.EtxSet, interrupt *int32) bool { - gasLimit := env.header.GasLimit +func (w *worker) commitTransactions(env *environment, parent *types.WorkObject, etxs []*types.Transaction, txs *types.TransactionsByPriceAndNonce, etxSet *types.EtxSet, interrupt *int32) bool { + gasLimit := env.wo.GasLimit if env.gasPool == nil { env.gasPool = new(GasPool).AddGas(gasLimit()) } @@ -775,11 +783,11 @@ func (w *worker) commitTransactions(env *environment, parent *types.Header, etxs break } // Add ETXs until minimum gas is used - if env.header.GasUsed() >= minEtxGas { + if env.wo.GasUsed() >= minEtxGas { break } - if env.header.GasUsed() > minEtxGas*params.MaximumEtxGasMultiplier { // sanity check, this should never happen - log.Global.WithField("Gas Used", env.header.GasUsed()).Error("Block uses more gas than maximum ETX gas") + if env.wo.GasUsed() > minEtxGas*params.MaximumEtxGasMultiplier { // sanity check, this should never happen + log.Global.WithField("Gas Used", env.wo.GasUsed()).Error("Block uses more gas than maximum ETX gas") return true } hash := etxSet.Pop() @@ -941,36 +949,36 @@ type generateParams struct { // prepareWork constructs the sealing task according to the given parameters, // either based on the last chain head or specified parent. In this function // the pending transactions are not filled yet, only the empty task returned. -func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*environment, error) { +func (w *worker) prepareWork(genParams *generateParams, wo *types.WorkObject) (*environment, error) { w.mu.RLock() defer w.mu.RUnlock() nodeCtx := w.hc.NodeCtx() // Find the parent block for sealing task - parent := block + parent := wo // Sanity check the timestamp correctness, recap the timestamp // to parent+1 if the mutation is allowed. 
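
Reviewer note (sketch, not part of the patch): commitTransactions keeps draining inbound ETXs until a minimum share of the block gas is consumed, and logs an error if usage ever exceeds the maximum ETX gas multiple. The loop below is a standalone illustration of that budget check; the constants and the derivation of minEtxGas are assumptions standing in for params.MinimumEtxGasDivisor and params.MaximumEtxGasMultiplier:

package main

import "fmt"

const (
	gasLimit          = uint64(1_000_000)
	minEtxGasDivisor  = 5 // stand-in for params.MinimumEtxGasDivisor
	maxEtxGasMultiple = 2 // stand-in for params.MaximumEtxGasMultiplier
	etxGas            = uint64(21_000)
)

func fillEtxs(queue []string) (included []string, gasUsed uint64) {
	minEtxGas := gasLimit / minEtxGasDivisor
	for _, hash := range queue {
		if gasUsed >= minEtxGas {
			break // minimum ETX gas share satisfied
		}
		if gasUsed > minEtxGas*maxEtxGasMultiple {
			break // sanity check mirrored from the worker; should be unreachable
		}
		gasUsed += etxGas
		included = append(included, hash)
	}
	return included, gasUsed
}

func main() {
	etxs, gas := fillEtxs([]string{"0xa", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10", "0x11", "0x12", "0x13", "0x14"})
	fmt.Println(len(etxs), gas) // 10 210000
}
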
timestamp := genParams.timestamp - if parent.Time() >= timestamp { + if parent.WorkObjectHeader().Time() >= timestamp { if genParams.forceTime { - return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.Time(), timestamp) + return nil, fmt.Errorf("invalid timestamp, parent %d given %d", parent.WorkObjectHeader().Time(), timestamp) } - timestamp = parent.Time() + 1 + timestamp = parent.WorkObjectHeader().Time() + 1 } // Construct the sealing block header, set the extra field if it's allowed num := parent.Number(nodeCtx) - header := types.EmptyHeader() - header.SetParentHash(block.Header().Hash(), nodeCtx) + newWo := types.EmptyHeader(nodeCtx) + newWo.SetParentHash(wo.Hash(), nodeCtx) if w.hc.IsGenesisHash(parent.Hash()) { - header.SetNumber(big.NewInt(1), nodeCtx) + newWo.SetNumber(big.NewInt(1), nodeCtx) } else { - header.SetNumber(big.NewInt(int64(num.Uint64())+1), nodeCtx) + newWo.SetNumber(big.NewInt(int64(num.Uint64())+1), nodeCtx) } - header.SetTime(timestamp) - header.SetLocation(w.hc.NodeLocation()) + newWo.WorkObjectHeader().SetTime(timestamp) + newWo.WorkObjectHeader().SetLocation(w.hc.NodeLocation()) // Only calculate entropy if the parent is not the genesis block - _, order, err := w.engine.CalcOrder(parent.Header()) + _, order, err := w.engine.CalcOrder(parent) if err != nil { return nil, err } @@ -978,15 +986,15 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en // Set the parent delta S prior to sending to sub if nodeCtx != common.PRIME_CTX { if order < nodeCtx { - header.SetParentDeltaS(big.NewInt(0), nodeCtx) + newWo.Header().SetParentDeltaS(big.NewInt(0), nodeCtx) } else { - header.SetParentDeltaS(w.engine.DeltaLogS(w.hc, parent.Header()), nodeCtx) + newWo.Header().SetParentDeltaS(w.engine.DeltaLogS(w.hc, parent), nodeCtx) } } - header.SetParentEntropy(w.engine.TotalLogS(w.hc, parent.Header()), nodeCtx) + newWo.Header().SetParentEntropy(w.engine.TotalLogS(w.hc, parent), nodeCtx) } else { - header.SetParentEntropy(big.NewInt(0), nodeCtx) - header.SetParentDeltaS(big.NewInt(0), nodeCtx) + newWo.Header().SetParentEntropy(big.NewInt(0), nodeCtx) + newWo.Header().SetParentDeltaS(big.NewInt(0), nodeCtx) } // Only calculate entropy if the parent is not the genesis block @@ -994,34 +1002,34 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en // Set the parent delta S prior to sending to sub if nodeCtx != common.PRIME_CTX { if order < nodeCtx { - header.SetParentUncledSubDeltaS(big.NewInt(0), nodeCtx) + newWo.Header().SetParentUncledSubDeltaS(big.NewInt(0), nodeCtx) } else { - header.SetParentUncledSubDeltaS(w.engine.UncledSubDeltaLogS(w.hc, parent.Header()), nodeCtx) + newWo.Header().SetParentUncledSubDeltaS(w.engine.UncledSubDeltaLogS(w.hc, parent), nodeCtx) } } } else { - header.SetParentUncledSubDeltaS(big.NewInt(0), nodeCtx) + newWo.Header().SetParentUncledSubDeltaS(big.NewInt(0), nodeCtx) } // calculate the expansion values - except for the etxEligibleSlices, the // zones cannot modify any of the other fields its done in prime if nodeCtx == common.PRIME_CTX { if parent.NumberU64(common.PRIME_CTX) == 0 { - header.SetEfficiencyScore(0) - header.SetThresholdCount(0) - header.SetExpansionNumber(0) + newWo.Header().SetEfficiencyScore(0) + newWo.Header().SetThresholdCount(0) + newWo.Header().SetExpansionNumber(0) } else { // compute the efficiency score at each prime block - efficiencyScore := w.hc.ComputeEfficiencyScore(parent.Header()) - header.SetEfficiencyScore(efficiencyScore) + efficiencyScore := 
w.hc.ComputeEfficiencyScore(parent) + newWo.Header().SetEfficiencyScore(efficiencyScore) // If the threshold count is zero we have not started considering for the // expansion if parent.Header().ThresholdCount() == 0 { if efficiencyScore > params.TREE_EXPANSION_THRESHOLD { - header.SetThresholdCount(parent.Header().ThresholdCount() + 1) + newWo.Header().SetThresholdCount(parent.Header().ThresholdCount() + 1) } else { - header.SetThresholdCount(0) + newWo.Header().SetThresholdCount(0) } } else { // If the efficiency score goes below the threshold, and we still have @@ -1030,18 +1038,18 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en // threshold count if (parent.Header().ThresholdCount() < params.TREE_EXPANSION_TRIGGER_WINDOW && efficiencyScore < params.TREE_EXPANSION_THRESHOLD) || parent.Header().ThresholdCount() >= params.TREE_EXPANSION_TRIGGER_WINDOW+params.TREE_EXPANSION_WAIT_COUNT { - header.SetThresholdCount(0) + newWo.Header().SetThresholdCount(0) } else { - header.SetThresholdCount(parent.Header().ThresholdCount() + 1) + newWo.Header().SetThresholdCount(parent.Header().ThresholdCount() + 1) } } // Expansion happens when the threshold count is greater than the // expansion threshold and we cross the tree expansion trigger window if parent.Header().ThresholdCount() >= params.TREE_EXPANSION_TRIGGER_WINDOW+params.TREE_EXPANSION_WAIT_COUNT { - header.SetExpansionNumber(parent.Header().ExpansionNumber() + 1) + newWo.Header().SetExpansionNumber(parent.Header().ExpansionNumber() + 1) } else { - header.SetExpansionNumber(parent.Header().ExpansionNumber()) + newWo.Header().SetExpansionNumber(parent.Header().ExpansionNumber()) } } } @@ -1050,22 +1058,22 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en if nodeCtx == common.ZONE_CTX { if order == common.PRIME_CTX { // Set the prime terminus - header.SetPrimeTerminus(parent.Hash()) + newWo.Header().SetPrimeTerminus(parent.Hash()) } else { if w.hc.IsGenesisHash(parent.Hash()) { - header.SetPrimeTerminus(parent.Hash()) + newWo.Header().SetPrimeTerminus(parent.Hash()) } else { // carry the prime terminus from the parent block - header.SetPrimeTerminus(parent.Header().PrimeTerminus()) + newWo.Header().SetPrimeTerminus(parent.Header().PrimeTerminus()) } } } if nodeCtx == common.PRIME_CTX { if w.hc.IsGenesisHash(parent.Hash()) { - header.SetEtxEligibleSlices(parent.Header().EtxEligibleSlices()) + newWo.Header().SetEtxEligibleSlices(parent.Header().EtxEligibleSlices()) } else { - header.SetEtxEligibleSlices(w.hc.UpdateEtxEligibleSlices(parent.Header(), parent.Location())) + newWo.Header().SetEtxEligibleSlices(w.hc.UpdateEtxEligibleSlices(parent, parent.Location())) } } @@ -1076,7 +1084,7 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en interlinkHashes = common.Hashes{parent.Hash(), parent.Hash(), parent.Hash(), parent.Hash()} } else { // check if parent belongs to any interlink level - rank, err := w.engine.CalcRank(w.hc, parent.Header()) + rank, err := w.engine.CalcRank(w.hc, parent) if err != nil { return nil, err } @@ -1095,41 +1103,44 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en // Store the interlink hashes in the database rawdb.WriteInterlinkHashes(w.workerDb, parent.Hash(), interlinkHashes) interlinkRootHash := types.DeriveSha(interlinkHashes, trie.NewStackTrie(nil)) - header.SetInterlinkRootHash(interlinkRootHash) + newWo.Header().SetInterlinkRootHash(interlinkRootHash) } // Only zone should calculate 
state if nodeCtx == common.ZONE_CTX && w.hc.ProcessingState() { - header.SetExtra(w.extra) - header.SetBaseFee(misc.CalcBaseFee(w.chainConfig, parent.Header())) + newWo.Header().SetExtra(w.extra) + newWo.Header().SetBaseFee(misc.CalcBaseFee(w.chainConfig, parent)) if w.isRunning() { if w.coinbase.Equal(common.Zero) { w.logger.Error("Refusing to mine without etherbase") return nil, errors.New("refusing to mine without etherbase") } - header.SetCoinbase(w.coinbase) + newWo.Header().SetCoinbase(w.coinbase) } // Run the consensus preparation with the default or customized consensus engine. - if err := w.engine.Prepare(w.hc, header, block.Header()); err != nil { + if err := w.engine.Prepare(w.hc, newWo, wo); err != nil { w.logger.WithField("err", err).Error("Failed to prepare header for sealing") return nil, err } - env, err := w.makeEnv(parent, header, w.coinbase) + proposedWoHeader := types.NewWorkObjectHeader(newWo.Hash(), newWo.ParentHash(nodeCtx), newWo.Number(nodeCtx), newWo.Difficulty(), types.EmptyRootHash, newWo.Nonce(), newWo.Time(), newWo.Location()) + proposedWoBody := types.NewWorkObjectBody(newWo.Header(), nil, nil, nil, nil, nil, nil, nodeCtx) + proposedWo := types.NewWorkObject(proposedWoHeader, proposedWoBody, nil, types.BlockObject) + env, err := w.makeEnv(parent, proposedWo, w.coinbase) if err != nil { w.logger.WithField("err", err).Error("Failed to create sealing context") return nil, err } // Accumulate the uncles for the sealing work. - commitUncles := func(blocks map[common.Hash]*types.Block) { - for hash, uncle := range blocks { + commitUncles := func(wos map[common.Hash]*types.WorkObjectHeader) { + for hash, uncle := range wos { env.uncleMu.RLock() if len(env.uncles) == 2 { env.uncleMu.RUnlock() break } env.uncleMu.RUnlock() - if err := w.commitUncle(env, uncle.Header()); err != nil { + if err := w.commitUncle(env, uncle); err != nil { w.logger.WithFields(log.Fields{ "hash": hash, "reason": err, @@ -1148,7 +1159,10 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en } return env, nil } else { - return &environment{header: header}, nil + proposedWoHeader := types.NewWorkObjectHeader(newWo.Hash(), newWo.ParentHash(nodeCtx), newWo.Number(nodeCtx), newWo.Difficulty(), types.EmptyRootHash, newWo.Nonce(), newWo.Time(), newWo.Location()) + proposedWoBody := types.NewWorkObjectBody(newWo.Header(), nil, nil, nil, nil, nil, nil, nodeCtx) + proposedWo := types.NewWorkObject(proposedWoHeader, proposedWoBody, nil, types.BlockObject) + return &environment{wo: proposedWo}, nil } } @@ -1156,14 +1170,14 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en // fillTransactions retrieves the pending transactions from the txpool and fills them // into the given sealing block. The transaction selection and ordering strategy can // be customized with the plugin in the future. -func (w *worker) fillTransactions(interrupt *int32, env *environment, block *types.Block, fill bool) *types.EtxSet { +func (w *worker) fillTransactions(interrupt *int32, env *environment, block *types.WorkObject, fill bool) *types.EtxSet { // Split the pending transactions into locals and remotes // Fill the block with all available pending transactions. 
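
Reviewer note (illustrative sketch): prepareWork now wraps the prepared header into a WorkObjectHeader plus an initially empty WorkObjectBody via types.NewWorkObjectHeader / NewWorkObjectBody / NewWorkObject before handing the proposed work object to makeEnv. The types and constructor below are simplified stand-ins for that composition, not the real go-quai API:

package main

import "fmt"

// Stand-ins for types.WorkObjectHeader / WorkObjectBody / WorkObject.
type woHeader struct {
	parentHash string
	number     uint64
	time       uint64
}

type woBody struct {
	txCount int
}

type workObject struct {
	header woHeader
	body   woBody
}

// newProposedWorkObject mirrors the shape of the prepareWork change: build the
// sealing header first, then wrap it with an (initially empty) body.
func newProposedWorkObject(parentHash string, parentNumber, timestamp uint64) *workObject {
	h := woHeader{parentHash: parentHash, number: parentNumber + 1, time: timestamp}
	return &workObject{header: h, body: woBody{}}
}

func main() {
	wo := newProposedWorkObject("0xparent", 99, 1_700_000_000)
	fmt.Println(wo.header.number, wo.body.txCount) // 100 0
}
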
etxs := make([]*types.Transaction, 0) - etxSet := rawdb.ReadEtxSet(w.hc.bc.db, block.Hash(), block.NumberU64(w.hc.NodeCtx()), w.hc.NodeLocation()) + etxSet := rawdb.ReadEtxSet(w.hc.bc.db, block.Hash(), block.NumberU64(w.hc.NodeCtx())) if etxSet != nil { etxs = make([]*types.Transaction, 0, len(etxSet.ETXHashes)/common.HashLength) - maxEtxGas := (env.header.GasLimit() / params.MinimumEtxGasDivisor) * params.MaximumEtxGasMultiplier + maxEtxGas := (env.wo.GasLimit() / params.MinimumEtxGasDivisor) * params.MaximumEtxGasMultiplier totalGasEstimate := uint64(0) index := 0 for { @@ -1171,7 +1185,7 @@ func (w *worker) fillTransactions(interrupt *int32, env *environment, block *typ if (hash == common.Hash{}) { // no more ETXs break } - entry := rawdb.ReadETX(w.hc.bc.db, hash, w.hc.NodeLocation()) + entry := rawdb.ReadETX(w.hc.bc.db, hash) if entry == nil { log.Global.Errorf("ETX %s not found in the database!", hash.String()) break @@ -1185,7 +1199,7 @@ func (w *worker) fillTransactions(interrupt *int32, env *environment, block *typ } if !fill { if len(etxs) > 0 { - w.commitTransactions(env, block.Header(), etxs, &types.TransactionsByPriceAndNonce{}, etxSet, interrupt) + w.commitTransactions(env, block, etxs, &types.TransactionsByPriceAndNonce{}, etxSet, interrupt) } return etxSet } @@ -1198,8 +1212,8 @@ func (w *worker) fillTransactions(interrupt *int32, env *environment, block *typ pendingQiTxs := w.txPool.QiPoolPending() if len(pending) > 0 || len(pendingQiTxs) > 0 || len(etxs) > 0 { - txs := types.NewTransactionsByPriceAndNonce(env.signer, pendingQiTxs, pending, env.header.BaseFee(), true) - if w.commitTransactions(env, block.Header(), etxs, txs, etxSet, interrupt) { + txs := types.NewTransactionsByPriceAndNonce(env.signer, pendingQiTxs, pending, env.wo.BaseFee(), true) + if w.commitTransactions(env, block, etxs, txs, etxSet, interrupt) { return etxSet } } @@ -1209,13 +1223,13 @@ func (w *worker) fillTransactions(interrupt *int32, env *environment, block *typ // fillTransactions retrieves the pending transactions from the txpool and fills them // into the given sealing block. The transaction selection and ordering strategy can // be customized with the plugin in the future. 
-func (w *worker) adjustGasLimit(interrupt *int32, env *environment, parent *types.Block) { - env.header.SetGasLimit(CalcGasLimit(parent.Header(), w.config.GasCeil)) +func (w *worker) adjustGasLimit(env *environment, parent *types.WorkObject) { + env.wo.Header().SetGasLimit(CalcGasLimit(parent, w.config.GasCeil)) } // ComputeManifestHash given a header computes the manifest hash for the header // and stores it in the database -func (w *worker) ComputeManifestHash(header *types.Header) common.Hash { +func (w *worker) ComputeManifestHash(header *types.WorkObject) common.Hash { manifest := rawdb.ReadManifest(w.workerDb, header.Hash()) if manifest == nil { nodeCtx := w.hc.NodeCtx() @@ -1238,26 +1252,26 @@ func (w *worker) ComputeManifestHash(header *types.Header) common.Hash { return manifestHash } -func (w *worker) FinalizeAssemble(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Block, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.Block, error) { +func (w *worker) FinalizeAssemble(chain consensus.ChainHeaderReader, newWo *types.WorkObject, parent *types.WorkObject, state *state.StateDB, txs []*types.Transaction, uncles []*types.WorkObjectHeader, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.WorkObject, error) { nodeCtx := w.hc.NodeCtx() - block, err := w.engine.FinalizeAndAssemble(chain, header, state, txs, uncles, etxs, subManifest, receipts) + wo, err := w.engine.FinalizeAndAssemble(chain, newWo, state, txs, uncles, etxs, subManifest, receipts) if err != nil { return nil, err } // Once the uncles list is assembled in the block if nodeCtx == common.ZONE_CTX { - block.Header().SetUncledS(w.engine.UncledLogS(block)) + wo.Header().SetUncledS(w.engine.UncledLogS(wo)) } - manifestHash := w.ComputeManifestHash(parent.Header()) + manifestHash := w.ComputeManifestHash(parent) if w.hc.ProcessingState() { - block.Header().SetManifestHash(manifestHash, nodeCtx) + wo.Header().SetManifestHash(manifestHash, nodeCtx) if nodeCtx == common.ZONE_CTX { // Compute and set etx rollup hash var etxRollup types.Transactions - if w.engine.IsDomCoincident(w.hc, parent.Header()) { + if w.engine.IsDomCoincident(w.hc, parent) { etxRollup = parent.ExtTransactions() } else { etxRollup, err = w.hc.CollectEtxRollup(parent) @@ -1267,44 +1281,30 @@ func (w *worker) FinalizeAssemble(chain consensus.ChainHeaderReader, header *typ etxRollup = append(etxRollup, parent.ExtTransactions()...) } etxRollupHash := types.DeriveSha(etxRollup, trie.NewStackTrie(nil)) - block.Header().SetEtxRollupHash(etxRollupHash) + wo.Header().SetEtxRollupHash(etxRollupHash) } - - w.AddPendingBlockBody(block.Header(), block.Body()) } - return block, nil -} -// GetPendingBlockBodyKey takes a header and hashes all the Roots together -// and returns the key to be used for the pendingBlockBodyCache. -func (w *worker) getPendingBlockBodyKey(header *types.Header) common.Hash { - return types.RlpHash([]interface{}{ - header.EVMRoot(), - header.UTXORoot(), - header.UncleHash(), - header.TxHash(), - header.EtxHash(), - }) + return wo, nil } // AddPendingBlockBody adds an entry in the lru cache for the given pendingBodyKey // maps it to body. 
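
Reviewer note (sketch, not part of the patch): the pending block body cache is rekeyed from an RLP hash over several header roots to the work object's seal hash, and GetPendingBlockBody now returns an explicit error when the body is missing. A minimal stand-in for that add/get behavior, with a plain map in place of the LRU cache:

package main

import (
	"errors"
	"fmt"
)

type workObject struct{ sealHash string } // stand-in for *types.WorkObject

// pendingBodies stands in for the LRU cache keyed directly by seal hash.
type pendingBodies struct {
	cache map[string]*workObject
}

func (p *pendingBodies) add(wo *workObject) {
	p.cache[wo.sealHash] = wo
}

func (p *pendingBodies) get(sealHash string) (*workObject, error) {
	if wo, ok := p.cache[sealHash]; ok {
		return wo, nil
	}
	return nil, errors.New("pending block body not found")
}

func main() {
	p := &pendingBodies{cache: map[string]*workObject{}}
	p.add(&workObject{sealHash: "0xseal"})
	if wo, err := p.get("0xseal"); err == nil {
		fmt.Println(wo.sealHash)
	}
	_, err := p.get("0xmissing")
	fmt.Println(err) // pending block body not found
}
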
-func (w *worker) AddPendingBlockBody(header *types.Header, body *types.Body) { - w.pendingBlockBody.ContainsOrAdd(w.getPendingBlockBodyKey(header), body) +func (w *worker) AddPendingWorkObjectBody(wo *types.WorkObject) { + w.pendingBlockBody.Add(wo.SealHash(), wo) } // GetPendingBlockBody gets the block body associated with the given header. -func (w *worker) GetPendingBlockBody(header *types.Header) *types.Body { - key := w.getPendingBlockBodyKey(header) - body, ok := w.pendingBlockBody.Get(key) +func (w *worker) GetPendingBlockBody(woHeader *types.WorkObject) (*types.WorkObject, error) { + body, ok := w.pendingBlockBody.Get(woHeader.SealHash()) if ok { - return body.(*types.Body) + return body.(*types.WorkObject), nil } - w.logger.WithField("key", key).Warn("pending block body not found for header") - return nil + w.logger.WithField("key", woHeader.SealHash()).Warn("pending block body not found for header") + return nil, errors.New("pending block body not found") } -func (w *worker) SubscribeAsyncPendingHeader(ch chan *types.Header) event.Subscription { +func (w *worker) SubscribeAsyncPendingHeader(ch chan *types.WorkObject) event.Subscription { return w.scope.Track(w.asyncPhFeed.Subscribe(ch)) } @@ -1319,7 +1319,7 @@ func copyReceipts(receipts []*types.Receipt) []*types.Receipt { } // totalFees computes total consumed miner fees in ETH. Block transactions and receipts have to have the same order. -func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { +func totalFees(block *types.WorkObject, receipts []*types.Receipt) *big.Float { feesWei := new(big.Int) for i, tx := range block.QuaiTransactions() { minerFee, _ := tx.EffectiveGasTip(block.BaseFee()) @@ -1328,7 +1328,7 @@ func totalFees(block *types.Block, receipts []*types.Receipt) *big.Float { return new(big.Float).Quo(new(big.Float).SetInt(feesWei), new(big.Float).SetInt(big.NewInt(params.Ether))) } -func (w *worker) CurrentInfo(header *types.Header) bool { +func (w *worker) CurrentInfo(header *types.WorkObject) bool { if w.headerPrints.Contains(header.Hash()) { return false } @@ -1342,7 +1342,7 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment) error { if tx.Type() != types.QiTxType { return fmt.Errorf("tx %032x is not a QiTx", tx.Hash()) } - if types.IsCoinBaseTx(tx, env.header.ParentHash(w.hc.NodeCtx()), location) { + if types.IsCoinBaseTx(tx, env.wo.ParentHash(w.hc.NodeCtx()), location) { return fmt.Errorf("tx %032x is a coinbase QiTx", tx.Hash()) } if tx.ChainId().Cmp(w.chainConfig.ChainID) != 0 { @@ -1351,7 +1351,7 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment) error { txGas := types.CalculateQiTxGas(tx) - gasUsed := env.header.GasUsed() + gasUsed := env.wo.Header().GasUsed() gasUsed += txGas addresses := make(map[common.AddressBytes]struct{}) totalQitIn := big.NewInt(0) @@ -1425,7 +1425,7 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment) error { // We should require some kind of extra fee here etxInner := types.ExternalTx{Value: big.NewInt(int64(txOut.Denomination)), To: &toAddr, Sender: common.ZeroAddress(location), OriginatingTxHash: tx.Hash(), ETXIndex: uint16(txOutIdx), Gas: params.TxGas} etx := types.NewTx(&etxInner) - primeTerminus := w.hc.GetPrimeTerminus(env.header) + primeTerminus := w.hc.GetPrimeTerminus(env.wo) if !w.hc.IsSliceSetToReceiveEtx(primeTerminus, *toAddr.Location()) { return fmt.Errorf("etx emitted by tx [%v] going to a slice that is not eligible to receive etx %v", tx.Hash().Hex(), *toAddr.Location()) } @@ -1449,7 
+1449,7 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment) error { return err } // Check tx against required base fee and gas - baseFeeInQi := misc.QuaiToQi(env.header, env.header.BaseFee()) + baseFeeInQi := misc.QuaiToQi(env.wo, env.wo.Header().BaseFee()) minimumFee := new(big.Int).Mul(baseFeeInQi, big.NewInt(int64(txGas))) if txFeeInQit.Cmp(minimumFee) < 0 { return fmt.Errorf("tx %032x has insufficient fee for base fee of %d and gas of %d", tx.Hash(), baseFeeInQi.Uint64(), txGas) @@ -1458,7 +1458,7 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment) error { // Miner gets remainder of fee after base fee txFeeInQit.Sub(txFeeInQit, minimumFee) - env.header.SetGasUsed(gasUsed) + env.wo.Header().SetGasUsed(gasUsed) env.etxRLimit -= ETXRCount env.etxPLimit -= ETXPCount env.etxs = append(env.etxs, etxs...) @@ -1470,13 +1470,15 @@ func (w *worker) processQiTx(tx *types.Transaction, env *environment) error { for outPoint, utxo := range utxosCreate { env.state.CreateUTXO(outPoint.TxHash, outPoint.Index, utxo) } + gasUsed = env.wo.Header().GasUsed() + env.wo.Header().SetGasUsed(gasUsed + txGas) return nil } // createCoinbaseTx returns a coinbase transaction paying an appropriate subsidy // based on the passed block height to the provided address. When the address // is nil, the coinbase transaction will instead be redeemable by anyone. -func createCoinbaseTxWithFees(header *types.Header, fees *big.Int, state *state.StateDB) (*types.Transaction, error) { +func createCoinbaseTxWithFees(header *types.WorkObject, fees *big.Int, state *state.StateDB) (*types.Transaction, error) { parentHash := header.ParentHash(header.Location().Context()) // all blocks should have zone location and context // The coinbase transaction input must be the parent hash encoded with the proper origin location diff --git a/ethdb/database.go b/ethdb/database.go index 01eed4e6e2..1eb92c3f7e 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -19,6 +19,8 @@ package ethdb import ( "io" + + "github.com/dominant-strategies/go-quai/common" ) // KeyValueReader wraps the Has and Get method of a backing data store. @@ -28,6 +30,8 @@ type KeyValueReader interface { // Get retrieves the given key if it's present in the key-value data store. Get(key []byte) ([]byte, error) + + Location() common.Location } // KeyValueWriter wraps the Put method of a backing data store. diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index e649c03465..12b9bd5da5 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -65,13 +65,14 @@ type Database struct { quitLock sync.Mutex // Mutex protecting the quit channel access quitChan chan chan error // Quit channel to stop the metrics collection before closing the database - logger *log.Logger // Contextual logger tracking the database path + logger *log.Logger // Contextual logger tracking the database path + location common.Location } // New returns a wrapped LevelDB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. 
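
Reviewer note (illustrative sketch): the ethdb KeyValueReader interface gains a Location() accessor, and each backing store (leveldb, pebble, memorydb) now carries the node location it was opened with. The example below shows the shape of that change with a stand-in location type and an in-memory store; it is not the real ethdb API:

package main

import "fmt"

// location is a stand-in for go-quai's common.Location.
type location []byte

// keyValueReader mirrors the extended interface: existing read methods plus
// the new Location accessor.
type keyValueReader interface {
	Get(key []byte) ([]byte, error)
	Location() location
}

type memDB struct {
	data map[string][]byte
	loc  location
}

// newMemDB threads the location through the constructor, as the leveldb and
// pebble New functions now do.
func newMemDB(loc location) *memDB {
	return &memDB{data: make(map[string][]byte), loc: loc}
}

func (db *memDB) Get(key []byte) ([]byte, error) { return db.data[string(key)], nil }
func (db *memDB) Location() location             { return db.loc }

func main() {
	var db keyValueReader = newMemDB(location{0, 0}) // e.g. region 0, zone 0
	fmt.Println(db.Location())                       // [0 0]
}
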
-func New(file string, cache int, handles int, namespace string, readonly bool, logger *log.Logger) (*Database, error) { - return NewCustom(file, namespace, func(options *opt.Options) { +func New(file string, cache int, handles int, namespace string, readonly bool, logger *log.Logger, location common.Location) (*Database, error) { + return NewCustom(file, namespace, location, func(options *opt.Options) { // Ensure we have some minimal caching and file guarantees if cache < minCache { cache = minCache @@ -92,7 +93,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, l // NewCustom returns a wrapped LevelDB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. // The customize function allows the caller to modify the leveldb options. -func NewCustom(file string, namespace string, customize func(options *opt.Options), logger *log.Logger) (*Database, error) { +func NewCustom(file string, namespace string, location common.Location, customize func(options *opt.Options), logger *log.Logger) (*Database, error) { options := configureOptions(customize) usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2 logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()} @@ -115,6 +116,7 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option db: db, quitChan: make(chan chan error), logger: logger, + location: location, } // Start up the metrics gathering and return @@ -135,6 +137,10 @@ func configureOptions(customizeFn func(*opt.Options)) *opt.Options { return options } +func (db *Database) Location() common.Location { + return db.location +} + // Close stops the metrics collection, flushes any pending data to disk and closes // all io accesses to the underlying key-value store. func (db *Database) Close() error { diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go index a340857b74..01e3170102 100644 --- a/ethdb/memorydb/memorydb.go +++ b/ethdb/memorydb/memorydb.go @@ -185,6 +185,10 @@ func (db *Database) Len() int { return len(db.db) } +func (db *Database) Location() common.Location { + return nil +} + // keyvalue is a key-value tuple tagged with a deletion field to allow creating // memory-database write batches. type keyvalue struct { diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index fa664e6cc2..c1ea6b672d 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -69,6 +69,7 @@ type Database struct { writeDelayStartTime time.Time // The start time of the latest write stall writeDelayCount atomic.Int64 // Total number of write stall counts writeDelayTime atomic.Int64 // Total time spent in write stalls + location common.Location } func (d *Database) onCompactionBegin(info pebble.CompactionInfo) { @@ -103,7 +104,7 @@ func (d *Database) onWriteStallEnd() { // New returns a wrapped pebble DB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. 
-func New(file string, cache int, handles int, namespace string, readonly bool, logger *log.Logger) (*Database, error) { +func New(file string, cache int, handles int, namespace string, readonly bool, logger *log.Logger, location common.Location) (*Database, error) { // Ensure we have some minimal caching and file guarantees if cache < minCache { cache = minCache @@ -132,6 +133,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool, l fn: file, quitChan: make(chan chan error), logger: logger, + location: location, } opt := &pebble.Options{ // Pebble has a single combined cache area and the write @@ -282,6 +284,10 @@ func (d *Database) NewBatchWithSize(_ int) ethdb.Batch { } } +func (db *Database) Location() common.Location { + return db.location +} + // upperBound returns the upper bound for the given prefix func upperBound(prefix []byte) (limit []byte) { for i := len(prefix) - 1; i >= 0; i-- { diff --git a/interfaces.go b/interfaces.go index dfdd09ddbc..dfea66ba11 100644 --- a/interfaces.go +++ b/interfaces.go @@ -52,16 +52,16 @@ type Subscription interface { // // The returned error is NotFound if the requested item does not exist. type ChainReader interface { - BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) - BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) - HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) - HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.WorkObject, error) + BlockByNumber(ctx context.Context, number *big.Int) (*types.WorkObject, error) + HeaderByHash(ctx context.Context, hash common.Hash) (*types.WorkObject, error) + HeaderByNumber(ctx context.Context, number *big.Int) (*types.WorkObject, error) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) // This method subscribes to notifications about changes of the head block of // the canonical chain. - SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (Subscription, error) + SubscribeNewHead(ctx context.Context, ch chan<- *types.WorkObject) (Subscription, error) } // TransactionReader provides access to past transactions and their receipts. diff --git a/internal/quaiapi/api.go b/internal/quaiapi/api.go index 50349c5cd0..5ad7397a7a 100644 --- a/internal/quaiapi/api.go +++ b/internal/quaiapi/api.go @@ -358,7 +358,7 @@ func (s *PublicBlockChainAPI) GetProof(ctx context.Context, address common.Addre func (s *PublicBlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { header, err := s.b.HeaderByNumber(ctx, number) if header != nil && err == nil { - response := RPCMarshalETHHeader(header) + response := RPCMarshalETHHeader(header.Header()) //TODO: mmtx this function will break once extra fields are stripped from header. if number == rpc.PendingBlockNumber { // Pending header need to nil out a few fields for _, field := range []string{"hash", "nonce", "miner"} { @@ -374,7 +374,7 @@ func (s *PublicBlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc. 
func (s *PublicBlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) map[string]interface{} { header, _ := s.b.HeaderByHash(ctx, hash) if header != nil { - return RPCMarshalETHHeader(header) + return RPCMarshalETHHeader(header.Header()) } return nil } @@ -423,8 +423,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockNumberAndIndex(ctx context.Context, }).Debug("Requested uncle not found") return nil, nil } - block = types.NewBlockWithHeader(uncles[index]) - return s.rpcMarshalBlock(ctx, block, false, false) + return uncles[index].RPCMarshalWorkObjectHeader(), nil } return nil, err } @@ -443,8 +442,7 @@ func (s *PublicBlockChainAPI) GetUncleByBlockHashAndIndex(ctx context.Context, b }).Debug("Requested uncle not found") return nil, nil } - block = types.NewBlockWithHeader(uncles[index]) - return s.rpcMarshalBlock(ctx, block, false, false) + return uncles[index].RPCMarshalWorkObjectHeader(), nil } return nil, err } @@ -930,18 +928,14 @@ func RPCMarshalETHHeader(head *types.Header) map[string]interface{} { "number": (*hexutil.Big)(head.Number(common.ZONE_CTX)), "hash": head.Hash(), "parentHash": head.ParentHash, - "nonce": head.Nonce, "sha3Uncles": head.UncleHash, "evmRoot": head.EVMRoot, "miner": head.Coinbase, - "difficulty": (*hexutil.Big)(head.Difficulty()), "extraData": hexutil.Bytes(head.Extra()), "size": hexutil.Uint64(head.Size()), "gasLimit": hexutil.Uint64(head.GasLimit()), "gasUsed": hexutil.Uint64(head.GasUsed()), "baseFee": hexutil.Big(*head.BaseFee()), - "location": head.Location(), - "timestamp": hexutil.Uint64(head.Time()), "transactionsRoot": head.TxHash, "receiptsRoot": head.ReceiptHash, } @@ -952,7 +946,7 @@ func RPCMarshalETHHeader(head *types.Header) map[string]interface{} { // RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. -func RPCMarshalETHBlock(block *types.Block, inclTx bool, fullTx bool, nodeLocation common.Location) (map[string]interface{}, error) { +func RPCMarshalETHBlock(block *types.WorkObject, inclTx bool, fullTx bool, nodeLocation common.Location) (map[string]interface{}, error) { fields := RPCMarshalETHHeader(block.Header()) fields["size"] = hexutil.Uint64(block.Size()) @@ -987,20 +981,20 @@ func RPCMarshalETHBlock(block *types.Block, inclTx bool, fullTx bool, nodeLocati // rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires // a `PublicBlockchainAPI`. -func (s *PublicBlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.Header) map[string]interface{} { - fields := RPCMarshalETHHeader(header) +func (s *PublicBlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.WorkObject) map[string]interface{} { + fields := RPCMarshalETHHeader(header.Header()) fields["totalEntropy"] = (*hexutil.Big)(s.b.TotalLogS(header)) return fields } // rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires // a `PublicBlockchainAPI`. 
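
Reviewer note (sketch, not part of the patch): the uncle RPC endpoints now answer with uncles[index].RPCMarshalWorkObjectHeader() instead of constructing a throwaway block. The helper below is a simplified stand-in showing the idea of returning a JSON-ready map for a work object header; field names and encoding are assumptions, not the real marshaling code:

package main

import (
	"encoding/json"
	"fmt"
)

// woHeader is a simplified stand-in for types.WorkObjectHeader.
type woHeader struct {
	Number     uint64
	ParentHash string
	Time       uint64
}

// rpcMarshalWorkObjectHeader builds a JSON-ready map, mirroring the shape of
// the real RPCMarshalWorkObjectHeader used by the uncle queries.
func rpcMarshalWorkObjectHeader(h *woHeader) map[string]interface{} {
	return map[string]interface{}{
		"number":     fmt.Sprintf("0x%x", h.Number),
		"parentHash": h.ParentHash,
		"timestamp":  fmt.Sprintf("0x%x", h.Time),
	}
}

func main() {
	out, _ := json.Marshal(rpcMarshalWorkObjectHeader(&woHeader{Number: 16, ParentHash: "0xabc", Time: 1_700_000_000}))
	fmt.Println(string(out))
}
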
-func (s *PublicBlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { +func (s *PublicBlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.WorkObject, inclTx bool, fullTx bool) (map[string]interface{}, error) { fields, err := RPCMarshalBlock(b, inclTx, fullTx, s.b.NodeLocation()) if err != nil { return nil, err } - fields["totalEntropy"] = (*hexutil.Big)(s.b.TotalLogS(b.Header())) + fields["totalEntropy"] = (*hexutil.Big)(s.b.TotalLogS(b)) return fields, err } @@ -1109,7 +1103,7 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber } // newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation -func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, config *params.ChainConfig) *RPCTransaction { +func newRPCPendingTransaction(tx *types.Transaction, current *types.WorkObject, config *params.ChainConfig) *RPCTransaction { var baseFee *big.Int if current != nil { baseFee = misc.CalcBaseFee(config, current) @@ -1118,7 +1112,7 @@ func newRPCPendingTransaction(tx *types.Transaction, current *types.Header, conf } // newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation. -func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, etxs bool, nodeLocation common.Location) *RPCTransaction { +func newRPCTransactionFromBlockIndex(b *types.WorkObject, index uint64, etxs bool, nodeLocation common.Location) *RPCTransaction { nodeCtx := nodeLocation.Context() var txs types.Transactions if etxs { @@ -1133,7 +1127,7 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64, etxs bool, no } // newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index. -func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.Bytes { +func newRPCRawTransactionFromBlockIndex(b *types.WorkObject, index uint64) hexutil.Bytes { txs := b.Transactions() if index >= uint64(len(txs)) { return nil @@ -1143,7 +1137,7 @@ func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.By } // newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation. 
-func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash, etxs bool, nodeLocation common.Location) *RPCTransaction { +func newRPCTransactionFromBlockHash(b *types.WorkObject, hash common.Hash, etxs bool, nodeLocation common.Location) *RPCTransaction { if etxs { for idx, tx := range b.ExtTransactions() { if tx.Hash() == hash { @@ -1378,7 +1372,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionByHash(ctx context.Context, has } // No finalized transaction, try to retrieve it from the pool if tx := s.b.GetPoolTransaction(hash); tx != nil { - return newRPCPendingTransaction(tx, s.b.CurrentHeader(), s.b.ChainConfig()), nil + return newRPCPendingTransaction(tx, s.b.CurrentBlock(), s.b.ChainConfig()), nil } // Transaction unknown, return as such diff --git a/internal/quaiapi/backend.go b/internal/quaiapi/backend.go index fb8de65c92..fac1263d0b 100644 --- a/internal/quaiapi/backend.go +++ b/internal/quaiapi/backend.go @@ -53,52 +53,52 @@ type Backend interface { // Blockchain API NodeLocation() common.Location NodeCtx() int - HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) - HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) - HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) - CurrentHeader() *types.Header - CurrentBlock() *types.Block + HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.WorkObject, error) + HeaderByHash(ctx context.Context, hash common.Hash) (*types.WorkObject, error) + HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.WorkObject, error) + CurrentHeader() *types.WorkObject + CurrentBlock() *types.WorkObject CurrentLogEntropy() *big.Int - TotalLogS(header *types.Header) *big.Int - CalcOrder(header *types.Header) (*big.Int, int, error) - BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) - BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) - BlockOrCandidateByHash(hash common.Hash) *types.Block - BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) - StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) - StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) + TotalLogS(header *types.WorkObject) *big.Int + CalcOrder(header *types.WorkObject) (*big.Int, int, error) + BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.WorkObject, error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.WorkObject, error) + BlockOrCandidateByHash(hash common.Hash) *types.WorkObject + BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.WorkObject, error) + StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.WorkObject, error) + StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.WorkObject, error) UTXOsByAddress(ctx context.Context, address common.Address) ([]*types.UtxoEntry, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) - GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) + GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.WorkObject, vmConfig *vm.Config) (*vm.EVM, func() error, error) 
SetCurrentExpansionNumber(expansionNumber uint8) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription - WriteBlock(block *types.Block) - Append(header *types.Header, manifest types.BlockManifest, domPendingHeader *types.Header, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) + WriteBlock(block *types.WorkObject) + Append(header *types.WorkObject, manifest types.BlockManifest, domPendingHeader *types.WorkObject, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) DownloadBlocksInManifest(hash common.Hash, manifest types.BlockManifest, entropy *big.Int) - ConstructLocalMinedBlock(header *types.Header) (*types.Block, error) - InsertBlock(ctx context.Context, block *types.Block) (int, error) - PendingBlock() *types.Block + ConstructLocalMinedBlock(header *types.WorkObject) (*types.WorkObject, error) + InsertBlock(ctx context.Context, block *types.WorkObject) (int, error) + PendingBlock() *types.WorkObject SubRelayPendingHeader(pendingHeader types.PendingHeader, newEntropy *big.Int, location common.Location, subReorg bool, order int) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingHeader, location common.Location) RequestDomToAppendOrFetch(hash common.Hash, entropy *big.Int, order int) - NewGenesisPendingHeader(pendingHeader *types.Header, domTerminus common.Hash, hash common.Hash) - GetPendingHeader() (*types.Header, error) + NewGenesisPendingHeader(pendingHeader *types.WorkObject, domTerminus common.Hash, hash common.Hash) + GetPendingHeader() (*types.WorkObject, error) GetManifest(blockHash common.Hash) (types.BlockManifest, error) GetSubManifest(slice common.Location, blockHash common.Hash) (types.BlockManifest, error) AddPendingEtxs(pEtxs types.PendingEtxs) error AddPendingEtxsRollup(pEtxsRollup types.PendingEtxsRollup) error - PendingBlockAndReceipts() (*types.Block, types.Receipts) - GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes types.Termini) error + PendingBlockAndReceipts() (*types.WorkObject, types.Receipts) + GenerateRecoveryPendingHeader(pendingHeader *types.WorkObject, checkpointHashes types.Termini) error GetPendingEtxsRollupFromSub(hash common.Hash, location common.Location) (types.PendingEtxsRollup, error) GetPendingEtxsFromSub(hash common.Hash, location common.Location) (types.PendingEtxs, error) ProcessingState() bool GetSlicesRunning() []common.Location SetSubClient(client *quaiclient.Client, location common.Location) - AddGenesisPendingEtxs(block *types.Block) + AddGenesisPendingEtxs(block *types.WorkObject) SubscribeExpansionEvent(ch chan<- core.ExpansionEvent) event.Subscription - WriteGenesisBlock(block *types.Block, location common.Location) + WriteGenesisBlock(block *types.WorkObject, location common.Location) // Transaction pool API SendTx(ctx context.Context, signedTx *types.Transaction) error @@ -119,7 +119,7 @@ type Backend interface { SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription - SubscribePendingHeaderEvent(ch chan<- *types.Header) event.Subscription + SubscribePendingHeaderEvent(ch chan<- *types.WorkObject) event.Subscription ChainConfig() *params.ChainConfig Engine() 
consensus.Engine @@ -128,7 +128,7 @@ type Backend interface { Logger() *log.Logger // P2P apis - BroadcastBlock(block *types.Block, location common.Location) error + BroadcastBlock(block *types.WorkObject, location common.Location) error } func GetAPIs(apiBackend Backend) []rpc.API { diff --git a/internal/quaiapi/quai_api.go b/internal/quaiapi/quai_api.go index 438825e315..588e46e855 100644 --- a/internal/quaiapi/quai_api.go +++ b/internal/quaiapi/quai_api.go @@ -234,7 +234,7 @@ func (s *PublicBlockChainQuaiAPI) GetProof(ctx context.Context, address common.A func (s *PublicBlockChainQuaiAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { header, err := s.b.HeaderByNumber(ctx, number) if header != nil && err == nil { - response := header.RPCMarshalHeader() + response := header.RPCMarshalWorkObject() if number == rpc.PendingBlockNumber { // Pending header need to nil out a few fields for _, field := range []string{"hash", "nonce", "miner"} { @@ -259,7 +259,7 @@ func (s *PublicBlockChainQuaiAPI) GetHeaderHashByNumber(ctx context.Context, num func (s *PublicBlockChainQuaiAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) map[string]interface{} { header, _ := s.b.HeaderByHash(ctx, hash) if header != nil { - return header.RPCMarshalHeader() + return header.RPCMarshalWorkObject() } return nil } @@ -308,8 +308,7 @@ func (s *PublicBlockChainQuaiAPI) GetUncleByBlockNumberAndIndex(ctx context.Cont }).Debug("Requested uncle not found") return nil, nil } - block = types.NewBlockWithHeader(uncles[index]) - return s.rpcMarshalBlock(ctx, block, false, false) + return uncles[index].RPCMarshalWorkObjectHeader(), nil } return nil, err } @@ -328,8 +327,7 @@ func (s *PublicBlockChainQuaiAPI) GetUncleByBlockHashAndIndex(ctx context.Contex }).Debug("Requested uncle not found") return nil, nil } - block = types.NewBlockWithHeader(uncles[index]) - return s.rpcMarshalBlock(ctx, block, false, false) + return uncles[index].RPCMarshalWorkObjectHeader(), nil } pendBlock, _ := s.b.PendingBlockAndReceipts() if pendBlock != nil && pendBlock.Hash() == blockHash { @@ -342,8 +340,7 @@ func (s *PublicBlockChainQuaiAPI) GetUncleByBlockHashAndIndex(ctx context.Contex }).Debug("Requested uncle not found in pending block") return nil, nil } - block = types.NewBlockWithHeader(uncles[index]) - return s.rpcMarshalBlock(ctx, block, false, false) + return uncles[index].RPCMarshalWorkObjectHeader(), nil } return nil, err } @@ -456,7 +453,7 @@ func (s *PublicBlockChainQuaiAPI) EstimateGas(ctx context.Context, args Transact // If txType is set to "true" returns the Quai base fee in units of Wei. // If txType is set to "false" returns the Qi base fee in units of Qit. func (s *PublicBlockChainQuaiAPI) BaseFee(ctx context.Context, txType bool) (*big.Int, error) { - header := s.b.CurrentHeader() + header := s.b.CurrentBlock() if header == nil { return nil, errors.New("no header available") } @@ -477,7 +474,7 @@ func (s *PublicBlockChainQuaiAPI) BaseFee(ctx context.Context, txType bool) (*bi // RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. 
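
Reviewer note (illustrative sketch): the Backend interface migration replaces every *types.Block / *types.Header return with the unified *types.WorkObject, which is why handlers such as BaseFee now read CurrentBlock() instead of CurrentHeader(). The trimmed interface and fake backend below only illustrate that consumer-side shape; baseFeeSource and the field it reads are hypothetical:

package main

import "fmt"

type workObject struct{ number uint64 } // stand-in for *types.WorkObject

// backend is a two-method slice of the migrated Backend interface.
type backend interface {
	CurrentBlock() *workObject
	BlockByNumber(number uint64) (*workObject, error)
}

type fakeBackend struct{ head *workObject }

func (b *fakeBackend) CurrentBlock() *workObject { return b.head }
func (b *fakeBackend) BlockByNumber(number uint64) (*workObject, error) {
	return &workObject{number: number}, nil
}

// baseFeeSource shows the consumer-side effect of the change: the handler
// reads the current work object rather than a bare header (the field read
// here is a placeholder, not the real BaseFee accessor).
func baseFeeSource(b backend) uint64 {
	return b.CurrentBlock().number
}

func main() {
	b := &fakeBackend{head: &workObject{number: 128}}
	fmt.Println(baseFeeSource(b)) // 128
}
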
-func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, nodeLocation common.Location) (map[string]interface{}, error) { +func RPCMarshalBlock(block *types.WorkObject, inclTx bool, fullTx bool, nodeLocation common.Location) (map[string]interface{}, error) { fields := block.Header().RPCMarshalHeader() fields["size"] = hexutil.Uint64(block.Size()) @@ -514,7 +511,7 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, nodeLocation } fields["uncles"] = block.Uncles() - fields["subManifest"] = block.SubManifest() + fields["subManifest"] = block.Manifest() fields["interlinkHashes"] = block.InterlinkHashes() return fields, nil @@ -547,25 +544,25 @@ func RPCMarshalHash(hash common.Hash) (map[string]interface{}, error) { // rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires // a `PublicBlockchainQuaiAPI`. -func (s *PublicBlockChainQuaiAPI) rpcMarshalHeader(ctx context.Context, header *types.Header) map[string]interface{} { - fields := header.RPCMarshalHeader() +func (s *PublicBlockChainQuaiAPI) rpcMarshalHeader(ctx context.Context, header *types.WorkObject) map[string]interface{} { + fields := header.RPCMarshalWorkObject() fields["totalEntropy"] = (*hexutil.Big)(s.b.TotalLogS(header)) return fields } // rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires // a `PublicBlockchainAPI`. -func (s *PublicBlockChainQuaiAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { +func (s *PublicBlockChainQuaiAPI) rpcMarshalBlock(ctx context.Context, b *types.WorkObject, inclTx bool, fullTx bool) (map[string]interface{}, error) { fields, err := RPCMarshalBlock(b, inclTx, fullTx, s.b.NodeLocation()) if err != nil { return nil, err } - _, order, err := s.b.CalcOrder(b.Header()) + _, order, err := s.b.CalcOrder(b) if err != nil { return nil, err } fields["order"] = order - fields["totalEntropy"] = (*hexutil.Big)(s.b.TotalLogS(b.Header())) + fields["totalEntropy"] = (*hexutil.Big)(s.b.TotalLogS(b)) return fields, err } @@ -594,11 +591,11 @@ func (s *PublicBlockChainQuaiAPI) CreateAccessList(ctx context.Context, args Tra return result, nil } -func (s *PublicBlockChainQuaiAPI) fillSubordinateManifest(b *types.Block) (*types.Block, error) { +func (s *PublicBlockChainQuaiAPI) fillSubordinateManifest(b *types.WorkObject) (*types.WorkObject, error) { nodeCtx := s.b.NodeCtx() if b.ManifestHash(nodeCtx+1) == types.EmptyRootHash { return nil, errors.New("cannot fill empty subordinate manifest") - } else if subManifestHash := types.DeriveSha(b.SubManifest(), trie.NewStackTrie(nil)); subManifestHash == b.ManifestHash(nodeCtx+1) { + } else if subManifestHash := types.DeriveSha(b.Manifest(), trie.NewStackTrie(nil)); subManifestHash == b.ManifestHash(nodeCtx+1) { // If the manifest hashes match, nothing to do return b, nil } else { @@ -623,7 +620,7 @@ func (s *PublicBlockChainQuaiAPI) fillSubordinateManifest(b *types.Block) (*type if subManifest == nil || b.ManifestHash(nodeCtx+1) != types.DeriveSha(subManifest, trie.NewStackTrie(nil)) { return nil, errors.New("reconstructed sub manifest does not match manifest hash") } - return types.NewBlockWithHeader(b.Header()).WithBody(b.Transactions(), b.Uncles(), b.ExtTransactions(), subManifest, b.InterlinkHashes()), nil + return types.NewWorkObjectWithHeaderAndTx(b.WorkObjectHeader(), b.Tx()).WithBody(b.Header(), b.Transactions(), b.ExtTransactions(), b.Uncles(), subManifest, 
b.InterlinkHashes()), nil } } @@ -631,11 +628,12 @@ func (s *PublicBlockChainQuaiAPI) fillSubordinateManifest(b *types.Block) (*type func (s *PublicBlockChainQuaiAPI) ReceiveMinedHeader(ctx context.Context, raw json.RawMessage) error { nodeCtx := s.b.NodeCtx() // Decode header and transactions. - var header *types.Header - if err := json.Unmarshal(raw, &header); err != nil { + var woHeader *types.WorkObject + if err := json.Unmarshal(raw, &woHeader); err != nil { return err } - block, err := s.b.ConstructLocalMinedBlock(header) + woHeader.Header().SetCoinbase(common.BytesToAddress(woHeader.Coinbase().Bytes(), s.b.NodeLocation())) + block, err := s.b.ConstructLocalMinedBlock(woHeader) if err != nil && err.Error() == core.ErrBadSubManifest.Error() && nodeCtx < common.ZONE_CTX { s.b.Logger().Info("filling sub manifest") // If we just mined this block, and we have a subordinate chain, its possible @@ -649,6 +647,7 @@ func (s *PublicBlockChainQuaiAPI) ReceiveMinedHeader(ctx context.Context, raw js } else if err != nil { return err } + // Broadcast the block and announce chain insertion event if block.Header() != nil { err := s.b.BroadcastBlock(block, s.b.NodeLocation()) @@ -657,17 +656,17 @@ func (s *PublicBlockChainQuaiAPI) ReceiveMinedHeader(ctx context.Context, raw js } } s.b.Logger().WithFields(log.Fields{ - "number": header.Number(s.b.NodeCtx()), - "location": header.Location(), + "number": block.Number(s.b.NodeCtx()), + "location": block.Location(), }).Info("Received mined header") return nil } type tdBlock struct { - Header *types.Header `json:"header"` + Header *types.WorkObject `json:"header"` Manifest types.BlockManifest `json:"manifest"` - DomPendingHeader *types.Header `json:"domPendingHeader"` + DomPendingHeader *types.WorkObject `json:"domPendingHeader"` DomTerminus common.Hash `json:"domTerminus"` DomOrigin bool `json:"domOrigin"` NewInboundEtxs types.Transactions `json:"newInboundEtxs"` @@ -681,6 +680,7 @@ func (s *PublicBlockChainQuaiAPI) Append(ctx context.Context, raw json.RawMessag return nil, err } + body.Header.Header().SetCoinbase(common.BytesToAddress(body.Header.Coinbase().Bytes(), s.b.NodeLocation())) pendingEtxs, subReorg, setHead, err := s.b.Append(body.Header, body.Manifest, body.DomPendingHeader, body.DomTerminus, body.DomOrigin, body.NewInboundEtxs) if err != nil { return nil, err @@ -711,8 +711,8 @@ func (s *PublicBlockChainQuaiAPI) DownloadBlocksInManifest(ctx context.Context, } type SubRelay struct { - Header *types.Header `json:"header"` - Termini types.Termini `json:"termini"` + Header *types.WorkObject `json:"header"` + Termini types.Termini `json:"termini"` NewEntropy *big.Int Location common.Location SubReorg bool @@ -730,8 +730,8 @@ func (s *PublicBlockChainQuaiAPI) SubRelayPendingHeader(ctx context.Context, raw type DomUpdate struct { OldTerminus common.Hash - Header *types.Header `json:"header"` - Termini types.Termini `json:"termini"` + Header *types.WorkObject `json:"header"` + Termini types.Termini `json:"termini"` Location common.Location } @@ -741,7 +741,6 @@ func (s *PublicBlockChainQuaiAPI) UpdateDom(ctx context.Context, raw json.RawMes s.b.Logger().WithField("err", err).Error("Error unmarshaling domUpdate in api") return } - pendingHeader := types.NewPendingHeader(domUpdate.Header, domUpdate.Termini) s.b.UpdateDom(domUpdate.OldTerminus, pendingHeader, domUpdate.Location) } @@ -761,9 +760,9 @@ func (s *PublicBlockChainQuaiAPI) RequestDomToAppendOrFetch(ctx context.Context, } type NewGenesisPendingHeaderArgs struct { - PendingHeader *types.Header 
`json:"header"` - Hash common.Hash `json:"genesisHash"` - DomTerminus common.Hash `json:"domTerminus"` + PendingHeader *types.WorkObject `json:"header"` + Hash common.Hash `json:"genesisHash"` + DomTerminus common.Hash `json:"domTerminus"` } func (s *PublicBlockChainQuaiAPI) NewGenesisPendingHeader(ctx context.Context, raw json.RawMessage) { @@ -789,7 +788,7 @@ func (s *PublicBlockChainQuaiAPI) GetPendingHeader(ctx context.Context) (map[str return nil, errors.New("no pending header found") } // Marshal the response. - marshaledPh := pendingHeader.RPCMarshalHeader() + marshaledPh := pendingHeader.RPCMarshalWorkObject() return marshaledPh, nil } @@ -806,7 +805,7 @@ func (s *PublicBlockChainQuaiAPI) GetManifest(ctx context.Context, raw json.RawM } type SendPendingEtxsToDomArgs struct { - Header types.Header `json:"header"` + Header types.WorkObject `json:"header"` NewPendingEtxs []types.Transactions `json:"newPendingEtxs"` } @@ -819,7 +818,7 @@ func (s *PublicBlockChainQuaiAPI) SendPendingEtxsToDom(ctx context.Context, raw } type SendPendingEtxsRollupToDomArgs struct { - Header *types.Header `json:"header"` + Header *types.WorkObject `json:"header"` EtxsRollup types.Transactions `json:"etxsrollup"` } @@ -832,8 +831,8 @@ func (s *PublicBlockChainQuaiAPI) SendPendingEtxsRollupToDom(ctx context.Context } type GenerateRecoveryPendingHeaderArgs struct { - PendingHeader *types.Header `json:"pendingHeader"` - CheckpointHashes types.Termini `json:"checkpointHashes"` + PendingHeader *types.WorkObject `json:"pendingHeader"` + CheckpointHashes types.Termini `json:"checkpointHashes"` } func (s *PublicBlockChainQuaiAPI) GenerateRecoveryPendingHeader(ctx context.Context, raw json.RawMessage) error { @@ -859,7 +858,7 @@ func (s *PublicBlockChainQuaiAPI) GetPendingEtxsRollupFromSub(ctx context.Contex return nil, err } fields := make(map[string]interface{}) - fields["header"] = pEtxsRollup.Header.RPCMarshalHeader() + fields["header"] = pEtxsRollup.Header.RPCMarshalWorkObject() fields["etxsrollup"] = pEtxsRollup.EtxsRollup return fields, nil @@ -880,7 +879,7 @@ func (s *PublicBlockChainQuaiAPI) GetPendingEtxsFromSub(ctx context.Context, raw return nil, err } fields := make(map[string]interface{}) - fields["header"] = pEtxs.Header.RPCMarshalHeader() + fields["header"] = pEtxs.Header.RPCMarshalWorkObject() fields["etxs"] = pEtxs.Etxs return fields, nil @@ -898,7 +897,7 @@ func (s *PublicBlockChainQuaiAPI) GetProtocolExpansionNumber() int { // Calculate the amount of Quai that Qi can be converted to. Expect the current Header and the Qi amount in "qits", returns the quai amount in "its" func (s *PublicBlockChainQuaiAPI) QiRateAtBlock(ctx context.Context, blockRef interface{}, qiAmount uint64) *big.Int { - var header *types.Header + var header *types.WorkObject var err error switch b := blockRef.(type) { case common.Hash: @@ -915,7 +914,7 @@ func (s *PublicBlockChainQuaiAPI) QiRateAtBlock(ctx context.Context, blockRef in // Calculate the amount of Qi that Quai can be converted to. 
Expect the current Header and the Quai amount in "its", returns the Qi amount in "qits" func (s *PublicBlockChainQuaiAPI) QuaiRateAtBlock(ctx context.Context, blockRef interface{}, quaiAmount uint64) *big.Int { - var header *types.Header + var header *types.WorkObject var err error switch b := blockRef.(type) { case common.Hash: diff --git a/node/node.go b/node/node.go index eeb2bef967..2fd0f23489 100644 --- a/node/node.go +++ b/node/node.go @@ -28,6 +28,7 @@ import ( "github.com/prometheus/tsdb/fileutil" + "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/core/rawdb" "github.com/dominant-strategies/go-quai/ethdb" "github.com/dominant-strategies/go-quai/event" @@ -51,6 +52,7 @@ type Node struct { http *httpServer // ws *httpServer // inprocHandler *rpc.Server // In-process RPC request handler to process the API requests + location []byte databases map[*closeTrackingDB]struct{} // All open databases } @@ -471,7 +473,7 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r Cache: cache, Handles: handles, ReadOnly: readonly, - }, n.config.NodeLocation.Context(), n.logger) + }, n.config.NodeLocation.Context(), n.logger, n.config.NodeLocation) } if err == nil { @@ -485,7 +487,7 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r // also attaching a chain freezer to it that moves ancient chain data from the // database to immutable append-only files. If the node is an ephemeral one, a // memory database is returned. -func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient, namespace string, readonly bool) (ethdb.Database, error) { +func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient, namespace string, readonly bool, location common.Location) (ethdb.Database, error) { n.lock.Lock() defer n.lock.Unlock() if n.state == closedState { @@ -504,7 +506,7 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient, Cache: cache, Handles: handles, ReadOnly: readonly, - }, n.config.NodeLocation.Context(), n.logger) + }, n.config.NodeLocation.Context(), n.logger, location) } if err == nil { diff --git a/p2p/node/api.go b/p2p/node/api.go index b1535924d6..a9c4bb9a2d 100644 --- a/p2p/node/api.go +++ b/p2p/node/api.go @@ -260,7 +260,7 @@ func (p *P2PNode) Connect(pi peer.AddrInfo) error { // Search for a block in the node's cache, or query the consensus backend if it's not found in cache. // Returns nil if the block is not found. -func (p *P2PNode) GetBlock(hash common.Hash, location common.Location) *types.Block { +func (p *P2PNode) GetWorkObject(hash common.Hash, location common.Location) *types.WorkObject { return p.consensus.LookupBlock(hash, location) } @@ -268,7 +268,7 @@ func (p *P2PNode) GetBlockHashByNumber(number *big.Int, location common.Location return p.consensus.LookupBlockHashByNumber(number, location) } -func (p *P2PNode) GetHeader(hash common.Hash, location common.Location) *types.Header { +func (p *P2PNode) GetHeader(hash common.Hash, location common.Location) *types.WorkObject { panic("TODO: implement") } @@ -278,7 +278,7 @@ func (p *P2PNode) GetTrieNode(hash common.Hash, location common.Location) *trie. 
func (p *P2PNode) handleBroadcast(sourcePeer peer.ID, data interface{}, nodeLocation common.Location) { switch v := data.(type) { - case types.Block: + case types.WorkObject: p.cacheAdd(v.Hash(), &v, nodeLocation) // TODO: send it to consensus case types.Transaction: diff --git a/p2p/node/node.go b/p2p/node/node.go index b98ea38b56..273cf5055c 100644 --- a/p2p/node/node.go +++ b/p2p/node/node.go @@ -233,7 +233,7 @@ func (p *P2PNode) p2pAddress() (multiaddr.Multiaddr, error) { // Helper to access the corresponding data cache func (p *P2PNode) pickCache(datatype interface{}, location common.Location) *lru.Cache[common.Hash, interface{}] { switch datatype.(type) { - case *types.Block: + case *types.WorkObject: return p.cache[location.Name()]["blocks"] case *types.Transaction: return p.cache[location.Name()]["transactions"] diff --git a/p2p/node/p2p_services.go b/p2p/node/p2p_services.go index 296c02e14a..3fad542268 100644 --- a/p2p/node/p2p_services.go +++ b/p2p/node/p2p_services.go @@ -77,8 +77,8 @@ func (p *P2PNode) requestFromPeer(peerID peer.ID, location common.Location, data // Check the received data type & hash matches the request switch datatype.(type) { - case *types.Block: - if block, ok := recvdType.(*types.Block); ok { + case *types.WorkObject: + if block, ok := recvdType.(*types.WorkObject); ok { switch data := data.(type) { case common.Hash: if block.Hash() == data { diff --git a/p2p/pb/proto_services.go b/p2p/pb/proto_services.go index 67eeb25861..c70a1a5cfb 100644 --- a/p2p/pb/proto_services.go +++ b/p2p/pb/proto_services.go @@ -38,10 +38,8 @@ func EncodeQuaiRequest(id uint32, location common.Location, data interface{}, da } switch datatype.(type) { - case *types.Block: - reqMsg.Request = &QuaiRequestMessage_Block{} - case *types.Header: - reqMsg.Request = &QuaiRequestMessage_Header{} + case *types.WorkObject: + reqMsg.Request = &QuaiRequestMessage_WorkObject{} case *types.Transaction: reqMsg.Request = &QuaiRequestMessage_Transaction{} case common.Hash: @@ -83,10 +81,8 @@ func DecodeQuaiRequest(reqMsg *QuaiRequestMessage) (uint32, interface{}, common. 
// Decode the request type var reqType interface{} switch t := reqMsg.Request.(type) { - case *QuaiRequestMessage_Block: - reqType = &types.Block{} - case *QuaiRequestMessage_Header: - reqType = &types.Header{} + case *QuaiRequestMessage_WorkObject: + reqType = &types.WorkObject{} case *QuaiRequestMessage_Transaction: reqType = &types.Transaction{} case *QuaiRequestMessage_BlockHash: @@ -112,19 +108,12 @@ func EncodeQuaiResponse(id uint32, location common.Location, data interface{}) ( } switch data := data.(type) { - case *types.Block: - protoBlock, err := data.ProtoEncode() - if err != nil { - return nil, err - } - respMsg.Response = &QuaiResponseMessage_Block{Block: protoBlock} - - case *types.Header: - protoHeader, err := data.ProtoEncode() + case *types.WorkObject: + protoWorkObject, err := data.ProtoEncode(types.BlockObject) if err != nil { return nil, err } - respMsg.Response = &QuaiResponseMessage_Header{Header: protoHeader} + respMsg.Response = &QuaiResponseMessage_WorkObject{WorkObject: protoWorkObject} case *types.Transaction: protoTransaction, err := data.ProtoEncode() @@ -162,25 +151,14 @@ func DecodeQuaiResponse(respMsg *QuaiResponseMessage) (uint32, interface{}, erro sourceLocation.ProtoDecode(respMsg.Location) switch respMsg.Response.(type) { - case *QuaiResponseMessage_Block: - protoBlock := respMsg.GetBlock() - block := &types.Block{} - if protoBlock.Header.Location == nil { - return id, nil, errors.New("location is nil") - } - err := block.ProtoDecode(protoBlock, *sourceLocation) + case *QuaiResponseMessage_WorkObject: + protoWorkObject := respMsg.GetWorkObject() + block := &types.WorkObject{} + err := block.ProtoDecode(protoWorkObject, *sourceLocation, types.BlockObject) if err != nil { return id, nil, err } return id, block, nil - case *QuaiResponseMessage_Header: - protoHeader := respMsg.GetHeader() - header := &types.Header{} - err := header.ProtoDecode(protoHeader) - if err != nil { - return id, nil, err - } - return id, header, nil case *QuaiResponseMessage_Transaction: protoTransaction := respMsg.GetTransaction() transaction := &types.Transaction{} @@ -206,9 +184,9 @@ func DecodeQuaiResponse(respMsg *QuaiResponseMessage) (uint32, interface{}, erro // Converts a custom go type to a proto type and marhsals it into a protobuf message func ConvertAndMarshal(data interface{}) ([]byte, error) { switch data := data.(type) { - case *types.Block: + case *types.WorkObject: log.Global.Tracef("marshalling block: %+v", data) - protoBlock, err := data.ProtoEncode() + protoBlock, err := data.ProtoEncode(types.BlockObject) if err != nil { return nil, err } @@ -239,21 +217,21 @@ func ConvertAndMarshal(data interface{}) ([]byte, error) { // Unmarshals a protobuf message into a proto type and converts it to a custom go type func UnmarshalAndConvert(data []byte, sourceLocation common.Location, dataPtr *interface{}, datatype interface{}) error { switch datatype.(type) { - case *types.Block: - protoBlock := &types.ProtoBlock{} - err := proto.Unmarshal(data, protoBlock) + case *types.WorkObject: + protoWorkObject := &types.ProtoWorkObject{} + err := proto.Unmarshal(data, protoWorkObject) if err != nil { return err } - block := &types.Block{} - if protoBlock.Header.Location == nil { + workObject := &types.WorkObject{} + if protoWorkObject.WoHeader.Location == nil { return errors.New("location is nil") } - err = block.ProtoDecode(protoBlock, protoBlock.Header.GetLocation().GetValue()) + err = workObject.ProtoDecode(protoWorkObject, sourceLocation, types.BlockObject) if err != nil { return 
err } - *dataPtr = *block + *dataPtr = *workObject return nil case *types.Header: protoHeader := &types.ProtoHeader{} @@ -262,7 +240,7 @@ func UnmarshalAndConvert(data []byte, sourceLocation common.Location, dataPtr *i return err } header := &types.Header{} - err = header.ProtoDecode(protoHeader) + err = header.ProtoDecode(protoHeader, sourceLocation) if err != nil { return err } diff --git a/p2p/pb/proto_services_test.go b/p2p/pb/proto_services_test.go index b6664259bc..8cb07c642d 100644 --- a/p2p/pb/proto_services_test.go +++ b/p2p/pb/proto_services_test.go @@ -36,8 +36,8 @@ func TestEncodeDecodeRequest(t *testing.T) { }, { name: "Transaction", - input: &types.Transaction{}, - expectedType: reflect.TypeOf(&types.Transaction{}), + input: types.NewEmptyTx(), + expectedType: reflect.TypeOf(types.NewEmptyTx()), }, { name: "Header", diff --git a/p2p/pb/quai_messages.pb.go b/p2p/pb/quai_messages.pb.go index 39a03c620b..772ab6162c 100644 --- a/p2p/pb/quai_messages.pb.go +++ b/p2p/pb/quai_messages.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.30.0 +// protoc v4.25.1 // source: p2p/pb/quai_messages.proto package pb @@ -24,16 +24,16 @@ const ( ) // GossipSub messages for broadcasting blocks and transactions -type GossipBlock struct { +type GossipWorkObject struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Block *types.ProtoBlock `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` + WorkObject *types.ProtoWorkObject `protobuf:"bytes,1,opt,name=work_object,json=workObject,proto3" json:"work_object,omitempty"` } -func (x *GossipBlock) Reset() { - *x = GossipBlock{} +func (x *GossipWorkObject) Reset() { + *x = GossipWorkObject{} if protoimpl.UnsafeEnabled { mi := &file_p2p_pb_quai_messages_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -41,13 +41,13 @@ func (x *GossipBlock) Reset() { } } -func (x *GossipBlock) String() string { +func (x *GossipWorkObject) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GossipBlock) ProtoMessage() {} +func (*GossipWorkObject) ProtoMessage() {} -func (x *GossipBlock) ProtoReflect() protoreflect.Message { +func (x *GossipWorkObject) ProtoReflect() protoreflect.Message { mi := &file_p2p_pb_quai_messages_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -59,14 +59,14 @@ func (x *GossipBlock) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GossipBlock.ProtoReflect.Descriptor instead. -func (*GossipBlock) Descriptor() ([]byte, []int) { +// Deprecated: Use GossipWorkObject.ProtoReflect.Descriptor instead. 
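(Aside, not part of the diff: ConvertAndMarshal and UnmarshalAndConvert above now carry work objects as ProtoWorkObject instead of ProtoBlock. A rough round-trip sketch against those two helpers; the wrapper function, its name, and its error handling are illustrative only.)

package example

import (
	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core/types"
	"github.com/dominant-strategies/go-quai/p2p/pb"
)

// roundTripWorkObject marshals a work object with ConvertAndMarshal and
// decodes it back with UnmarshalAndConvert, matching the switch arms above.
func roundTripWorkObject(wo *types.WorkObject, loc common.Location) (*types.WorkObject, error) {
	raw, err := pb.ConvertAndMarshal(wo)
	if err != nil {
		return nil, err
	}
	var decoded interface{}
	if err := pb.UnmarshalAndConvert(raw, loc, &decoded, &types.WorkObject{}); err != nil {
		return nil, err
	}
	// UnmarshalAndConvert stores the value (*dataPtr = *workObject), so the
	// assertion is on types.WorkObject rather than *types.WorkObject.
	out := decoded.(types.WorkObject)
	return &out, nil
}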
+func (*GossipWorkObject) Descriptor() ([]byte, []int) { return file_p2p_pb_quai_messages_proto_rawDescGZIP(), []int{0} } -func (x *GossipBlock) GetBlock() *types.ProtoBlock { +func (x *GossipWorkObject) GetWorkObject() *types.ProtoWorkObject { if x != nil { - return x.Block + return x.WorkObject } return nil } @@ -133,8 +133,7 @@ type QuaiRequestMessage struct { Data isQuaiRequestMessage_Data `protobuf_oneof:"data"` // Types that are assignable to Request: // - // *QuaiRequestMessage_Block - // *QuaiRequestMessage_Header + // *QuaiRequestMessage_WorkObject // *QuaiRequestMessage_Transaction // *QuaiRequestMessage_BlockHash // *QuaiRequestMessage_TrieNode @@ -215,16 +214,9 @@ func (m *QuaiRequestMessage) GetRequest() isQuaiRequestMessage_Request { return nil } -func (x *QuaiRequestMessage) GetBlock() *types.ProtoBlock { - if x, ok := x.GetRequest().(*QuaiRequestMessage_Block); ok { - return x.Block - } - return nil -} - -func (x *QuaiRequestMessage) GetHeader() *types.ProtoHeader { - if x, ok := x.GetRequest().(*QuaiRequestMessage_Header); ok { - return x.Header +func (x *QuaiRequestMessage) GetWorkObject() *types.ProtoWorkObject { + if x, ok := x.GetRequest().(*QuaiRequestMessage_WorkObject); ok { + return x.WorkObject } return nil } @@ -259,7 +251,7 @@ type QuaiRequestMessage_Hash struct { } type QuaiRequestMessage_Number struct { - Number []byte `protobuf:"bytes,7,opt,name=number,proto3,oneof"` + Number []byte `protobuf:"bytes,4,opt,name=number,proto3,oneof"` } func (*QuaiRequestMessage_Hash) isQuaiRequestMessage_Data() {} @@ -270,12 +262,8 @@ type isQuaiRequestMessage_Request interface { isQuaiRequestMessage_Request() } -type QuaiRequestMessage_Block struct { - Block *types.ProtoBlock `protobuf:"bytes,4,opt,name=block,proto3,oneof"` -} - -type QuaiRequestMessage_Header struct { - Header *types.ProtoHeader `protobuf:"bytes,5,opt,name=header,proto3,oneof"` +type QuaiRequestMessage_WorkObject struct { + WorkObject *types.ProtoWorkObject `protobuf:"bytes,5,opt,name=work_object,json=workObject,proto3,oneof"` } type QuaiRequestMessage_Transaction struct { @@ -283,16 +271,14 @@ type QuaiRequestMessage_Transaction struct { } type QuaiRequestMessage_BlockHash struct { - BlockHash *common.ProtoHash `protobuf:"bytes,8,opt,name=blockHash,proto3,oneof"` + BlockHash *common.ProtoHash `protobuf:"bytes,7,opt,name=block_hash,json=blockHash,proto3,oneof"` } type QuaiRequestMessage_TrieNode struct { - TrieNode *trie.ProtoTrieNode `protobuf:"bytes,9,opt,name=trieNode,proto3,oneof"` + TrieNode *trie.ProtoTrieNode `protobuf:"bytes,8,opt,name=trie_node,json=trieNode,proto3,oneof"` } -func (*QuaiRequestMessage_Block) isQuaiRequestMessage_Request() {} - -func (*QuaiRequestMessage_Header) isQuaiRequestMessage_Request() {} +func (*QuaiRequestMessage_WorkObject) isQuaiRequestMessage_Request() {} func (*QuaiRequestMessage_Transaction) isQuaiRequestMessage_Request() {} @@ -310,8 +296,7 @@ type QuaiResponseMessage struct { Location *common.ProtoLocation `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` // Types that are assignable to Response: // - // *QuaiResponseMessage_Block - // *QuaiResponseMessage_Header + // *QuaiResponseMessage_WorkObject // *QuaiResponseMessage_Transaction // *QuaiResponseMessage_BlockHash // *QuaiResponseMessage_TrieNode @@ -371,16 +356,9 @@ func (m *QuaiResponseMessage) GetResponse() isQuaiResponseMessage_Response { return nil } -func (x *QuaiResponseMessage) GetBlock() *types.ProtoBlock { - if x, ok := x.GetResponse().(*QuaiResponseMessage_Block); ok { - return 
x.Block - } - return nil -} - -func (x *QuaiResponseMessage) GetHeader() *types.ProtoHeader { - if x, ok := x.GetResponse().(*QuaiResponseMessage_Header); ok { - return x.Header +func (x *QuaiResponseMessage) GetWorkObject() *types.ProtoWorkObject { + if x, ok := x.GetResponse().(*QuaiResponseMessage_WorkObject); ok { + return x.WorkObject } return nil } @@ -410,29 +388,23 @@ type isQuaiResponseMessage_Response interface { isQuaiResponseMessage_Response() } -type QuaiResponseMessage_Block struct { - Block *types.ProtoBlock `protobuf:"bytes,3,opt,name=block,proto3,oneof"` -} - -type QuaiResponseMessage_Header struct { - Header *types.ProtoHeader `protobuf:"bytes,4,opt,name=header,proto3,oneof"` +type QuaiResponseMessage_WorkObject struct { + WorkObject *types.ProtoWorkObject `protobuf:"bytes,3,opt,name=work_object,json=workObject,proto3,oneof"` } type QuaiResponseMessage_Transaction struct { - Transaction *types.ProtoTransaction `protobuf:"bytes,5,opt,name=transaction,proto3,oneof"` + Transaction *types.ProtoTransaction `protobuf:"bytes,4,opt,name=transaction,proto3,oneof"` } type QuaiResponseMessage_BlockHash struct { - BlockHash *common.ProtoHash `protobuf:"bytes,6,opt,name=blockHash,proto3,oneof"` + BlockHash *common.ProtoHash `protobuf:"bytes,5,opt,name=block_hash,json=blockHash,proto3,oneof"` } type QuaiResponseMessage_TrieNode struct { - TrieNode *trie.ProtoTrieNode `protobuf:"bytes,7,opt,name=trieNode,proto3,oneof"` + TrieNode *trie.ProtoTrieNode `protobuf:"bytes,6,opt,name=trie_node,json=trieNode,proto3,oneof"` } -func (*QuaiResponseMessage_Block) isQuaiResponseMessage_Response() {} - -func (*QuaiResponseMessage_Header) isQuaiResponseMessage_Response() {} +func (*QuaiResponseMessage_WorkObject) isQuaiResponseMessage_Response() {} func (*QuaiResponseMessage_Transaction) isQuaiResponseMessage_Response() {} @@ -531,78 +503,76 @@ var file_p2p_pb_quai_messages_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x74, 0x72, 0x69, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x74, 0x72, 0x69, 0x65, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x36, - 0x0a, 0x0b, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x27, 0x0a, - 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0b, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa9, 0x03, 0x0a, 0x12, 0x51, 0x75, 0x61, 0x69, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x31, 0x0a, - 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 
0x74, 0x6f, 0x4c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x27, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, - 0x68, 0x48, 0x00, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x18, 0x0a, 0x06, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2c, - 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x48, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x0b, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x01, 0x52, 0x0b, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x09, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, - 0x01, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x31, 0x0a, 0x08, - 0x74, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x74, 0x72, 0x69, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x69, 0x65, 0x4e, - 0x6f, 0x64, 0x65, 0x48, 0x01, 0x52, 0x08, 0x74, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x42, - 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0xe0, 0x02, 0x0a, 0x13, 0x51, 0x75, 0x61, 0x69, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x31, 0x0a, 0x08, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, - 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, - 0x00, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x2c, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 
0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x31, 0x0a, 0x08, 0x74, 0x72, 0x69, 0x65, 0x4e, 0x6f, - 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x72, 0x69, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, - 0x08, 0x74, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x97, 0x01, 0x0a, 0x0b, 0x51, 0x75, 0x61, 0x69, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3c, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x71, 0x75, 0x61, 0x69, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x51, 0x75, 0x61, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x61, 0x69, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x51, 0x75, 0x61, 0x69, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, - 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, - 0x6d, 0x69, 0x6e, 0x61, 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, - 0x73, 0x2f, 0x67, 0x6f, 0x2d, 0x71, 0x75, 0x61, 0x69, 0x2f, 0x70, 0x32, 0x70, 0x2f, 0x70, 0x62, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x6f, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4b, + 0x0a, 0x10, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x4e, 0x0a, 0x11, 0x47, + 0x6f, 0x73, 0x73, 0x69, 0x70, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x39, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8d, 0x03, 0x0a, 0x12, + 0x51, 0x75, 0x61, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x31, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 
0x6e, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x00, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x18, + 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x01, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x48, 0x01, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x32, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x01, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x48, 0x61, 0x73, 0x68, 0x12, 0x32, 0x0a, 0x09, 0x74, 0x72, 0x69, 0x65, 0x5f, 0x6e, 0x6f, 0x64, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x72, 0x69, 0x65, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x01, 0x52, 0x08, + 0x74, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x02, 0x0a, 0x13, + 0x51, 0x75, 0x61, 0x69, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x31, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x57, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x00, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, + 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x74, + 
0x6f, 0x48, 0x61, 0x73, 0x68, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, + 0x73, 0x68, 0x12, 0x32, 0x0a, 0x09, 0x74, 0x72, 0x69, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x72, 0x69, 0x65, 0x2e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x08, 0x74, 0x72, + 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x97, 0x01, 0x0a, 0x0b, 0x51, 0x75, 0x61, 0x69, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x3c, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x71, 0x75, 0x61, 0x69, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x2e, 0x51, 0x75, 0x61, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3f, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x71, 0x75, 0x61, 0x69, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x2e, 0x51, 0x75, 0x61, 0x69, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x2f, 0x5a, 0x2d, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x6f, 0x6d, 0x69, 0x6e, + 0x61, 0x6e, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x69, 0x65, 0x73, 0x2f, 0x67, + 0x6f, 0x2d, 0x71, 0x75, 0x61, 0x69, 0x2f, 0x70, 0x32, 0x70, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -619,41 +589,38 @@ func file_p2p_pb_quai_messages_proto_rawDescGZIP() []byte { var file_p2p_pb_quai_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_p2p_pb_quai_messages_proto_goTypes = []interface{}{ - (*GossipBlock)(nil), // 0: quaiprotocol.GossipBlock + (*GossipWorkObject)(nil), // 0: quaiprotocol.GossipWorkObject (*GossipTransaction)(nil), // 1: quaiprotocol.GossipTransaction (*QuaiRequestMessage)(nil), // 2: quaiprotocol.QuaiRequestMessage (*QuaiResponseMessage)(nil), // 3: quaiprotocol.QuaiResponseMessage (*QuaiMessage)(nil), // 4: quaiprotocol.QuaiMessage - (*types.ProtoBlock)(nil), // 5: block.ProtoBlock + (*types.ProtoWorkObject)(nil), // 5: block.ProtoWorkObject (*types.ProtoTransaction)(nil), // 6: block.ProtoTransaction (*common.ProtoLocation)(nil), // 7: common.ProtoLocation (*common.ProtoHash)(nil), // 8: common.ProtoHash - (*types.ProtoHeader)(nil), // 9: block.ProtoHeader - (*trie.ProtoTrieNode)(nil), // 10: trie.ProtoTrieNode + (*trie.ProtoTrieNode)(nil), // 9: trie.ProtoTrieNode } var file_p2p_pb_quai_messages_proto_depIdxs = []int32{ - 5, // 0: quaiprotocol.GossipBlock.block:type_name -> block.ProtoBlock + 5, // 0: quaiprotocol.GossipWorkObject.work_object:type_name -> block.ProtoWorkObject 6, // 1: quaiprotocol.GossipTransaction.transaction:type_name -> block.ProtoTransaction 7, // 2: quaiprotocol.QuaiRequestMessage.location:type_name -> common.ProtoLocation 8, // 3: quaiprotocol.QuaiRequestMessage.hash:type_name -> common.ProtoHash - 5, // 4: quaiprotocol.QuaiRequestMessage.block:type_name -> block.ProtoBlock - 9, // 5: quaiprotocol.QuaiRequestMessage.header:type_name -> block.ProtoHeader - 6, // 6: quaiprotocol.QuaiRequestMessage.transaction:type_name -> block.ProtoTransaction - 
8, // 7: quaiprotocol.QuaiRequestMessage.blockHash:type_name -> common.ProtoHash - 10, // 8: quaiprotocol.QuaiRequestMessage.trieNode:type_name -> trie.ProtoTrieNode - 7, // 9: quaiprotocol.QuaiResponseMessage.location:type_name -> common.ProtoLocation - 5, // 10: quaiprotocol.QuaiResponseMessage.block:type_name -> block.ProtoBlock - 9, // 11: quaiprotocol.QuaiResponseMessage.header:type_name -> block.ProtoHeader - 6, // 12: quaiprotocol.QuaiResponseMessage.transaction:type_name -> block.ProtoTransaction - 8, // 13: quaiprotocol.QuaiResponseMessage.blockHash:type_name -> common.ProtoHash - 10, // 14: quaiprotocol.QuaiResponseMessage.trieNode:type_name -> trie.ProtoTrieNode - 2, // 15: quaiprotocol.QuaiMessage.request:type_name -> quaiprotocol.QuaiRequestMessage - 3, // 16: quaiprotocol.QuaiMessage.response:type_name -> quaiprotocol.QuaiResponseMessage - 17, // [17:17] is the sub-list for method output_type - 17, // [17:17] is the sub-list for method input_type - 17, // [17:17] is the sub-list for extension type_name - 17, // [17:17] is the sub-list for extension extendee - 0, // [0:17] is the sub-list for field type_name + 5, // 4: quaiprotocol.QuaiRequestMessage.work_object:type_name -> block.ProtoWorkObject + 6, // 5: quaiprotocol.QuaiRequestMessage.transaction:type_name -> block.ProtoTransaction + 8, // 6: quaiprotocol.QuaiRequestMessage.block_hash:type_name -> common.ProtoHash + 9, // 7: quaiprotocol.QuaiRequestMessage.trie_node:type_name -> trie.ProtoTrieNode + 7, // 8: quaiprotocol.QuaiResponseMessage.location:type_name -> common.ProtoLocation + 5, // 9: quaiprotocol.QuaiResponseMessage.work_object:type_name -> block.ProtoWorkObject + 6, // 10: quaiprotocol.QuaiResponseMessage.transaction:type_name -> block.ProtoTransaction + 8, // 11: quaiprotocol.QuaiResponseMessage.block_hash:type_name -> common.ProtoHash + 9, // 12: quaiprotocol.QuaiResponseMessage.trie_node:type_name -> trie.ProtoTrieNode + 2, // 13: quaiprotocol.QuaiMessage.request:type_name -> quaiprotocol.QuaiRequestMessage + 3, // 14: quaiprotocol.QuaiMessage.response:type_name -> quaiprotocol.QuaiResponseMessage + 15, // [15:15] is the sub-list for method output_type + 15, // [15:15] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_p2p_pb_quai_messages_proto_init() } @@ -663,7 +630,7 @@ func file_p2p_pb_quai_messages_proto_init() { } if !protoimpl.UnsafeEnabled { file_p2p_pb_quai_messages_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GossipBlock); i { + switch v := v.(*GossipWorkObject); i { case 0: return &v.state case 1: @@ -726,15 +693,13 @@ func file_p2p_pb_quai_messages_proto_init() { file_p2p_pb_quai_messages_proto_msgTypes[2].OneofWrappers = []interface{}{ (*QuaiRequestMessage_Hash)(nil), (*QuaiRequestMessage_Number)(nil), - (*QuaiRequestMessage_Block)(nil), - (*QuaiRequestMessage_Header)(nil), + (*QuaiRequestMessage_WorkObject)(nil), (*QuaiRequestMessage_Transaction)(nil), (*QuaiRequestMessage_BlockHash)(nil), (*QuaiRequestMessage_TrieNode)(nil), } file_p2p_pb_quai_messages_proto_msgTypes[3].OneofWrappers = []interface{}{ - (*QuaiResponseMessage_Block)(nil), - (*QuaiResponseMessage_Header)(nil), + (*QuaiResponseMessage_WorkObject)(nil), (*QuaiResponseMessage_Transaction)(nil), (*QuaiResponseMessage_BlockHash)(nil), (*QuaiResponseMessage_TrieNode)(nil), diff --git a/p2p/pb/quai_messages.proto 
b/p2p/pb/quai_messages.proto index 17bc622ce8..87ca997530 100644 --- a/p2p/pb/quai_messages.proto +++ b/p2p/pb/quai_messages.proto @@ -8,7 +8,7 @@ import "trie/proto_trienode.proto"; import "core/types/proto_block.proto"; // GossipSub messages for broadcasting blocks and transactions -message GossipBlock { block.ProtoBlock block = 1; } +message GossipWorkObject { block.ProtoWorkObject work_object = 1; } message GossipTransaction { block.ProtoTransaction transaction = 1; } @@ -18,14 +18,13 @@ message QuaiRequestMessage { common.ProtoLocation location = 2; oneof data { common.ProtoHash hash = 3; - bytes number = 7; + bytes number = 4; } oneof request { - block.ProtoBlock block = 4; - block.ProtoHeader header = 5; - block.ProtoTransaction transaction = 6; - common.ProtoHash blockHash = 8; - trie.ProtoTrieNode trieNode = 9; + block.ProtoWorkObject work_object = 5; + block.ProtoTransaction transaction = 6; + common.ProtoHash block_hash = 7; + trie.ProtoTrieNode trie_node = 8; } } @@ -34,11 +33,10 @@ message QuaiResponseMessage { uint32 id = 1; common.ProtoLocation location = 2; oneof response { - block.ProtoBlock block = 3; - block.ProtoHeader header = 4; - block.ProtoTransaction transaction = 5; - common.ProtoHash blockHash = 6; - trie.ProtoTrieNode trieNode = 7; + block.ProtoWorkObject work_object = 3; + block.ProtoTransaction transaction = 4; + common.ProtoHash block_hash = 5; + trie.ProtoTrieNode trie_node = 6; } } @@ -47,4 +45,4 @@ message QuaiMessage { QuaiRequestMessage request = 1; QuaiResponseMessage response = 2; } -} \ No newline at end of file +} diff --git a/p2p/protocol/handler.go b/p2p/protocol/handler.go index b61e5b8082..57d33a4110 100644 --- a/p2p/protocol/handler.go +++ b/p2p/protocol/handler.go @@ -92,7 +92,7 @@ func handleRequest(quaiMsg *pb.QuaiRequestMessage, stream network.Stream, node Q } switch decodedType.(type) { - case *types.Block: + case *types.WorkObject: requestedHash := &common.Hash{} switch query := query.(type) { case *common.Hash: @@ -178,7 +178,7 @@ func handleResponse(quaiResp *pb.QuaiResponseMessage, node QuaiP2PNode) { // Seeks the block in the cache or database and sends it to the peer in a pb.QuaiResponseMessage func handleBlockRequest(id uint32, loc common.Location, hash common.Hash, stream network.Stream, node QuaiP2PNode) error { // check if we have the block in our cache or database - block := node.GetBlock(hash, loc) + block := node.GetWorkObject(hash, loc) if block == nil { log.Global.Debugf("block not found") return nil diff --git a/p2p/protocol/interface.go b/p2p/protocol/interface.go index c37343b346..a59c3d0e61 100644 --- a/p2p/protocol/interface.go +++ b/p2p/protocol/interface.go @@ -18,8 +18,8 @@ type QuaiP2PNode interface { GetBootPeers() []peer.AddrInfo // Search for a block in the node's cache, or query the consensus backend if it's not found in cache. // Returns nil if the block is not found. 
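(Aside, not part of the diff: the QuaiP2PNode lookup just below is renamed from GetBlock to GetWorkObject and still returns nil when the hash is unknown, which is what handleBlockRequest above relies on. A small caller sketch; the narrowed workObjectLookup interface is hypothetical.)

package example

import (
	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core/types"
)

// workObjectLookup is a hypothetical narrowing of QuaiP2PNode: GetWorkObject
// replaces GetBlock and returns nil when the block is not known locally.
type workObjectLookup interface {
	GetWorkObject(hash common.Hash, location common.Location) *types.WorkObject
}

// lookupOrSkip mirrors handleBlockRequest above: a nil result simply means
// there is nothing to send back to the requesting peer.
func lookupOrSkip(node workObjectLookup, hash common.Hash, loc common.Location) (*types.WorkObject, bool) {
	block := node.GetWorkObject(hash, loc)
	if block == nil {
		return nil, false
	}
	return block, true
}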
- GetBlock(hash common.Hash, location common.Location) *types.Block - GetHeader(hash common.Hash, location common.Location) *types.Header + GetWorkObject(hash common.Hash, location common.Location) *types.WorkObject + GetHeader(hash common.Hash, location common.Location) *types.WorkObject GetBlockHashByNumber(number *big.Int, location common.Location) *common.Hash GetTrieNode(hash common.Hash, location common.Location) *trie.TrieNodeResponse GetRequestManager() requestManager.RequestManager diff --git a/p2p/pubsubManager/gossipsub.go b/p2p/pubsubManager/gossipsub.go index 577f3aceb6..fbcf2d8ff5 100644 --- a/p2p/pubsubManager/gossipsub.go +++ b/p2p/pubsubManager/gossipsub.go @@ -45,7 +45,7 @@ func NewGossipSubManager(ctx context.Context, h host.Host) (*PubsubManager, erro make(map[string]*pubsub.Subscription), make(map[string]*pubsub.Topic), nil, - utils.MakeGenesis().ToHeader(0).Hash(), + utils.MakeGenesis().ToBlock(0).Hash(), nil, }, nil } diff --git a/p2p/pubsubManager/utils.go b/p2p/pubsubManager/utils.go index 4c8a5e120e..5a0aa51a48 100644 --- a/p2p/pubsubManager/utils.go +++ b/p2p/pubsubManager/utils.go @@ -10,7 +10,7 @@ import ( const ( // Data types for gossipsub topics - C_blockType = "blocks" + C_workObjectType = "blocks" C_transactionType = "transactions" C_headerType = "headers" C_hashType = "hash" @@ -20,8 +20,8 @@ const ( func TopicName(genesis common.Hash, location common.Location, data interface{}) (string, error) { baseTopic := strings.Join([]string{genesis.String(), location.Name()}, "/") switch data.(type) { - case *types.Block: - return strings.Join([]string{baseTopic, C_blockType}, "/"), nil + case *types.WorkObject: + return strings.Join([]string{baseTopic, C_workObjectType}, "/"), nil case common.Hash: return strings.Join([]string{baseTopic, C_hashType}, "/"), nil case *types.Transaction: diff --git a/params/config.go b/params/config.go index 72a08c1eaf..4f92d1e82c 100644 --- a/params/config.go +++ b/params/config.go @@ -36,7 +36,7 @@ var ( Blake3PowColosseumGenesisHash = common.HexToHash("0xf8667e0e993cfd3c19c474fed7d8c070be3180bfbe911592a59f61ac9b0278e4") Blake3PowGardenGenesisHash = common.HexToHash("0xadb8b9429e719e1c54717925d920c7126afb82be85eaa3a044b51abf8f62ac24") Blake3PowOrchardGenesisHash = common.HexToHash("0xd1d433069d581af323157a32c0ceecd121acbc9e3e80ec07f403b7da002fb9d5") - Blake3PowLocalGenesisHash = common.HexToHash("0xe28db2c6fbc597ff900cd32ca853cf25cfa0d53e969f037816bb95f9119881fa") + Blake3PowLocalGenesisHash = common.HexToHash("0xcbe14caf0911f6253a00ea5ad896903cb9cf19e7a65bb527d3b8088724b408a9") Blake3PowLighthouseGenesisHash = common.HexToHash("0x4daa067e9f4d540f8810f4a841ff6aa906c0fd707706e62afb336858f22d2f4f") ) diff --git a/params/protocol_params.go b/params/protocol_params.go index e6a2bbe040..cd85264ced 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -188,10 +188,10 @@ var ( LighthouseDurationLimit = big.NewInt(7) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not. LocalDurationLimit = big.NewInt(2) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not. 
TimeFactor = big.NewInt(7) - TimeToStartTx uint64 = 5 * BlocksPerDay + TimeToStartTx uint64 = 0 * BlocksPerDay BlocksPerDay uint64 = new(big.Int).Div(big.NewInt(86400), DurationLimit).Uint64() // BlocksPerDay is the number of blocks per day assuming 12 second block time - PrimeEntropyTarget = big.NewInt(441) // This is TimeFactor*TimeFactor*common.NumZonesInRegion*common.NumRegionsInPrime - RegionEntropyTarget = big.NewInt(21) // This is TimeFactor*common.NumZonesInRegion - DifficultyAdjustmentPeriod = big.NewInt(360) // This is the number of blocks over which the average has to be taken - DifficultyAdjustmentFactor int64 = 40 // This is the factor that divides the log of the change in the difficulty + PrimeEntropyTarget = big.NewInt(441) // This is TimeFactor*TimeFactor*common.NumZonesInRegion*common.NumRegionsInPrime + RegionEntropyTarget = big.NewInt(21) // This is TimeFactor*common.NumZonesInRegion + DifficultyAdjustmentPeriod = big.NewInt(360) // This is the number of blocks over which the average has to be taken + DifficultyAdjustmentFactor int64 = 40 // This is the factor that divides the log of the change in the difficulty ) diff --git a/quai/api.go b/quai/api.go index 9fc74da0d6..5841b59cc9 100644 --- a/quai/api.go +++ b/quai/api.go @@ -33,7 +33,6 @@ import ( "github.com/dominant-strategies/go-quai/core/rawdb" "github.com/dominant-strategies/go-quai/core/state" "github.com/dominant-strategies/go-quai/core/types" - "github.com/dominant-strategies/go-quai/internal/quaiapi" "github.com/dominant-strategies/go-quai/rlp" "github.com/dominant-strategies/go-quai/rpc" "github.com/dominant-strategies/go-quai/trie" @@ -173,7 +172,7 @@ func (api *PrivateAdminAPI) ExportChain(file string, first *uint64, last *uint64 return true, nil } -func hasAllBlocks(chain *core.Core, bs []*types.Block) bool { +func hasAllBlocks(chain *core.Core, bs []*types.WorkObject) bool { for _, b := range bs { if !chain.HasBlock(b.Hash(), b.NumberU64(chain.NodeCtx())) { return false @@ -201,11 +200,11 @@ func (api *PrivateAdminAPI) ImportChain(file string) (bool, error) { // Run actual the import in pre-configured batches stream := rlp.NewStream(reader, 0) - blocks, index := make([]*types.Block, 0, 2500), 0 + blocks, index := make([]*types.WorkObject, 0, 2500), 0 for batch := 0; ; batch++ { // Load a batch of blocks from the input file for len(blocks) < cap(blocks) { - block := new(types.Block) + block := new(types.WorkObject) if err := stream.Decode(block); err == io.EOF { break } else if err != nil { @@ -252,7 +251,7 @@ func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error if blockNr == rpc.PendingBlockNumber { return state.Dump{}, nil } - var block *types.Block + var block *types.WorkObject if blockNr == rpc.LatestBlockNumber { block = api.quai.core.CurrentBlock() } else { @@ -288,43 +287,6 @@ func (api *PrivateDebugAPI) Preimage(ctx context.Context, hash common.Hash) (hex return nil, errors.New("unknown preimage") } -// BadBlockArgs represents the entries in the list returned when bad blocks are queried. 
-type BadBlockArgs struct { - Hash common.Hash `json:"hash"` - Block map[string]interface{} `json:"block"` - RLP string `json:"rlp"` -} - -// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network -// and returns them as a JSON list of block-hashes -func (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) { - var ( - err error - blocks = rawdb.ReadAllBadBlocks(api.quai.chainDb, api.quai.config.NodeLocation) - results = make([]*BadBlockArgs, 0, len(blocks)) - ) - for _, block := range blocks { - var ( - blockRlp string - blockJSON map[string]interface{} - ) - if rlpBytes, err := rlp.EncodeToBytes(block); err != nil { - blockRlp = err.Error() // Hacky, but hey, it works - } else { - blockRlp = fmt.Sprintf("0x%x", rlpBytes) - } - if blockJSON, err = quaiapi.RPCMarshalBlock(block, true, true, api.quai.core.NodeLocation()); err != nil { - blockJSON = map[string]interface{}{"error": err.Error()} - } - results = append(results, &BadBlockArgs{ - Hash: block.Hash(), - RLP: blockRlp, - Block: blockJSON, - }) - } - return results, nil -} - // AccountRangeMaxResults is the maximum number of results to be returned per call const AccountRangeMaxResults = 256 @@ -340,7 +302,7 @@ func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, sta // the miner and operate on those stateDb = &state.StateDB{} } else { - var block *types.Block + var block *types.WorkObject if number == rpc.LatestBlockNumber { block = api.quai.core.CurrentBlock() } else { @@ -444,7 +406,7 @@ func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeRes // // With one parameter, returns the list of accounts modified in the specified block. func (api *PrivateDebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) { - var startBlock, endBlock *types.Block + var startBlock, endBlock *types.WorkObject startBlock = api.quai.core.GetBlockByNumber(startNum) if startBlock == nil { @@ -473,7 +435,7 @@ func (api *PrivateDebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum // // With one parameter, returns the list of accounts modified in the specified block. 
func (api *PrivateDebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) { - var startBlock, endBlock *types.Block + var startBlock, endBlock *types.WorkObject startBlock = api.quai.core.GetBlockByHash(startHash) if startBlock == nil { return nil, fmt.Errorf("start block %x not found", startHash) @@ -495,7 +457,7 @@ func (api *PrivateDebugAPI) GetModifiedAccountsByHash(startHash common.Hash, end return api.getModifiedAccounts(startBlock, endBlock) } -func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) { +func (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.WorkObject) ([]common.Address, error) { nodeLocation := api.quai.core.NodeLocation() nodeCtx := api.quai.core.NodeCtx() if startBlock.NumberU64(nodeCtx) >= endBlock.NumberU64(nodeCtx) { diff --git a/quai/api_backend.go b/quai/api_backend.go index fa7b2ced7b..ded98db698 100644 --- a/quai/api_backend.go +++ b/quai/api_backend.go @@ -66,7 +66,7 @@ func (b *QuaiAPIBackend) NodeCtx() int { return b.quai.core.NodeCtx() } -func (b *QuaiAPIBackend) CurrentBlock() *types.Block { +func (b *QuaiAPIBackend) CurrentBlock() *types.WorkObject { return b.quai.core.CurrentBlock() } @@ -76,29 +76,29 @@ func (b *QuaiAPIBackend) CurrentLogEntropy() *big.Int { } // TotalLogS returns the total entropy reduction if the chain since genesis to the given header -func (b *QuaiAPIBackend) TotalLogS(header *types.Header) *big.Int { +func (b *QuaiAPIBackend) TotalLogS(header *types.WorkObject) *big.Int { return b.quai.core.TotalLogS(header) } // CalcOrder returns the order of the block within the hierarchy of chains -func (b *QuaiAPIBackend) CalcOrder(header *types.Header) (*big.Int, int, error) { +func (b *QuaiAPIBackend) CalcOrder(header *types.WorkObject) (*big.Int, int, error) { return b.quai.core.CalcOrder(header) } -func (b *QuaiAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { +func (b *QuaiAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.WorkObject, error) { // Pending block is only known by the miner if number == rpc.PendingBlockNumber { block := b.quai.core.PendingBlock() - return block.Header(), nil + return block, nil } // Otherwise resolve and return the block if number == rpc.LatestBlockNumber { - return b.quai.core.CurrentBlock().Header(), nil + return b.quai.core.CurrentBlock(), nil } return b.quai.core.GetHeaderByNumber(uint64(number)), nil } -func (b *QuaiAPIBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { +func (b *QuaiAPIBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.WorkObject, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.HeaderByNumber(ctx, blockNr) } @@ -115,11 +115,11 @@ func (b *QuaiAPIBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash return nil, errors.New("invalid arguments; neither block nor hash specified") } -func (b *QuaiAPIBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { +func (b *QuaiAPIBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.WorkObject, error) { return b.quai.core.GetHeaderByHash(hash), nil } -func (b *QuaiAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { +func (b *QuaiAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.WorkObject, error) { // 
Pending block is only known by the miner if number == rpc.PendingBlockNumber { block := b.quai.core.PendingBlock() @@ -136,15 +136,15 @@ func (b *QuaiAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumb return nil, errors.New("block is nil api backend") } -func (b *QuaiAPIBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { +func (b *QuaiAPIBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.WorkObject, error) { return b.quai.core.GetBlockByHash(hash), nil } -func (b *QuaiAPIBackend) BlockOrCandidateByHash(hash common.Hash) *types.Block { +func (b *QuaiAPIBackend) BlockOrCandidateByHash(hash common.Hash) *types.WorkObject { return b.quai.core.GetBlockOrCandidateByHash(hash) } -func (b *QuaiAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { +func (b *QuaiAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.WorkObject, error) { if blockNr, ok := blockNrOrHash.Number(); ok { return b.BlockByNumber(ctx, blockNr) } @@ -165,11 +165,11 @@ func (b *QuaiAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash return nil, errors.New("invalid arguments; neither block nor hash specified") } -func (b *QuaiAPIBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { +func (b *QuaiAPIBackend) PendingBlockAndReceipts() (*types.WorkObject, types.Receipts) { return b.quai.core.PendingBlockAndReceipts() } -func (b *QuaiAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) { +func (b *QuaiAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.WorkObject, error) { nodeCtx := b.quai.core.NodeCtx() if nodeCtx != common.ZONE_CTX { return nil, nil, errors.New("stateAndHeaderByNumber can only be called in zone chain") @@ -177,7 +177,7 @@ func (b *QuaiAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc. // Pending state is only known by the miner if number == rpc.PendingBlockNumber { block := b.quai.core.Pending() - return &state.StateDB{}, block.Header(), nil + return &state.StateDB{}, block, nil } // Otherwise resolve the block number and return its state header, err := b.HeaderByNumber(ctx, number) @@ -191,7 +191,7 @@ func (b *QuaiAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc. 
return stateDb, header, err } -func (b *QuaiAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) { +func (b *QuaiAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.WorkObject, error) { nodeCtx := b.quai.core.NodeCtx() if nodeCtx != common.ZONE_CTX { return nil, nil, errors.New("stateAndHeaderByNumberOrHash can only be called in zone chain") @@ -253,7 +253,7 @@ func (b *QuaiAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty return logs, nil } -func (b *QuaiAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) { +func (b *QuaiAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.WorkObject, vmConfig *vm.Config) (*vm.EVM, func() error, error) { vmError := func() error { return nil } nodeCtx := b.quai.core.NodeCtx() if nodeCtx != common.ZONE_CTX { @@ -348,7 +348,7 @@ func (b *QuaiAPIBackend) GetTransaction(ctx context.Context, txHash common.Hash) if nodeCtx != common.ZONE_CTX { return nil, common.Hash{}, 0, 0, errors.New("getTransaction can only be called in zone chain") } - tx, blockHash, blockNumber, index := rawdb.ReadTransaction(b.quai.ChainDb(), txHash, b.NodeLocation()) + tx, blockHash, blockNumber, index := rawdb.ReadTransaction(b.quai.ChainDb(), txHash) if tx == nil { return nil, common.Hash{}, 0, 0, errors.New("transaction not found") } @@ -446,11 +446,11 @@ func (b *QuaiAPIBackend) Engine() consensus.Engine { return b.quai.engine } -func (b *QuaiAPIBackend) CurrentHeader() *types.Header { +func (b *QuaiAPIBackend) CurrentHeader() *types.WorkObject { return b.quai.core.CurrentHeader() } -func (b *QuaiAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) { +func (b *QuaiAPIBackend) StateAtBlock(ctx context.Context, block *types.WorkObject, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) { nodeCtx := b.quai.core.NodeCtx() if nodeCtx != common.ZONE_CTX { return nil, errors.New("stateAtBlock can only be called in zone chain") @@ -458,7 +458,7 @@ func (b *QuaiAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, r return b.quai.core.StateAtBlock(block, reexec, base, checkLive) } -func (b *QuaiAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { +func (b *QuaiAPIBackend) StateAtTransaction(ctx context.Context, block *types.WorkObject, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { nodeCtx := b.quai.core.NodeCtx() if nodeCtx != common.ZONE_CTX { return nil, vm.BlockContext{}, nil, errors.New("stateAtTransaction can only be called in zone chain") @@ -466,7 +466,7 @@ func (b *QuaiAPIBackend) StateAtTransaction(ctx context.Context, block *types.Bl return b.quai.core.StateAtTransaction(block, txIndex, reexec) } -func (b *QuaiAPIBackend) Append(header *types.Header, manifest types.BlockManifest, domPendingHeader *types.Header, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) { +func (b *QuaiAPIBackend) Append(header *types.WorkObject, manifest types.BlockManifest, domPendingHeader *types.WorkObject, domTerminus common.Hash, domOrigin bool, newInboundEtxs 
types.Transactions) (types.Transactions, bool, bool, error) { return b.quai.core.Append(header, manifest, domPendingHeader, domTerminus, domOrigin, newInboundEtxs) } @@ -474,19 +474,19 @@ func (b *QuaiAPIBackend) DownloadBlocksInManifest(hash common.Hash, manifest typ b.quai.core.DownloadBlocksInManifest(hash, manifest, entropy) } -func (b *QuaiAPIBackend) ConstructLocalMinedBlock(header *types.Header) (*types.Block, error) { +func (b *QuaiAPIBackend) ConstructLocalMinedBlock(header *types.WorkObject) (*types.WorkObject, error) { return b.quai.core.ConstructLocalMinedBlock(header) } -func (b *QuaiAPIBackend) InsertBlock(ctx context.Context, block *types.Block) (int, error) { - return b.quai.core.InsertChain([]*types.Block{block}) +func (b *QuaiAPIBackend) InsertBlock(ctx context.Context, block *types.WorkObject) (int, error) { + return b.quai.core.InsertChain([]*types.WorkObject{block}) } -func (b *QuaiAPIBackend) WriteBlock(block *types.Block) { +func (b *QuaiAPIBackend) WriteBlock(block *types.WorkObject) { b.quai.core.WriteBlock(block) } -func (b *QuaiAPIBackend) PendingBlock() *types.Block { +func (b *QuaiAPIBackend) PendingBlock() *types.WorkObject { return b.quai.core.PendingBlock() } @@ -506,7 +506,7 @@ func (b *QuaiAPIBackend) ProcessingState() bool { return b.quai.core.ProcessingState() } -func (b *QuaiAPIBackend) NewGenesisPendingHeader(pendingHeader *types.Header, domTerminus common.Hash, genesisHash common.Hash) { +func (b *QuaiAPIBackend) NewGenesisPendingHeader(pendingHeader *types.WorkObject, domTerminus common.Hash, genesisHash common.Hash) { b.quai.core.NewGenesisPendigHeader(pendingHeader, domTerminus, genesisHash) } @@ -514,11 +514,11 @@ func (b *QuaiAPIBackend) SetCurrentExpansionNumber(expansionNumber uint8) { b.quai.core.SetCurrentExpansionNumber(expansionNumber) } -func (b *QuaiAPIBackend) WriteGenesisBlock(block *types.Block, location common.Location) { +func (b *QuaiAPIBackend) WriteGenesisBlock(block *types.WorkObject, location common.Location) { b.quai.core.WriteGenesisBlock(block, location) } -func (b *QuaiAPIBackend) GetPendingHeader() (*types.Header, error) { +func (b *QuaiAPIBackend) GetPendingHeader() (*types.WorkObject, error) { return b.quai.core.GetPendingHeader() } @@ -538,11 +538,11 @@ func (b *QuaiAPIBackend) AddPendingEtxsRollup(pEtxsRollup types.PendingEtxsRollu return b.quai.core.AddPendingEtxsRollup(pEtxsRollup) } -func (b *QuaiAPIBackend) SubscribePendingHeaderEvent(ch chan<- *types.Header) event.Subscription { +func (b *QuaiAPIBackend) SubscribePendingHeaderEvent(ch chan<- *types.WorkObject) event.Subscription { return b.quai.core.SubscribePendingHeader(ch) } -func (b *QuaiAPIBackend) GenerateRecoveryPendingHeader(pendingHeader *types.Header, checkpointHashes types.Termini) error { +func (b *QuaiAPIBackend) GenerateRecoveryPendingHeader(pendingHeader *types.WorkObject, checkpointHashes types.Termini) error { return b.quai.core.GenerateRecoveryPendingHeader(pendingHeader, checkpointHashes) } @@ -566,7 +566,7 @@ func (b *QuaiAPIBackend) SetSubClient(client *quaiclient.Client, location common b.quai.core.SetSubClient(client, location) } -func (b *QuaiAPIBackend) AddGenesisPendingEtxs(block *types.Block) { +func (b *QuaiAPIBackend) AddGenesisPendingEtxs(block *types.WorkObject) { b.quai.core.AddGenesisPendingEtxs(block) } @@ -577,6 +577,6 @@ func (b *QuaiAPIBackend) SubscribeExpansionEvent(ch chan<- core.ExpansionEvent) // /////////////////////////// // /////// P2P /////////////// // /////////////////////////// -func (b *QuaiAPIBackend) 
BroadcastBlock(block *types.Block, location common.Location) error { +func (b *QuaiAPIBackend) BroadcastBlock(block *types.WorkObject, location common.Location) error { return b.quai.p2p.Broadcast(location, block) } diff --git a/quai/backend.go b/quai/backend.go index 53ad7517da..f517fe44e2 100644 --- a/quai/backend.go +++ b/quai/backend.go @@ -81,7 +81,7 @@ type Quai struct { // New creates a new Quai object (including the // initialisation of the common Quai object) -func New(stack *node.Node, p2p NetworkingAPI, config *quaiconfig.Config, nodeCtx int, currentExpansionNumber uint8, startingExpansionNumber uint64, genesisBlock *types.Block, logger *log.Logger) (*Quai, error) { +func New(stack *node.Node, p2p NetworkingAPI, config *quaiconfig.Config, nodeCtx int, currentExpansionNumber uint8, startingExpansionNumber uint64, genesisBlock *types.WorkObject, logger *log.Logger) (*Quai, error) { // Ensure configuration values are compatible and sane if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(common.Big0) <= 0 { logger.WithFields(log.Fields{ @@ -105,7 +105,7 @@ func New(stack *node.Node, p2p NetworkingAPI, config *quaiconfig.Config, nodeCtx }).Info("Allocated trie memory caches") // Assemble the Quai object - chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false) + chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false, config.NodeLocation) if err != nil { return nil, err } @@ -132,7 +132,7 @@ func New(stack *node.Node, p2p NetworkingAPI, config *quaiconfig.Config, nodeCtx // This only happens during the expansion if genesisBlock != nil { // write the block to the database - rawdb.WriteBlock(chainDb, genesisBlock, nodeCtx) + rawdb.WriteWorkObject(chainDb, genesisBlock.Hash(), genesisBlock, types.BlockObject, nodeCtx) rawdb.WriteHeadBlockHash(chainDb, genesisBlock.Hash()) // Initialize slice state for genesis knot genesisTermini := types.EmptyTermini() @@ -288,9 +288,6 @@ func New(stack *node.Node, p2p NetworkingAPI, config *quaiconfig.Config, nodeCtx func (s *Quai) APIs() []rpc.API { apis := quaiapi.GetAPIs(s.APIBackend) - // Append any APIs exposed explicitly by the consensus engine - apis = append(apis, s.engine.APIs(s.Core())...) - // Append all the local APIs and return return append(apis, []rpc.API{ { @@ -352,7 +349,7 @@ func (s *Quai) Etherbase() (eb common.Address, err error) { // // We regard two types of accounts as local miner account: etherbase // and accounts specified via `txpool.locals` flag. -func (s *Quai) isLocalBlock(header *types.Header) bool { +func (s *Quai) isLocalBlock(header *types.WorkObject) bool { author, err := s.engine.Author(header) if err != nil { s.logger.WithFields(log.Fields{ @@ -386,8 +383,8 @@ func (s *Quai) isLocalBlock(header *types.Header) bool { // shouldPreserve checks whether we should preserve the given block // during the chain reorg depending on whether the author of block // is a local account. 
-func (s *Quai) shouldPreserve(block *types.Block) bool { - return s.isLocalBlock(block.Header()) +func (s *Quai) shouldPreserve(block *types.WorkObject) bool { + return s.isLocalBlock(block) } func (s *Quai) Core() *core.Core { return s.core } @@ -420,7 +417,6 @@ func (s *Quai) Stop() error { close(s.closeBloomHandler) } s.core.Stop() - s.engine.Close() s.chainDb.Close() s.eventMux.Stop() s.handler.Stop() diff --git a/quai/filters/api.go b/quai/filters/api.go index a5e8300385..6ff8fdc4d6 100644 --- a/quai/filters/api.go +++ b/quai/filters/api.go @@ -603,14 +603,14 @@ func (api *PublicFilterAPI) PendingHeader(ctx context.Context) (*rpc.Subscriptio rpcSub := notifier.CreateSubscription() go func() { - header := make(chan *types.Header, c_pendingHeaderChSize) + header := make(chan *types.WorkObject, c_pendingHeaderChSize) headerSub := api.backend.SubscribePendingHeaderEvent(header) for { select { case b := <-header: // Marshal the header data - marshalHeader := b.RPCMarshalHeader() + marshalHeader := b.RPCMarshalWorkObject() notifier.Notify(rpcSub.ID, marshalHeader) case <-rpcSub.Err(): headerSub.Unsubscribe() diff --git a/quai/filters/filter.go b/quai/filters/filter.go index c5e7859cc5..fb3eff7d13 100644 --- a/quai/filters/filter.go +++ b/quai/filters/filter.go @@ -32,8 +32,8 @@ import ( type Backend interface { ChainDb() ethdb.Database - HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) - HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error) + HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.WorkObject, error) + HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.WorkObject, error) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error) GetBloom(blockHash common.Hash) (*types.Bloom, error) @@ -42,7 +42,7 @@ type Backend interface { SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription - SubscribePendingHeaderEvent(ch chan<- *types.Header) event.Subscription + SubscribePendingHeaderEvent(ch chan<- *types.WorkObject) event.Subscription ProcessingState() bool NodeLocation() common.Location NodeCtx() int @@ -130,7 +130,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { if header == nil { return nil, errors.New("unknown block") } - return f.blockLogs(ctx, header) + return f.blockLogs(ctx, header.Header()) } // Figure out the limits of the filter range header, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) @@ -202,7 +202,7 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, err if header == nil || err != nil { return logs, err } - found, err := f.checkMatches(ctx, header) + found, err := f.checkMatches(ctx, header.Header()) if err != nil { return logs, err } @@ -224,7 +224,7 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e if header == nil || err != nil { return logs, err } - found, err := f.blockLogs(ctx, header) + found, err := f.blockLogs(ctx, header.Header()) if err != nil { return logs, err } diff --git a/quai/gasprice/feehistory.go b/quai/gasprice/feehistory.go index 524a8e3eee..98bc2aff9a 100644 --- a/quai/gasprice/feehistory.go +++ b/quai/gasprice/feehistory.go @@ -50,8 +50,7 @@ const ( type blockFees struct { // set by the caller blockNumber uint64 - 
header *types.Header - block *types.Block // only set if reward percentiles are requested + block *types.WorkObject // only set if reward percentiles are requested receipts types.Receipts // filled by processBlock reward []*big.Int @@ -82,24 +81,24 @@ func (s sortGasAndReward) Less(i, j int) bool { // fills in the rest of the fields. func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { chainconfig := oracle.backend.ChainConfig() - if bf.baseFee = bf.header.BaseFee(); bf.baseFee == nil { + if bf.baseFee = bf.block.BaseFee(); bf.baseFee == nil { bf.baseFee = new(big.Int) } - bf.nextBaseFee = misc.CalcBaseFee(chainconfig, bf.header) + bf.nextBaseFee = misc.CalcBaseFee(chainconfig, bf.block) - bf.gasUsedRatio = float64(bf.header.GasUsed()) / float64(bf.header.GasLimit()) + bf.gasUsedRatio = float64(bf.block.GasUsed()) / float64(bf.block.GasLimit()) if len(percentiles) == 0 { // rewards were not requested, return null return } - if bf.block == nil || (bf.receipts == nil && len(bf.block.Transactions()) != 0) { + if bf.block == nil || (bf.receipts == nil && len(bf.block.Body().Transactions()) != 0) { oracle.logger.Error("Block or receipts are missing while reward percentiles are requested") return } bf.reward = make([]*big.Int, len(percentiles)) - if len(bf.block.Transactions()) == 0 { + if len(bf.block.Body().Transactions()) == 0 { // return an all zero row if there are no transactions to gather data from for i := range bf.reward { bf.reward[i] = new(big.Int) @@ -132,10 +131,10 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { // also returned if requested and available. // Note: an error is only returned if retrieving the head header has failed. If there are no // retrievable blocks in the specified range then zero block count is returned with no error. 
-func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.BlockNumber, blocks, maxHistory int) (*types.Block, []*types.Receipt, uint64, int, error) { +func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.BlockNumber, blocks, maxHistory int) (*types.WorkObject, []*types.Receipt, uint64, int, error) { var ( headBlock rpc.BlockNumber - pendingBlock *types.Block + pendingBlock *types.WorkObject pendingReceipts types.Receipts nodeCtx = oracle.backend.ChainConfig().Location.Context() ) @@ -227,7 +226,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast maxHistory = oracle.maxBlockHistory } var ( - pendingBlock *types.Block + pendingBlock *types.WorkObject pendingReceipts []*types.Receipt err error ) @@ -260,13 +259,13 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast fees.receipts, fees.err = oracle.backend.GetReceipts(ctx, fees.block.Hash()) } } else { - fees.header, fees.err = oracle.backend.HeaderByNumber(ctx, rpc.BlockNumber(blockNumber)) + fees.block, fees.err = oracle.backend.HeaderByNumber(ctx, rpc.BlockNumber(blockNumber)) } } if fees.block != nil { - fees.header = fees.block.Header() + fees.block = types.CopyWorkObject(fees.block) } - if fees.header != nil { + if fees.block != nil { oracle.processBlock(fees, rewardPercentiles) } // send to results even if empty to guarantee that blocks items are sent in total @@ -286,7 +285,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast return common.Big0, nil, nil, nil, fees.err } i := int(fees.blockNumber - oldestBlock) - if fees.header != nil { + if fees.block != nil { reward[i], baseFee[i], baseFee[i+1], gasUsedRatio[i] = fees.reward, fees.baseFee, fees.nextBaseFee, fees.gasUsedRatio } else { // getting no block and no error means we are requesting into the future (might happen because of a reorg) diff --git a/quai/gasprice/gasprice.go b/quai/gasprice/gasprice.go index d59ac3b39c..868df774a0 100644 --- a/quai/gasprice/gasprice.go +++ b/quai/gasprice/gasprice.go @@ -48,10 +48,10 @@ type Config struct { // OracleBackend includes all necessary background APIs for oracle. type OracleBackend interface { - HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) - BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) + HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.WorkObject, error) + BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.WorkObject, error) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) - PendingBlockAndReceipts() (*types.Block, types.Receipts) + PendingBlockAndReceipts() (*types.WorkObject, types.Receipts) ChainConfig() *params.ChainConfig } @@ -182,7 +182,7 @@ func (oracle *Oracle) getBlockValues(ctx context.Context, signer types.Signer, b return } // Sort the transaction by effective tip in ascending sort. 
- txs := make([]*types.Transaction, len(block.Transactions())) + txs := make([]*types.Transaction, len(block.Body().Transactions())) copy(txs, block.Transactions()) sorter := newSorter(txs, block.BaseFee()) sort.Sort(sorter) diff --git a/quai/gasprice/gasprice_test.go b/quai/gasprice/gasprice_test.go index 2928a81357..62e39c3a61 100644 --- a/quai/gasprice/gasprice_test.go +++ b/quai/gasprice/gasprice_test.go @@ -140,11 +140,11 @@ func newTestBackend(t *testing.T, pending bool) *testBackend { return &testBackend{chain: chain, pending: pending} } -func (b *testBackend) CurrentHeader() *types.Header { +func (b *testBackend) CurrentHeader() *types.WorkObject { return b.chain.CurrentHeader() } -func (b *testBackend) GetBlockByNumber(number uint64) *types.Block { +func (b *testBackend) GetBlockByNumber(number uint64) *types.WorkObject { return b.chain.GetBlockByNumber(number) } diff --git a/quai/handler.go b/quai/handler.go index 02824bd743..c4072de149 100644 --- a/quai/handler.go +++ b/quai/handler.go @@ -1,14 +1,15 @@ package quai import ( + "math/big" + "sync" + "time" + "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/core" "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/event" "github.com/dominant-strategies/go-quai/log" - "math/big" - "sync" - "time" ) const ( @@ -80,10 +81,10 @@ func (h *handler) missingBlockLoop() { select { case blockRequest := <-h.missingBlockCh: go func() { - resultCh := h.p2pBackend.Request(h.nodeLocation, blockRequest.Hash, &types.Block{}) + resultCh := h.p2pBackend.Request(h.nodeLocation, blockRequest.Hash, &types.WorkObject{}) block := <-resultCh if block != nil { - h.core.WriteBlock(block.(*types.Block)) + h.core.WriteBlock(block.(*types.WorkObject)) } }() case <-h.missingBlockSub.Err(): @@ -133,10 +134,10 @@ func (h *handler) checkNextPrimeBlock() { // appended database we ask the peer for the block with this hash if block == nil { go func() { - resultCh := h.p2pBackend.Request(h.nodeLocation, blockHash, &types.Block{}) + resultCh := h.p2pBackend.Request(h.nodeLocation, blockHash, &types.WorkObject{}) block := <-resultCh if block != nil { - h.core.WriteBlock(block.(*types.Block)) + h.core.WriteBlock(block.(*types.WorkObject)) } }() } diff --git a/quai/interface.go b/quai/interface.go index 926c819822..7db63eee8c 100644 --- a/quai/interface.go +++ b/quai/interface.go @@ -30,7 +30,7 @@ type ConsensusAPI interface { // Asks the consensus backend to lookup a block by hash and location. // If the block is found, it should be returned. Otherwise, nil should be returned. - LookupBlock(common.Hash, common.Location) *types.Block + LookupBlock(common.Hash, common.Location) *types.WorkObject LookupBlockHashByNumber(*big.Int, common.Location) *common.Hash @@ -51,10 +51,10 @@ type ConsensusAPI interface { SetSubClient(*quaiclient.Client, common.Location, common.Location) // AddGenesisPendingEtxs adds the genesis pending etxs for the given location - AddGenesisPendingEtxs(*types.Block, common.Location) + AddGenesisPendingEtxs(*types.WorkObject, common.Location) // WriteGenesisBlock adds the genesis block to the database and also writes the block to the disk - WriteGenesisBlock(*types.Block, common.Location) + WriteGenesisBlock(*types.WorkObject, common.Location) } // The networking backend will implement the following interface to enable consensus to communicate with other nodes. 
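For reference, the consensus-side hunks above (quai/handler.go and quai/interface.go) swap *types.Block for *types.WorkObject throughout the peer-request path. The minimal sketch below condenses the updated missingBlockLoop body into a helper; the helper name is illustrative only and does not appear in the diff, while the fields and calls (p2pBackend.Request, core.WriteBlock) are taken from the hunks above.

// requestMissingWorkObject asks a peer for the work object with the given hash
// and, if one comes back, hands it to the core. Peers are now asked for a
// *types.WorkObject rather than a *types.Block, and the reply is type-asserted
// back to *types.WorkObject before being written.
func (h *handler) requestMissingWorkObject(hash common.Hash) {
	go func() {
		resultCh := h.p2pBackend.Request(h.nodeLocation, hash, &types.WorkObject{})
		block := <-resultCh
		if block != nil {
			h.core.WriteBlock(block.(*types.WorkObject))
		}
	}()
}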
diff --git a/quai/p2p_backend.go b/quai/p2p_backend.go index dc94234fd2..1df71055a0 100644 --- a/quai/p2p_backend.go +++ b/quai/p2p_backend.go @@ -79,8 +79,8 @@ func (qbe *QuaiBackend) GetBackend(location common.Location) *quaiapi.Backend { // Handle consensus data propagated to us from our peers func (qbe *QuaiBackend) OnNewBroadcast(sourcePeer p2p.PeerID, data interface{}, nodeLocation common.Location) bool { switch data.(type) { - case types.Block: - block := data.(types.Block) + case types.WorkObject: + block := data.(types.WorkObject) backend := *qbe.GetBackend(nodeLocation) if backend == nil { log.Global.Error("no backend found") @@ -124,8 +124,8 @@ func (qbe *QuaiBackend) ValidatorFunc() func(ctx context.Context, id p2p.PeerID, var data interface{} data = msg.Message.GetData() switch data.(type) { - case types.Block: - block := data.(types.Block) + case types.WorkObject: + block := data.(types.WorkObject) backend := *qbe.GetBackend(block.Location()) if backend == nil { log.Global.WithFields(log.Fields{ @@ -172,7 +172,7 @@ func (qbe *QuaiBackend) SetCurrentExpansionNumber(expansionNumber uint8) { } // WriteGenesisBlock adds the genesis block to the database and also writes the block to the disk -func (qbe *QuaiBackend) WriteGenesisBlock(block *types.Block, location common.Location) { +func (qbe *QuaiBackend) WriteGenesisBlock(block *types.WorkObject, location common.Location) { backend := *qbe.GetBackend(location) if backend == nil { log.Global.Error("no backend found") @@ -192,7 +192,7 @@ func (qbe *QuaiBackend) SetSubClient(client *quaiclient.Client, nodeLocation com } // AddGenesisPendingEtxs adds the genesis pending etxs for the given location -func (qbe *QuaiBackend) AddGenesisPendingEtxs(block *types.Block, location common.Location) { +func (qbe *QuaiBackend) AddGenesisPendingEtxs(block *types.WorkObject, location common.Location) { backend := *qbe.GetBackend(location) if backend == nil { log.Global.Error("no backend found") @@ -201,7 +201,7 @@ func (qbe *QuaiBackend) AddGenesisPendingEtxs(block *types.Block, location commo backend.AddGenesisPendingEtxs(block) } -func (qbe *QuaiBackend) LookupBlock(hash common.Hash, location common.Location) *types.Block { +func (qbe *QuaiBackend) LookupBlock(hash common.Hash, location common.Location) *types.WorkObject { if qbe == nil { return nil } diff --git a/quaiclient/ethclient/ethclient.go b/quaiclient/ethclient/ethclient.go index 98bd497083..6199b666db 100644 --- a/quaiclient/ethclient/ethclient.go +++ b/quaiclient/ethclient/ethclient.go @@ -75,7 +75,7 @@ func (ec *Client) ChainID(ctx context.Context) (*big.Int, error) { // // Note that loading full blocks requires two requests. Use HeaderByHash // if you don't need all transactions or uncle headers. -func (ec *Client) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { +func (ec *Client) BlockByHash(ctx context.Context, hash common.Hash) (*types.WorkObject, error) { return ec.getBlock(ctx, "eth_getBlockByHash", hash, true) } @@ -84,7 +84,7 @@ func (ec *Client) BlockByHash(ctx context.Context, hash common.Hash) (*types.Blo // // Note that loading full blocks requires two requests. Use HeaderByNumber // if you don't need all transactions or uncle headers. 
-func (ec *Client) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { +func (ec *Client) BlockByNumber(ctx context.Context, number *big.Int) (*types.WorkObject, error) { return ec.getBlock(ctx, "eth_getBlockByNumber", toBlockNumArg(number), true) } @@ -104,7 +104,7 @@ type rpcBlock struct { InterlinkHashes common.Hashes `json:"interlinkHashes"` } -func (ec *Client) getBlock(ctx context.Context, method string, args ...interface{}) (*types.Block, error) { +func (ec *Client) getBlock(ctx context.Context, method string, args ...interface{}) (*types.WorkObject, error) { var raw json.RawMessage err := ec.c.CallContext(ctx, &raw, method, args...) if err != nil { @@ -113,7 +113,7 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface return nil, quai.NotFound } // Decode header and transactions. - var head *types.Header + var head *types.WorkObject var body rpcBlock if err := json.Unmarshal(raw, &head); err != nil { return nil, err @@ -122,9 +122,9 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface return nil, err } // Load uncles because they are not included in the block response. - var uncles []*types.Header + var uncles []*types.WorkObject if len(body.UncleHashes) > 0 { - uncles = make([]*types.Header, len(body.UncleHashes)) + uncles = make([]*types.WorkObject, len(body.UncleHashes)) reqs := make([]rpc.BatchElem, len(body.UncleHashes)) for i := range reqs { reqs[i] = rpc.BatchElem{ @@ -168,7 +168,7 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface } var interlinkHashes common.Hashes copy(interlinkHashes, body.InterlinkHashes) - return types.NewBlockWithHeader(head).WithBody(txs, uncles, etxs, manifest, interlinkHashes), nil + return types.NewWorkObjectWithHeaderAndTx(head.WorkObjectHeader(), nil).WithBody(head.Header(), txs, etxs, uncles, manifest, interlinkHashes), nil } // HeaderByHash returns the block header with the given hash. @@ -325,12 +325,12 @@ func (ec *Client) SyncProgress(ctx context.Context) (*quai.SyncProgress, error) // SubscribeNewHead subscribes to notifications about the current blockchain head // on the given channel. -func (ec *Client) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (quai.Subscription, error) { +func (ec *Client) SubscribeNewHead(ctx context.Context, ch chan<- *types.WorkObject) (quai.Subscription, error) { return ec.c.EthSubscribe(ctx, ch, "newHeads") } // SubscribePendingHeader subscribes to notifications about the current pending block on the node. -func (ec *Client) SubscribePendingHeader(ctx context.Context, ch chan<- *types.Header) (quai.Subscription, error) { +func (ec *Client) SubscribePendingHeader(ctx context.Context, ch chan<- *types.WorkObject) (quai.Subscription, error) { return ec.c.EthSubscribe(ctx, ch, "pendingHeader") } @@ -427,8 +427,8 @@ func (ec *Client) PendingTransactionCount(ctx context.Context) (uint, error) { } // GetPendingHeader gets the latest pending header from the chain. 
-func (ec *Client) GetPendingHeader(ctx context.Context) (*types.Header, error) { - var pendingHeader *types.Header +func (ec *Client) GetPendingHeader(ctx context.Context) (*types.WorkObject, error) { + var pendingHeader *types.WorkObject err := ec.c.CallContext(ctx, &pendingHeader, "quai_getPendingHeader") if err != nil { return nil, err @@ -436,12 +436,6 @@ func (ec *Client) GetPendingHeader(ctx context.Context) (*types.Header, error) { return pendingHeader, nil } -// ReceiveMinedHeader sends a mined block back to the node -func (ec *Client) ReceiveMinedHeader(ctx context.Context, header *types.Header) error { - data := header.RPCMarshalHeader() - return ec.c.CallContext(ctx, nil, "quai_receiveMinedHeader", data) -} - // Contract Calling // CallContract executes a message call transaction, which is directly executed in the VM diff --git a/quaiclient/quaiclient.go b/quaiclient/quaiclient.go index 9c227808dd..0957494e08 100644 --- a/quaiclient/quaiclient.go +++ b/quaiclient/quaiclient.go @@ -97,15 +97,15 @@ type appendReturns struct { } // SubscribePendingHeader subscribes to notifications about the current pending block on the node. -func (ec *Client) SubscribePendingHeader(ctx context.Context, ch chan<- *types.Header) (quai.Subscription, error) { +func (ec *Client) SubscribePendingHeader(ctx context.Context, ch chan<- *types.WorkObject) (quai.Subscription, error) { return ec.c.QuaiSubscribe(ctx, ch, "pendingHeader") } -func (ec *Client) Append(ctx context.Context, header *types.Header, manifest types.BlockManifest, domPendingHeader *types.Header, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) { +func (ec *Client) Append(ctx context.Context, header *types.WorkObject, manifest types.BlockManifest, domPendingHeader *types.WorkObject, domTerminus common.Hash, domOrigin bool, newInboundEtxs types.Transactions) (types.Transactions, bool, bool, error) { fields := map[string]interface{}{ - "header": header.RPCMarshalHeader(), + "header": header.RPCMarshalWorkObject(), "manifest": manifest, - "domPendingHeader": domPendingHeader.RPCMarshalHeader(), + "domPendingHeader": domPendingHeader.RPCMarshalWorkObject(), "domTerminus": domTerminus, "domOrigin": domOrigin, "newInboundEtxs": newInboundEtxs, @@ -136,7 +136,7 @@ func (ec *Client) DownloadBlocksInManifest(ctx context.Context, hash common.Hash } func (ec *Client) SubRelayPendingHeader(ctx context.Context, pendingHeader types.PendingHeader, newEntropy *big.Int, location common.Location, subReorg bool, order int) { - data := map[string]interface{}{"header": pendingHeader.Header().RPCMarshalHeader()} + data := map[string]interface{}{"header": pendingHeader.WorkObject().RPCMarshalWorkObject()} data["NewEntropy"] = newEntropy data["termini"] = pendingHeader.Termini().RPCMarshalTermini() data["Location"] = location @@ -147,7 +147,7 @@ func (ec *Client) SubRelayPendingHeader(ctx context.Context, pendingHeader types } func (ec *Client) UpdateDom(ctx context.Context, oldTerminus common.Hash, pendingHeader types.PendingHeader, location common.Location) { - data := map[string]interface{}{"header": pendingHeader.Header().RPCMarshalHeader()} + data := map[string]interface{}{"header": pendingHeader.WorkObject().RPCMarshalWorkObject()} data["OldTerminus"] = oldTerminus data["Location"] = location data["termini"] = pendingHeader.Termini().RPCMarshalTermini() @@ -163,9 +163,9 @@ func (ec *Client) RequestDomToAppendOrFetch(ctx context.Context, hash common.Has ec.c.CallContext(ctx, nil, 
"quai_requestDomToAppendOrFetch", data) } -func (ec *Client) NewGenesisPendingHeader(ctx context.Context, header *types.Header, domTerminus common.Hash, genesisHash common.Hash) { - fields := map[string]interface{}{"header": header.RPCMarshalHeader(), "domTerminus": domTerminus, "genesisHash": genesisHash} - ec.c.CallContext(ctx, nil, "quai_newGenesisPendingHeader", fields) +func (ec *Client) NewGenesisPendingHeader(ctx context.Context, header *types.WorkObject, domTerminus common.Hash, genesisHash common.Hash) error { + fields := map[string]interface{}{"header": header.RPCMarshalWorkObject(), "domTerminus": domTerminus, "genesisHash": genesisHash} + return ec.c.CallContext(ctx, nil, "quai_newGenesisPendingHeader", fields) } // GetManifest will get the block manifest ending with the parent hash @@ -222,7 +222,7 @@ func (ec *Client) GetPendingEtxsFromSub(ctx context.Context, hash common.Hash, l func (ec *Client) SendPendingEtxsToDom(ctx context.Context, pEtxs types.PendingEtxs) error { fields := make(map[string]interface{}) - fields["header"] = pEtxs.Header.RPCMarshalHeader() + fields["header"] = pEtxs.Header.RPCMarshalWorkObject() fields["etxs"] = pEtxs.Etxs var raw json.RawMessage err := ec.c.CallContext(ctx, &raw, "quai_sendPendingEtxsToDom", fields) @@ -234,15 +234,15 @@ func (ec *Client) SendPendingEtxsToDom(ctx context.Context, pEtxs types.PendingE func (ec *Client) SendPendingEtxsRollupToDom(ctx context.Context, pEtxsRollup types.PendingEtxsRollup) error { fields := make(map[string]interface{}) - fields["header"] = pEtxsRollup.Header.RPCMarshalHeader() + fields["header"] = pEtxsRollup.Header.RPCMarshalWorkObject() fields["etxsrollup"] = pEtxsRollup.EtxsRollup var raw json.RawMessage return ec.c.CallContext(ctx, &raw, "quai_sendPendingEtxsRollupToDom", fields) } -func (ec *Client) GenerateRecoveryPendingHeader(ctx context.Context, pendingHeader *types.Header, checkpointHashes types.Termini) error { +func (ec *Client) GenerateRecoveryPendingHeader(ctx context.Context, pendingHeader *types.WorkObject, checkpointHashes types.Termini) error { fields := make(map[string]interface{}) - fields["pendingHeader"] = pendingHeader.RPCMarshalHeader() + fields["pendingHeader"] = pendingHeader.RPCMarshalWorkObject() fields["checkpointHashes"] = checkpointHashes.RPCMarshalTermini() return ec.c.CallContext(ctx, nil, "quai_generateRecoveryPendingHeader", fields) } @@ -270,8 +270,8 @@ func (ec *Client) HeaderByNumber(ctx context.Context, number string) *types.Head //// Miner APIS // GetPendingHeader gets the latest pending header from the chain. 
-func (ec *Client) GetPendingHeader(ctx context.Context) (*types.Header, error) { - var pendingHeader *types.Header +func (ec *Client) GetPendingHeader(ctx context.Context) (*types.WorkObject, error) { + var pendingHeader *types.WorkObject err := ec.c.CallContext(ctx, &pendingHeader, "quai_getPendingHeader") if err != nil { return nil, err @@ -280,8 +280,8 @@ func (ec *Client) GetPendingHeader(ctx context.Context) (*types.Header, error) { } // ReceiveMinedHeader sends a mined block back to the node -func (ec *Client) ReceiveMinedHeader(ctx context.Context, header *types.Header) error { - data := header.RPCMarshalHeader() +func (ec *Client) ReceiveMinedHeader(ctx context.Context, header *types.WorkObject) error { + data := header.RPCMarshalWorkObject() return ec.c.CallContext(ctx, nil, "quai_receiveMinedHeader", data) } diff --git a/quaiclient/transaction_test.go b/quaiclient/transaction_test.go index dd8020f16c..68c6f8c2b1 100644 --- a/quaiclient/transaction_test.go +++ b/quaiclient/transaction_test.go @@ -2,88 +2,111 @@ package quaiclient import ( "context" + "crypto/ecdsa" "math/big" "testing" "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/core/types" - "github.com/dominant-strategies/go-quai/crypto" + goCrypto "github.com/dominant-strategies/go-quai/crypto" + "github.com/dominant-strategies/go-quai/params" "github.com/dominant-strategies/go-quai/quaiclient/ethclient" ) var ( - location = common.Location{0, 1} - PARAMS = params.ChainConfig{ChainID: big.NewInt(1337), Location: location} - wsUrl = "ws://localhost:8101" - wsUrlCyprus2 = "ws://localhost:8101" - MINERTIP = big.NewInt(1 * params.GWei) - BASEFEE = big.NewInt(1 * params.GWei) - GAS = uint64(42000) - VALUE = big.NewInt(10) + location = common.Location{0, 0} + PARAMS = params.ChainConfig{ChainID: big.NewInt(1337), Location: location} + MINERTIP = big.NewInt(1 * params.GWei) + BASEFEE = big.NewInt(1 * params.GWei) + GAS = uint64(420000) + VALUE = big.NewInt(10) ) -func TestETX(t *testing.T) { - fromAddress := common.HexToAddress("0x016E397cf93239A46Ef94Abf3676cb541013F5Fa", location) - privKey, err := crypto.ToECDSA(common.FromHex("0x178bc73569bc321f3941936fa8d5f118cfe6a1fadccbffa329e95dd4692ee2fe")) +func TestTX(t *testing.T) { + + numTests := 1 + fromAddress := make([]common.Address, numTests) + privKey := make([]*ecdsa.PrivateKey, numTests) + toAddress := make([]common.Address, numTests) + // toPrivKey := make([]*ecdsa.PrivateKey, numTests) + wsUrl := make([]string, numTests) + err := error(nil) + fromLocation := make([]common.Location, numTests) + toLocation := make([]common.Location, numTests) + + //cyprus 1 -> cyprus 1 + fromLocation[0] = common.Location{0, 0} + toLocation[0] = common.Location{0, 0} + fromAddress[0] = common.HexToAddress("0x0021358CeaC22936858C3eDa6EB86e0559915550", fromLocation[0]) + privKey[0], err = goCrypto.ToECDSA(common.FromHex("0x7e99ffbdf4b3dda10174f18a0991114bb4a7a684b5972c6901fbe8a4a4bfc325")) if err != nil { t.Fatalf("Failed to convert private key to ECDSA: %v", err) } - from := crypto.PubkeyToAddress(privKey.PublicKey, location) - if !from.Equal(fromAddress) { - t.Fatalf("Failed to convert public key to address: %v", err) - } - - toAddress := common.HexToAddress("0x107569E1b81B7c217062ed10e6d03e6e94a80DaC", common.Location{1, 0}) - toPrivKey, err := crypto.ToECDSA(common.FromHex("0x2541d7f6f17d4c65359bad46d82a48eacce266af8df72b982174f7ef9f934be2")) - if err != nil { - t.Fatalf("Failed to convert private key to ECDSA: %v", err) - } - to := 
crypto.PubkeyToAddress(toPrivKey.PublicKey, common.Location{1, 0}) - if !to.Equal(toAddress) { - t.Fatalf("Failed to convert public key to address: %v", err) - } + toAddress[0] = common.HexToAddress("0x0147f9CEa7662C567188D58640ffC48901cde02a", toLocation[0]) + // toPrivKey[0], err = goCrypto.ToECDSA(common.FromHex("0x86f3731e698525a27530d4da6d1ae826303bb9b813ee718762b4c3524abddac5")) + // if err != nil { + // t.Fatalf("Failed to convert private key to ECDSA: %v", err) + // } + wsUrl[0] = "ws://localhost:8100" + to := toAddress[0] + + for i := 0; i < numTests; i++ { + from := goCrypto.PubkeyToAddress(privKey[i].PublicKey, fromLocation[i]) + if !from.Equal(fromAddress[i]) { + t.Fatalf("Failed to convert public key to address: %v", err) + } + + // to := goCrypto.PubkeyToAddress(toPrivKey[i].PublicKey, toLocation[i]) + // if !to.Equal(toAddress[i]) { + // t.Fatalf("Failed to convert public key to address: %v", err) + // } + + signer := types.LatestSigner(&PARAMS) + + wsClient, err := ethclient.Dial(wsUrl[i]) + if err != nil { + t.Fatalf("Failed to connect to the Ethereum WebSocket client: %v", err) + } + defer wsClient.Close() + + nonce, err := wsClient.NonceAt(context.Background(), from, nil) + + if err != nil { + t.Error(err.Error()) + t.Fail() + } + + inner_tx := types.QuaiTx{ChainID: PARAMS.ChainID, Nonce: nonce, GasTipCap: MINERTIP, GasFeeCap: BASEFEE, Gas: GAS * 3, To: &to, Value: VALUE, Data: nil, AccessList: types.AccessList{}} + tx := types.NewTx(&inner_tx) + + tx, err = types.SignTx(tx, signer, privKey[i]) + if err != nil { + t.Error(err.Error()) + t.Fail() + } + + t.Log(tx.Hash().String()) + + err = wsClient.SendTransaction(context.Background(), tx) + if err != nil { + t.Error(err.Error()) + t.Fail() + } - signer := types.LatestSigner(&PARAMS) - - wsClient, err := ethclient.Dial(wsUrl) - if err != nil { - t.Fatalf("Failed to connect to the Ethereum WebSocket client: %v", err) } - defer wsClient.Close() - - nonce, err := wsClient.NonceAt(context.Background(), from, nil) - if err != nil { - t.Error(err.Error()) - t.Fail() - } - - inner_tx := types.InternalToExternalTx{ChainID: PARAMS.ChainID, Nonce: nonce, GasTipCap: MINERTIP, Data: []byte{}, ETXData: []byte{}, GasFeeCap: BASEFEE, ETXGasPrice: big.NewInt(500 * params.GWei), ETXGasLimit: 21000, ETXGasTip: big.NewInt(500 * params.GWei), Gas: GAS * 3, To: &to, Value: VALUE, AccessList: types.AccessList{}} - tx := types.NewTx(&inner_tx) - t.Log(tx.Hash().String()) - - tx, err = types.SignTx(tx, signer, privKey) - if err != nil { - t.Error(err.Error()) - t.Fail() - } - - err = wsClient.SendTransaction(context.Background(), tx) - if err != nil { - t.Error(err.Error()) - t.Fail() - } - } func TestGetBalance(t *testing.T) { + wsUrl := "ws://localhost:8100" + wsUrlCyprus2 := "ws://localhost:8101" wsClientCyprus1, err := ethclient.Dial(wsUrl) if err != nil { t.Fatalf("Failed to connect to the Ethereum WebSocket client: %v", err) } defer wsClientCyprus1.Close() - balance, err := wsClientCyprus1.BalanceAt(context.Background(), common.HexToAddress("0x007c0C63038D8E099D6CDe00BBec41ca0d940D40", common.Location{0, 0}), nil) + balance, err := wsClientCyprus1.BalanceAt(context.Background(), common.HexToAddress("0x0047f9CEa7662C567188D58640ffC48901cde02a", common.Location{0, 0}), nil) if err != nil { t.Error(err.Error()) t.Fail() @@ -96,7 +119,7 @@ func TestGetBalance(t *testing.T) { } defer wsClientCyprus2.Close() - balance, err = wsClientCyprus2.BalanceAt(context.Background(), common.HexToAddress("0x010978987B569072744dc9426E76590eb6fCfE8B", 
common.Location{0, 1}), nil) + balance, err = wsClientCyprus2.BalanceAt(context.Background(), common.HexToAddress("0x01736f9273a0dF59619Ea4e17c284b422561819e", common.Location{0, 1}), nil) if err != nil { t.Error(err.Error()) t.Fail() diff --git a/quaistats/quaistats.go b/quaistats/quaistats.go index c9c0d93cec..dc6ff20796 100644 --- a/quaistats/quaistats.go +++ b/quaistats/quaistats.go @@ -90,9 +90,9 @@ var ( type backend interface { SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription - CurrentHeader() *types.Header - TotalLogS(header *types.Header) *big.Int - HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) + CurrentHeader() *types.WorkObject + TotalLogS(header *types.WorkObject) *big.Int + HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.WorkObject, error) Stats() (pending int, queued int) ChainConfig() *params.ChainConfig ProcessingState() bool @@ -105,9 +105,9 @@ type backend interface { // reporting to quaistats type fullNodeBackend interface { backend - BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) - BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) - CurrentBlock() *types.Block + BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.WorkObject, error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.WorkObject, error) + CurrentBlock() *types.WorkObject } // Service implements an Quai netstats reporting daemon that pushes local @@ -478,7 +478,7 @@ func (s *Service) initializeURLMap() map[string]string { } } -func (s *Service) handleBlock(block *types.Block) { +func (s *Service) handleBlock(block *types.WorkObject) { // Cache Block s.backend.Logger().WithFields(log.Fields{ "detailsQueueSize": s.detailStatsQueue.Size(), @@ -501,7 +501,7 @@ func (s *Service) handleBlock(block *types.Block) { s.appendTimeStatsQueue.Enqueue(appStats) } - if block.NumberU64(s.backend.NodeCtx())%c_txBatchSize == 0 && s.sendfullstats && block.Header().Location().Context() == common.ZONE_CTX { + if block.NumberU64(s.backend.NodeCtx())%c_txBatchSize == 0 && s.sendfullstats && block.Location().Context() == common.ZONE_CTX { txStats := s.assembleBlockTransactionStats(block) if txStats != nil { s.transactionStatsQueue.Enqueue(txStats) @@ -1024,7 +1024,7 @@ type BatchObject struct { OldestBlockTime uint64 } -func (s *Service) cacheBlock(block *types.Block) cachedBlock { +func (s *Service) cacheBlock(block *types.WorkObject) cachedBlock { currentBlock := cachedBlock{ number: block.NumberU64(s.backend.NodeCtx()), parentHash: block.ParentHash(s.backend.NodeCtx()), @@ -1035,7 +1035,7 @@ func (s *Service) cacheBlock(block *types.Block) cachedBlock { return currentBlock } -func (s *Service) calculateTPS(block *types.Block) *tps { +func (s *Service) calculateTPS(block *types.WorkObject) *tps { var totalTransactions1h uint64 var totalTransactions1m uint64 var currentBlock interface{} @@ -1107,30 +1107,28 @@ func (s *Service) calculateTPS(block *types.Block) *tps { } } -func (s *Service) assembleBlockDetailStats(block *types.Block) *blockDetailStats { +func (s *Service) assembleBlockDetailStats(block *types.WorkObject) *blockDetailStats { if block == nil { return nil } - header := block.Header() - difficulty := header.Difficulty().String() + difficulty := block.Difficulty().String() // Assemble and return the block stats return &blockDetailStats{ - Timestamp: new(big.Int).SetUint64(header.Time()), - 
ZoneHeight: header.NumberU64(2), - RegionHeight: header.NumberU64(1), - PrimeHeight: header.NumberU64(0), + Timestamp: new(big.Int).SetUint64(block.Time()), + ZoneHeight: block.NumberU64(2), + RegionHeight: block.NumberU64(1), + PrimeHeight: block.NumberU64(0), Chain: s.backend.NodeLocation().Name(), - Entropy: common.BigBitsToBits(s.backend.TotalLogS(block.Header())).String(), + Entropy: common.BigBitsToBits(s.backend.TotalLogS(block)).String(), Difficulty: difficulty, } } -func (s *Service) assembleBlockAppendTimeStats(block *types.Block) *blockAppendTime { +func (s *Service) assembleBlockAppendTimeStats(block *types.WorkObject) *blockAppendTime { if block == nil { return nil } - header := block.Header() appendTime := block.GetAppendTime() s.backend.Logger().WithField("appendTime", appendTime.Microseconds()).Info("Raw Block Append Time") @@ -1138,16 +1136,15 @@ func (s *Service) assembleBlockAppendTimeStats(block *types.Block) *blockAppendT // Assemble and return the block stats return &blockAppendTime{ AppendTime: appendTime, - BlockNumber: header.Number(s.backend.NodeCtx()), + BlockNumber: block.Number(s.backend.NodeCtx()), Chain: s.backend.NodeLocation().Name(), } } -func (s *Service) assembleBlockTransactionStats(block *types.Block) *blockTransactionStats { +func (s *Service) assembleBlockTransactionStats(block *types.WorkObject) *blockTransactionStats { if block == nil { return nil } - header := block.Header() tps := s.calculateTPS(block) if tps == nil { return nil @@ -1155,7 +1152,7 @@ func (s *Service) assembleBlockTransactionStats(block *types.Block) *blockTransa // Assemble and return the block stats return &blockTransactionStats{ - Timestamp: new(big.Int).SetUint64(header.Time()), + Timestamp: new(big.Int).SetUint64(block.Time()), TotalNoTransactions1h: tps.TotalNumberTransactions1h, TPS1m: tps.TPS1m, TPS1hr: tps.TPS1hr, diff --git a/trie/proto_trienode.pb.go b/trie/proto_trienode.pb.go index c306fde3df..aa42437cdd 100644 --- a/trie/proto_trienode.pb.go +++ b/trie/proto_trienode.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.32.0 -// protoc v4.25.2 +// protoc-gen-go v1.30.0 +// protoc v4.25.1 // source: trie/proto_trienode.proto package trie
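Taken together, the quaiclient changes above mean a plain in-zone transfer is now built with types.QuaiTx and submitted through the ethclient, as in the rewritten quaiclient/transaction_test.go. The standalone sketch below condenses that flow; the websocket URL, chain ID, addresses, and private key are placeholder fixtures lifted from the test, not canonical values.

package main

import (
	"context"
	"log"
	"math/big"

	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core/types"
	"github.com/dominant-strategies/go-quai/crypto"
	"github.com/dominant-strategies/go-quai/params"
	"github.com/dominant-strategies/go-quai/quaiclient/ethclient"
)

func main() {
	// Placeholder fixtures for a cyprus-1 (zone {0, 0}) node, as in the test.
	location := common.Location{0, 0}
	chainCfg := params.ChainConfig{ChainID: big.NewInt(1337), Location: location}
	key, err := crypto.ToECDSA(common.FromHex("0x7e99ffbdf4b3dda10174f18a0991114bb4a7a684b5972c6901fbe8a4a4bfc325"))
	if err != nil {
		log.Fatal(err)
	}
	from := crypto.PubkeyToAddress(key.PublicKey, location)
	to := common.HexToAddress("0x0147f9CEa7662C567188D58640ffC48901cde02a", location)

	client, err := ethclient.Dial("ws://localhost:8100")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := context.Background()
	nonce, err := client.NonceAt(ctx, from, nil)
	if err != nil {
		log.Fatal(err)
	}

	// QuaiTx is the transaction type used by the rewritten test for a simple transfer.
	inner := types.QuaiTx{
		ChainID:    chainCfg.ChainID,
		Nonce:      nonce,
		GasTipCap:  big.NewInt(1 * params.GWei),
		GasFeeCap:  big.NewInt(1 * params.GWei),
		Gas:        420000,
		To:         &to,
		Value:      big.NewInt(10),
		Data:       nil,
		AccessList: types.AccessList{},
	}
	tx, err := types.SignTx(types.NewTx(&inner), types.LatestSigner(&chainCfg), key)
	if err != nil {
		log.Fatal(err)
	}
	if err := client.SendTransaction(ctx, tx); err != nil {
		log.Fatal(err)
	}
	log.Println("sent", tx.Hash().String())
}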