From ea81e49a7070a978886fee48c5dd82c45577497f Mon Sep 17 00:00:00 2001
From: gop
Date: Mon, 11 Nov 2024 15:11:59 -0600
Subject: [PATCH] Storing the best node set on stop and retrieving on launch

---
 cmd/utils/hierarchical_coordinator.go | 150 ++++++++++++++++++-------
 core/headerchain.go                   |   2 +-
 params/config.go                      |  17 +--
 quai/backend.go                       |   1 +
 4 files changed, 120 insertions(+), 50 deletions(-)

diff --git a/cmd/utils/hierarchical_coordinator.go b/cmd/utils/hierarchical_coordinator.go
index a0b43ff5f8..39b58c6f09 100644
--- a/cmd/utils/hierarchical_coordinator.go
+++ b/cmd/utils/hierarchical_coordinator.go
@@ -36,6 +36,7 @@ const (
 
 var (
 	c_currentExpansionNumberKey = []byte("cexp")
+	c_bestNodeKey               = []byte("best")
 )
 
 type Node struct {
@@ -45,10 +46,61 @@ type Node struct {
 	entropy  *big.Int
 }
 
+func (ch *Node) ProtoEncode() *ProtoNode {
+	protoNumber := make([][]byte, common.HierarchyDepth)
+	for i, num := range ch.number {
+		protoNumber[i] = num.Bytes()
+	}
+	protoNode := &ProtoNode{
+		Hash:     ch.hash.ProtoEncode(),
+		Number:   protoNumber,
+		Location: ch.location.ProtoEncode(),
+		Entropy:  ch.entropy.Bytes(),
+	}
+	return protoNode
+}
+
+func (ch *Node) ProtoDecode(protoNode *ProtoNode) {
+	hash := &common.Hash{}
+	hash.ProtoDecode(protoNode.GetHash())
+	ch.hash = *hash
+
+	number := make([]*big.Int, common.HierarchyDepth)
+	for i, num := range protoNode.GetNumber() {
+		number[i] = new(big.Int).SetBytes(num)
+	}
+	ch.number = number
+
+	location := &common.Location{}
+	location.ProtoDecode(protoNode.GetLocation())
+	ch.location = *location
+
+	ch.entropy = new(big.Int).SetBytes(protoNode.GetEntropy())
+}
+
 type NodeSet struct {
 	nodes map[string]Node
 }
 
+func (ns *NodeSet) ProtoEncode() *ProtoNodeSet {
+	protoNodeSet := &ProtoNodeSet{}
+	protoNodeSet.NodeSet = make(map[string]*ProtoNode)
+
+	for loc, node := range ns.nodes {
+		node := node.ProtoEncode()
+		protoNodeSet.NodeSet[loc] = node
+	}
+	return protoNodeSet
+}
+
+func (ns *NodeSet) ProtoDecode(protoNodeSet *ProtoNodeSet) {
+	for loc, protoNode := range protoNodeSet.NodeSet {
+		node := &Node{}
+		node.ProtoDecode(protoNode)
+		ns.nodes[loc] = *node
+	}
+}
+
 func (ch *Node) Empty() bool {
 	return ch.hash == common.Hash{} && ch.location.Equal(common.Location{}) && ch.entropy == nil
 }
@@ -137,6 +189,8 @@ func (hc *HierarchicalCoordinator) InitPendingHeaders() {
 		}
 	}
 	hc.Add(new(big.Int).SetUint64(0), nodeSet, hc.pendingHeaders)
+
+	hc.LoadBestNodeSet()
 }
 
 func (hc *HierarchicalCoordinator) Add(entropy *big.Int, node NodeSet, newPendingHeaders *PendingHeaders) {
@@ -208,11 +262,14 @@ func (ns *NodeSet) Extendable(wo *types.WorkObject, order int) bool {
 
 func (ns *NodeSet) Entropy(numRegions int, numZones int) *big.Int {
 	entropy := new(big.Int)
-	entropy.Add(entropy, ns.nodes[common.Location{}.Name()].entropy)
+	primeEntropy := ns.nodes[common.Location{}.Name()].entropy
+	entropy.Add(entropy, primeEntropy)
 	for i := 0; i < numRegions; i++ {
-		entropy.Add(entropy, ns.nodes[common.Location{byte(i)}.Name()].entropy)
+		regionEntropy := ns.nodes[common.Location{byte(i)}.Name()].entropy
+		entropy.Add(entropy, regionEntropy)
 		for j := 0; j < numZones; j++ {
-			entropy.Add(entropy, ns.nodes[common.Location{byte(i), byte(j)}.Name()].entropy)
+			zoneEntropy := ns.nodes[common.Location{byte(i), byte(j)}.Name()].entropy
+			entropy.Add(entropy, zoneEntropy)
 		}
 	}
 
@@ -430,6 +487,7 @@ func (hc *HierarchicalCoordinator) Stop() {
 	for _, chainEventSub := range hc.chainSubs {
 		chainEventSub.Unsubscribe()
 	}
+	hc.StoreBestNodeSet()
 	hc.expansionSub.Unsubscribe()
 	hc.db.Close()
 	hc.wg.Wait()
@@ -602,44 +660,6 @@ func (hc *HierarchicalCoordinator) ChainEventLoop(chainEvent chan core.ChainEven
 	for {
 		select {
 		case head := <-chainEvent:
-			// If this is the first block we have after a restart, then we can
-			// add this block into the node set directly
-			// Since on startup we initialize the pending headers cache with the
-			// genesis block, we can check and see if we are in that state
-			// We can do that by checking the length of the pendding headers order
-			// cache length is 1
-			if len(hc.pendingHeaders.order) == 1 {
-				// create a nodeset on this block
-				nodeSet := NodeSet{
-					nodes: make(map[string]Node),
-				}
-
-				//Initialize for prime
-				backend := hc.GetBackend(common.Location{})
-				entropy := backend.TotalLogEntropy(head.Block)
-				newNode := Node{
-					hash:     head.Block.ParentHash(common.PRIME_CTX),
-					number:   head.Block.NumberArray(),
-					location: common.Location{},
-					entropy:  entropy,
-				}
-				nodeSet.nodes[common.Location{}.Name()] = newNode
-
-				regionLocation := common.Location{byte(head.Block.Location().Region())}
-				backend = hc.GetBackend(regionLocation)
-				newNode.hash = head.Block.ParentHash(common.REGION_CTX)
-				newNode.location = regionLocation
-				newNode.entropy = entropy
-				nodeSet.nodes[regionLocation.Name()] = newNode
-
-				zoneLocation := head.Block.Location()
-				backend = hc.GetBackend(zoneLocation)
-				newNode.hash = head.Block.ParentHash(common.ZONE_CTX)
-				newNode.location = zoneLocation
-				newNode.entropy = entropy
-				nodeSet.nodes[zoneLocation.Name()] = newNode
-				hc.Add(entropy, nodeSet, hc.pendingHeaders)
-			}
 			go hc.ReapplicationLoop(head)
 
 			go hc.ComputeMapPending(head)
@@ -1321,3 +1341,51 @@ func (hc *HierarchicalCoordinator) GetBackendForLocationAndOrder(location common
 	}
 	return nil
 }
+
+func (hc *HierarchicalCoordinator) StoreBestNodeSet() {
+
+	log.Global.Info("Storing the best node set on stop")
+
+	bestNode, exists := hc.pendingHeaders.collection.Get(hc.bestEntropy.String())
+	if !exists {
+		log.Global.Error("best entropy node set doesnt exist in the pending headers collection")
+		return
+	}
+
+	protoBestNode := bestNode.ProtoEncode()
+
+	data, err := proto.Marshal(protoBestNode)
+	if err != nil {
+		log.Global.Error("Error marshalling best node, err: ", err)
+		return
+	}
+
+	err = hc.db.Put(c_bestNodeKey, data, nil)
+	if err != nil {
+		log.Global.Error("Error storing the best node key, err: ", err)
+		return
+	}
+}
+
+func (hc *HierarchicalCoordinator) LoadBestNodeSet() {
+	data, err := hc.db.Get(c_bestNodeKey, nil)
+	if err != nil {
+		log.Global.Error("Error loading the best node, err: ", err)
+		return
+	}
+
+	protoNodeSet := &ProtoNodeSet{}
+	err = proto.Unmarshal(data, protoNodeSet)
+	if err != nil {
+		log.Global.Error("Error unmarshalling the proto node set, err: ", err)
+		return
+	}
+
+	nodeSet := NodeSet{}
+	nodeSet.nodes = make(map[string]Node)
+	nodeSet.ProtoDecode(protoNodeSet)
+
+	numRegions, numZones := common.GetHierarchySizeForExpansionNumber(hc.currentExpansionNumber)
+	hc.bestEntropy = nodeSet.Entropy(int(numRegions), int(numZones))
+	hc.Add(hc.bestEntropy, nodeSet, hc.pendingHeaders)
+}
diff --git a/core/headerchain.go b/core/headerchain.go
index da1408817e..aff75c4aa3 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -1224,7 +1224,7 @@ func (hc *HeaderChain) ComputeExpansionNumber(parent *types.WorkObject) (uint8,
 	}
 
 	// If the Prime Terminus is genesis the expansion number is the genesis expansion number
-	if hc.IsGenesisHash(primeTerminusHash) && hc.NodeLocation().Equal(common.Location{0, 0}) {
+	if (hc.IsGenesisHash(primeTerminusHash) && hc.NodeLocation().Equal(common.Location{0, 0})) || hc.config.StartingExpansionNumber != 0 {
 		return primeTerminus.ExpansionNumber(), nil
 	} else {
 		// check if the prime terminus is the block where the threshold count
diff --git a/params/config.go b/params/config.go
index dbb147558d..e0b527320d 100644
--- a/params/config.go
+++ b/params/config.go
@@ -112,9 +112,9 @@ var (
 	//
 	// This configuration is intentionally not using keyed fields to force anyone
 	// adding flags to the config to also have to set these fields.
-	AllProgpowProtocolChanges = &ChainConfig{big.NewInt(1337), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Location{}, common.Hash{}, false}
+	AllProgpowProtocolChanges = &ChainConfig{big.NewInt(1337), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Location{}, common.Hash{}, false, 0}
 
-	TestChainConfig = &ChainConfig{big.NewInt(1), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Location{}, common.Hash{}, false}
+	TestChainConfig = &ChainConfig{big.NewInt(1), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Location{}, common.Hash{}, false, 0}
 	TestRules       = TestChainConfig.Rules(new(big.Int))
 )
 
@@ -126,12 +126,13 @@
 type ChainConfig struct {
 	ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection
 	// Various consensus engines
-	ConsensusEngine    string
-	Blake3Pow          *Blake3powConfig `json:"blake3pow,omitempty"`
-	Progpow            *ProgpowConfig   `json:"progpow,omitempty"`
-	Location           common.Location
-	DefaultGenesisHash common.Hash
-	IndexAddressUtxos  bool
+	ConsensusEngine         string
+	Blake3Pow               *Blake3powConfig `json:"blake3pow,omitempty"`
+	Progpow                 *ProgpowConfig   `json:"progpow,omitempty"`
+	Location                common.Location
+	DefaultGenesisHash      common.Hash
+	IndexAddressUtxos       bool
+	StartingExpansionNumber uint64
 }
 
 // SetLocation sets the location on the chain config
diff --git a/quai/backend.go b/quai/backend.go
index ce21140b0e..b46dde4557 100644
--- a/quai/backend.go
+++ b/quai/backend.go
@@ -181,6 +181,7 @@ func New(stack *node.Node, p2p NetworkingAPI, config *quaiconfig.Config, nodeCtx
 	chainConfig.Location = config.NodeLocation // TODO: See why this is necessary
 	chainConfig.DefaultGenesisHash = config.DefaultGenesisHash
 	chainConfig.IndexAddressUtxos = config.IndexAddressUtxos
+	chainConfig.StartingExpansionNumber = startingExpansionNumber
 	logger.WithFields(log.Fields{
 		"Ctx":          nodeCtx,
 		"NodeLocation": config.NodeLocation,