Added interlink hashes to body and header
gameofpointers committed Apr 1, 2024
1 parent 99b00d4 commit 4701419
Showing 18 changed files with 671 additions and 409 deletions.
9 changes: 9 additions & 0 deletions common/types.go
@@ -31,6 +31,7 @@ import (

    "github.com/dominant-strategies/go-quai/common/hexutil"
    "github.com/dominant-strategies/go-quai/log"
    "github.com/dominant-strategies/go-quai/rlp"
)

// Lengths of hashes and addresses in bytes.
@@ -51,6 +52,7 @@ const (
    MaxZones           = 16
    MaxWidth           = 16
    MaxExpansionNumber = 32
    InterlinkDepth     = 4
)

var (
@@ -224,6 +226,13 @@ func (h *Hashes) ProtoDecode(hashes *ProtoHashes) {
    *h = res
}

// Len returns the length of h.
func (h Hashes) Len() int { return len(h) }

// EncodeIndex RLP-encodes the i'th hash of h into w.
func (h Hashes) EncodeIndex(i int, w *bytes.Buffer) {
    rlp.Encode(w, h[i])
}

/////////// Address

type addrPrefixRange struct {
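Together with Len, EncodeIndex gives common.Hashes the Len/EncodeIndex pair that types.DeriveSha-style root derivation expects, which is what lets later parts of this commit commit the interlink hashes to a single root. Below is a minimal standalone sketch of that pattern; derivableList and deriveRoot are illustrative stand-ins (deriveRoot hashes the concatenated encodings instead of building the stack trie that the real DeriveSha uses).

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
)

// derivableList mirrors the shape expected by root-derivation helpers such as
// types.DeriveSha: a length and a way to encode the i'th element into a buffer.
type derivableList interface {
    Len() int
    EncodeIndex(i int, w *bytes.Buffer)
}

type hashes [][32]byte

func (h hashes) Len() int { return len(h) }

// EncodeIndex writes the raw bytes of the i'th hash; the real common.Hashes
// implementation RLP-encodes it instead.
func (h hashes) EncodeIndex(i int, w *bytes.Buffer) { w.Write(h[i][:]) }

// deriveRoot is a hypothetical stand-in for types.DeriveSha: it hashes the
// concatenated encodings rather than building a stack trie.
func deriveRoot(list derivableList) [32]byte {
    var buf bytes.Buffer
    for i := 0; i < list.Len(); i++ {
        list.EncodeIndex(i, &buf)
    }
    return sha256.Sum256(buf.Bytes())
}

func main() {
    interlinks := hashes{sha256.Sum256([]byte("a")), sha256.Sum256([]byte("b"))}
    fmt.Printf("%x\n", deriveRoot(interlinks))
}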
33 changes: 33 additions & 0 deletions consensus/blake3pow/poem.go
@@ -1,6 +1,8 @@
package blake3pow

import (
"errors"
"math"
"math/big"

"github.com/dominant-strategies/go-quai/common"
@@ -170,3 +172,34 @@ func (blake3pow *Blake3pow) UncledSubDeltaLogS(chain consensus.GenesisReader, he
    }
    return big.NewInt(0)
}

// CalcRank returns the rank of the block within the hierarchy of chains; this
// determines the level of the interlink.
func (blake3pow *Blake3pow) CalcRank(chain consensus.GenesisReader, header *types.Header) (int, error) {
    if chain.IsGenesisHash(header.Hash()) {
        return 0, nil
    }
    _, order, err := blake3pow.CalcOrder(header)
    if err != nil {
        return 0, err
    }
    if order != common.PRIME_CTX {
        return 0, errors.New("rank cannot be computed for a non-prime block")
    }

    powHash := header.Hash()
    target := new(big.Int).Div(common.Big2e256, header.Difficulty())
    zoneThresholdS := blake3pow.IntrinsicLogS(common.BytesToHash(target.Bytes()))

    intrinsicS := blake3pow.IntrinsicLogS(powHash)
    for i := common.InterlinkDepth; i > 0; i-- {
        extraBits := math.Pow(2, float64(i))
        primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(big.NewInt(int64(extraBits))))
        primeBlockEntropyThreshold = new(big.Int).Add(primeBlockEntropyThreshold, common.BitsToBigBits(params.PrimeEntropyTarget))
        if intrinsicS.Cmp(primeBlockEntropyThreshold) > 0 {
            return i, nil
        }
    }

    return 0, nil
}
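For intuition, here is a minimal standalone sketch of the rank loop above, in plain Go. interlinkDepth mirrors common.InterlinkDepth from this commit; bitsToBigBits and the numeric inputs in main are hypothetical stand-ins for common.BitsToBigBits and the real entropy values, so only the shape of the threshold check carries over.

package main

import (
    "fmt"
    "math"
    "math/big"
)

// interlinkDepth mirrors common.InterlinkDepth added in this commit.
const interlinkDepth = 4

// bitsToBigBits is a hypothetical stand-in for common.BitsToBigBits; here it is
// an identity mapping, used only to keep the threshold arithmetic readable.
func bitsToBigBits(bits *big.Int) *big.Int { return new(big.Int).Set(bits) }

// calcRank sketches the loop in CalcRank: a prime block earns interlink level i
// when its intrinsic entropy exceeds the zone threshold plus the prime entropy
// target plus 2^i extra bits. Levels are checked from the deepest down, so the
// highest level the block clears is returned; 0 means no interlink level.
func calcRank(intrinsicS, zoneThresholdS, primeEntropyTarget *big.Int) int {
    for i := interlinkDepth; i > 0; i-- {
        extraBits := big.NewInt(int64(math.Pow(2, float64(i))))
        threshold := new(big.Int).Add(zoneThresholdS, bitsToBigBits(extraBits))
        threshold.Add(threshold, bitsToBigBits(primeEntropyTarget))
        if intrinsicS.Cmp(threshold) > 0 {
            return i
        }
    }
    return 0
}

func main() {
    // Hypothetical entropy values, chosen only to exercise the loop.
    zone := big.NewInt(1000)
    primeTarget := big.NewInt(50)
    fmt.Println(calcRank(big.NewInt(1060), zone, primeTarget)) // 1060 > 1000+8+50 but not > 1000+16+50 -> prints 3
}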
3 changes: 3 additions & 0 deletions consensus/consensus.go
@@ -103,6 +103,9 @@ type Engine interface {
    // UncledSubDeltaLogS returns the log of the uncled entropy reduction since the past coincident
    UncledSubDeltaLogS(chain GenesisReader, header *types.Header) *big.Int

    // CalcRank calculates the rank of the prime block
    CalcRank(chain GenesisReader, header *types.Header) (int, error)

    ComputePowLight(header *types.Header) (mixHash, powHash common.Hash)

    // VerifyHeader checks whether a header conforms to the consensus rules of a
37 changes: 37 additions & 0 deletions consensus/progpow/poem.go
@@ -1,6 +1,8 @@
package progpow

import (
"errors"
"math"
"math/big"

"github.com/dominant-strategies/go-quai/common"
@@ -166,3 +168,38 @@ func (progpow *Progpow) UncledSubDeltaLogS(chain consensus.GenesisReader, header
    }
    return big.NewInt(0)
}

// CalcRank returns the rank of the block within the hierarchy of chains; this
// determines the level of the interlink.
func (progpow *Progpow) CalcRank(chain consensus.GenesisReader, header *types.Header) (int, error) {
    if chain.IsGenesisHash(header.Hash()) {
        return 0, nil
    }
    _, order, err := progpow.CalcOrder(header)
    if err != nil {
        return 0, err
    }
    if order != common.PRIME_CTX {
        return 0, errors.New("rank cannot be computed for a non-prime block")
    }

    // Verify the seal and get the powHash for the given header
    powHash, err := progpow.verifySeal(header)
    if err != nil {
        return 0, err
    }

    target := new(big.Int).Div(common.Big2e256, header.Difficulty())
    zoneThresholdS := progpow.IntrinsicLogS(common.BytesToHash(target.Bytes()))

    intrinsicS := progpow.IntrinsicLogS(powHash)
    for i := common.InterlinkDepth; i > 0; i-- {
        extraBits := math.Pow(2, float64(i))
        primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(big.NewInt(int64(extraBits))))
        primeBlockEntropyThreshold = new(big.Int).Add(primeBlockEntropyThreshold, common.BitsToBigBits(params.PrimeEntropyTarget))
        if intrinsicS.Cmp(primeBlockEntropyThreshold) > 0 {
            return i, nil
        }
    }
    return 0, nil
}
6 changes: 6 additions & 0 deletions core/block_validator.go
@@ -79,6 +79,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
            // If we have a subordinate chain, it is impossible for the subordinate manifest to be empty
            return ErrBadSubManifest
        }
        if nodeCtx == common.PRIME_CTX {
            interlinkRootHash := types.DeriveSha(block.InterlinkHashes(), trie.NewStackTrie(nil))
            if interlinkRootHash != header.InterlinkRootHash() {
                return ErrBadInterlink
            }
        }
    } else {
        // Header validity is known at this point, check the uncles and transactions
        if err := v.engine.VerifyUncles(v.hc, block); err != nil {
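The new prime-context branch above rejects any block whose body-level interlink hashes do not derive the root committed in the header. The sketch below is a rough, self-contained model of that check: it swaps the real types.DeriveSha/stack-trie commitment for a simple sha256-over-concatenation, so only the compare-and-reject shape is faithful.

package main

import (
    "crypto/sha256"
    "errors"
    "fmt"
)

type hash = [32]byte

var errBadInterlink = errors.New("interlink is incorrect")

// interlinkRoot is a stand-in commitment over the interlink hashes; the real
// validator uses types.DeriveSha(block.InterlinkHashes(), trie.NewStackTrie(nil)).
func interlinkRoot(hashes []hash) hash {
    h := sha256.New()
    for _, x := range hashes {
        h.Write(x[:])
    }
    var out hash
    copy(out[:], h.Sum(nil))
    return out
}

// validateInterlink mirrors the shape of the prime-context check in ValidateBody:
// the root recomputed from the body's interlink hashes must match the root the
// header commits to, otherwise the block is rejected.
func validateInterlink(bodyInterlinks []hash, headerRoot hash) error {
    if interlinkRoot(bodyInterlinks) != headerRoot {
        return errBadInterlink
    }
    return nil
}

func main() {
    links := []hash{sha256.Sum256([]byte("prime-ancestor-1")), sha256.Sum256([]byte("prime-ancestor-2"))}
    root := interlinkRoot(links)
    fmt.Println(validateInterlink(links, root))            // <nil>
    fmt.Println(validateInterlink(links[:1], root) != nil) // true: a tampered body fails
}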
3 changes: 3 additions & 0 deletions core/error.go
@@ -50,6 +50,9 @@ var (
    // ErrBadSubManifest is returned when a block's subordinate manifest does not match the subordinate manifest hash
    ErrBadSubManifest = errors.New("subordinate manifest is incorrect")

    // ErrBadInterlink is returned when a block's interlink does not match the interlink hash
    ErrBadInterlink = errors.New("interlink is incorrect")

    // ErrPendingBlock indicates the block couldn't yet be processed. This is likely due to missing information (ancestor, body, pendingEtxs, etc)
    ErrPendingBlock = errors.New("block cannot be appended yet")
11 changes: 11 additions & 0 deletions core/headerchain.go
@@ -299,6 +299,17 @@ func (hc *HeaderChain) AppendHeader(header *types.Header) error {
        }
    }

    // Verify the Interlink root hash matches the interlink
    if nodeCtx == common.PRIME_CTX {
        interlinkHashes := rawdb.ReadInterlinkHashes(hc.headerDb, header.ParentHash(nodeCtx))
        if interlinkHashes == nil {
            return errors.New("interlink hashes not found")
        }
        if header.InterlinkRootHash() != types.DeriveSha(interlinkHashes, trie.NewStackTrie(nil)) {
            return errors.New("interlink root hash does not match interlink")
        }
    }

    return nil
}

func (hc *HeaderChain) ProcessingState() bool {
42 changes: 39 additions & 3 deletions core/rawdb/accessors_chain.go
@@ -881,7 +881,7 @@ func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64, location common
    if body == nil {
        return nil
    }
    return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles, body.ExtTransactions, body.SubManifest)
    return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles, body.ExtTransactions, body.SubManifest, body.InterlinkHashes)
}

// WriteBlock serializes a block into the database, header and body separately.
@@ -993,7 +993,7 @@ func ReadBadBlock(db ethdb.Reader, hash common.Hash, location common.Location) *
    }
    for _, bad := range *badBlocks {
        if bad.Header.Hash() == hash {
            return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles, bad.Body.ExtTransactions, bad.Body.SubManifest)
            return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles, bad.Body.ExtTransactions, bad.Body.SubManifest, bad.Body.InterlinkHashes)
        }
    }
    return nil
@@ -1020,7 +1020,7 @@ func ReadAllBadBlocks(db ethdb.Reader, location common.Location) []*types.Block
    }
    var blocks []*types.Block
    for _, bad := range *badBlocks {
        blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles, bad.Body.ExtTransactions, bad.Body.SubManifest))
        blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles, bad.Body.ExtTransactions, bad.Body.SubManifest, bad.Body.InterlinkHashes))
    }
    return blocks
}
@@ -1407,6 +1407,42 @@ func DeleteManifest(db ethdb.KeyValueWriter, hash common.Hash) {
    }
}

// ReadInterlinkHashes retrieves the interlink hashes corresponding to a given block
func ReadInterlinkHashes(db ethdb.Reader, hash common.Hash) common.Hashes {
    // Try to look up the data in leveldb.
    data, _ := db.Get(interlinkHashKey(hash))
    if len(data) == 0 {
        return nil
    }
    protoInterlinkHashes := new(common.ProtoHashes)
    err := proto.Unmarshal(data, protoInterlinkHashes)
    if err != nil {
        log.Global.WithField("err", err).Fatal("Failed to proto Unmarshal interlink hashes")
    }
    interlinkHashes := new(common.Hashes)
    interlinkHashes.ProtoDecode(protoInterlinkHashes)
    return *interlinkHashes
}

// WriteInterlinkHashes stores the interlink hashes corresponding to a given block
func WriteInterlinkHashes(db ethdb.KeyValueWriter, hash common.Hash, interlinkHashes common.Hashes) {
    protoInterlinkHashes := interlinkHashes.ProtoEncode()
    data, err := proto.Marshal(protoInterlinkHashes)
    if err != nil {
        log.Global.WithField("err", err).Fatal("Failed to proto Marshal interlink hashes")
    }
    if err := db.Put(interlinkHashKey(hash), data); err != nil {
        log.Global.WithField("err", err).Fatal("Failed to store interlink hashes")
    }
}

// DeleteInterlinkHashes removes the interlink hashes data associated with a block.
func DeleteInterlinkHashes(db ethdb.KeyValueWriter, hash common.Hash) {
    if err := db.Delete(interlinkHashKey(hash)); err != nil {
        log.Global.WithField("err", err).Fatal("Failed to delete interlink hashes")
    }
}

// ReadBloomProto retrieves the bloom for the given block, in bytes
func ReadBloomProto(db ethdb.Reader, hash common.Hash) []byte {
    // Try to look up the data in leveldb.
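The three accessors above follow the usual rawdb pattern: a short prefix plus the block hash forms the key, the value is the proto-encoded hash list, and a read returns nil when nothing is stored. The sketch below models that round trip with an in-memory map; the "il" prefix matches the schema change in this commit, while the store type and the byte encoding are simplifications.

package main

import "fmt"

var interlinkPrefix = []byte("il") // interlinkPrefix + hash -> interlink at block

type hash = [32]byte

// interlinkHashKey mirrors the schema helper: the "il" prefix followed by the block hash.
func interlinkHashKey(h hash) string {
    return string(append(append([]byte{}, interlinkPrefix...), h[:]...))
}

// memDB is a toy stand-in for the ethdb key-value store; the real accessors
// proto-encode a common.Hashes value before calling db.Put.
type memDB map[string][]byte

func writeInterlinkHashes(db memDB, block hash, encoded []byte) {
    db[interlinkHashKey(block)] = encoded
}

// readInterlinkHashes returns nil when no entry exists, matching the
// empty-lookup behaviour of ReadInterlinkHashes.
func readInterlinkHashes(db memDB, block hash) []byte {
    data, ok := db[interlinkHashKey(block)]
    if !ok || len(data) == 0 {
        return nil
    }
    return data
}

func main() {
    db := memDB{}
    var block hash
    copy(block[:], []byte("some-prime-block-hash"))
    writeInterlinkHashes(db, block, []byte("encoded interlink hashes"))
    fmt.Printf("%s\n", readInterlinkHashes(db, block))  // encoded interlink hashes
    fmt.Println(readInterlinkHashes(db, hash{}) == nil) // true: nothing stored for this hash
}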
6 changes: 6 additions & 0 deletions core/rawdb/schema.go
@@ -108,6 +108,7 @@ var (
    pendingEtxsPrefix = []byte("pe") // pendingEtxsPrefix + hash -> PendingEtxs at block
    pendingEtxsRollupPrefix = []byte("pr") // pendingEtxsRollupPrefix + hash -> PendingEtxsRollup at block
    manifestPrefix = []byte("ma") // manifestPrefix + hash -> Manifest at block
    interlinkPrefix = []byte("il") // interlinkPrefix + hash -> Interlink at block
    bloomPrefix = []byte("bl") // bloomPrefix + hash -> bloom at block

    txLookupPrefix = []byte("l") // txLookupPrefix + hash -> transaction/receipt lookup metadata
@@ -337,6 +338,11 @@ func manifestKey(hash common.Hash) []byte {
    return append(manifestPrefix, hash.Bytes()...)
}

// interlinkHashKey = interlinkPrefix + hash
func interlinkHashKey(hash common.Hash) []byte {
    return append(interlinkPrefix, hash.Bytes()...)
}

func bloomKey(hash common.Hash) []byte {
    return append(bloomPrefix, hash.Bytes()...)
}
20 changes: 17 additions & 3 deletions core/slice.go
@@ -1263,7 +1263,11 @@ func (sl *Slice) ConstructLocalBlock(header *types.Header) (*types.Block, error)
    for i, blockHash := range pendingBlockBody.SubManifest {
        subManifest[i] = blockHash
    }
    block := types.NewBlockWithHeader(header).WithBody(txs, uncles, etxs, subManifest)
    interlinkHashes := make(common.Hashes, len(pendingBlockBody.InterlinkHashes))
    for i, interlinkhash := range pendingBlockBody.InterlinkHashes {
        interlinkHashes[i] = interlinkhash
    }
    block := types.NewBlockWithHeader(header).WithBody(txs, uncles, etxs, subManifest, interlinkHashes)
    if err := sl.validator.ValidateBody(block); err != nil {
        return block, err
    } else {
Expand All @@ -1283,7 +1287,12 @@ func (sl *Slice) ConstructLocalMinedBlock(header *types.Header) (*types.Block, e
return nil, ErrBodyNotFound
}
} else {
pendingBlockBody = &types.Body{}
// If the context is PRIME, there is the interlink hashes that needs to be returned from the database
var interlinkHashes common.Hashes
if nodeCtx == common.PRIME_CTX {
interlinkHashes = rawdb.ReadInterlinkHashes(sl.sliceDb, header.ParentHash(common.PRIME_CTX))
}
pendingBlockBody = &types.Body{InterlinkHashes: interlinkHashes}
}
// Load uncles because they are not included in the block response.
txs := make([]*types.Transaction, len(pendingBlockBody.Transactions))
@@ -1303,7 +1312,11 @@ func (sl *Slice) ConstructLocalMinedBlock(header *types.Header) (*types.Block, e
    for i, blockHash := range pendingBlockBody.SubManifest {
        subManifest[i] = blockHash
    }
    block := types.NewBlockWithHeader(header).WithBody(txs, uncles, etxs, subManifest)
    interlinkhashes := make(common.Hashes, len(pendingBlockBody.InterlinkHashes))
    for i, interlinkhash := range pendingBlockBody.InterlinkHashes {
        interlinkhashes[i] = interlinkhash
    }
    block := types.NewBlockWithHeader(header).WithBody(txs, uncles, etxs, subManifest, interlinkhashes)
    if err := sl.validator.ValidateBody(block); err != nil {
        return block, err
    } else {
Expand All @@ -1328,6 +1341,7 @@ func (sl *Slice) combinePendingHeader(header *types.Header, slPendingHeader *typ
combinedPendingHeader.SetThresholdCount(header.ThresholdCount())
combinedPendingHeader.SetExpansionNumber(header.ExpansionNumber())
combinedPendingHeader.SetEtxEligibleSlices(header.EtxEligibleSlices())
combinedPendingHeader.SetInterlinkRootHash(header.InterlinkRootHash())
}

if inSlice {
(The remaining changed files in this commit are not shown.)