Skip to content
This repository has been archived by the owner on Mar 8, 2024. It is now read-only.

Quai client tests #96

Open
wants to merge 8 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,6 @@ nodelogs/
*/config.toml
*/private.key
.vscode/
.idea*
testlogs/
.idea*
*.cov
3 changes: 2 additions & 1 deletion consensus/progpow/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -428,7 +428,8 @@ func (progpow *Progpow) verifySeal(header *types.Header) (common.Hash, error) {
if progpow.fakeFail == header.NumberU64(nodeCtx) {
return common.Hash{}, errInvalidPoW
}
return common.Hash{}, nil
//if hash is empty here, it fails because of div / 0 on poem.go: IntrinsicLogS()
return common.HexToHash("0xf5d8c9fb1a61e47c6dd4b5d0a1a0d6c0f7bce9cfae0e2a9d8a9c8d6d6f8f4f7"), nil
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

what is this hash?

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is a random hash.
The hash just can't be zero, because a zero hash leads to a division by zero in IntrinsicLogS().

}
// If we're running a shared PoW, delegate verification to it
if progpow.shared != nil {
Expand Down
6 changes: 4 additions & 2 deletions consensus/progpow/progpow.go
Original file line number Diff line number Diff line change
Expand Up @@ -219,11 +219,13 @@ func NewTester(notify []string, noverify bool) *Progpow {
// NewFaker creates a progpow consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the Quai
// consensus rules.
func NewFaker() *Progpow {
func NewFaker(logger *log.Logger, nodeLocation common.Location) *Progpow {
return &Progpow{
config: Config{
PowMode: ModeFake,
PowMode: ModeFake,
NodeLocation: nodeLocation,
},
logger: logger,
}
}

Expand Down
22 changes: 13 additions & 9 deletions core/chain_makers.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,13 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/consensus"
"github.com/dominant-strategies/go-quai/consensus/misc"
"github.com/dominant-strategies/go-quai/core/rawdb"
"github.com/dominant-strategies/go-quai/core/state"
"github.com/dominant-strategies/go-quai/core/types"
"github.com/dominant-strategies/go-quai/core/vm"
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/params"
"github.com/dominant-strategies/go-quai/trie"
)

// BlockGen creates blocks for testing.
Expand Down Expand Up @@ -215,6 +217,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
chainreader := &fakeChainReader{config: config}
genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
b.subManifest = types.BlockManifest{parent.Hash()}
b.header = makeHeader(chainreader, parent, statedb, b.engine)

// Execute any user modifications to the block
Expand Down Expand Up @@ -250,6 +253,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
panic(err)
}
block, receipt := genblock(i, parent, statedb)
rawdb.WriteBlock(db, block, config.Location.Context())
blocks[i] = block
receipts[i] = receipt
parent = block
Expand All @@ -264,26 +268,26 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S
} else {
time = parent.Time() + 10 // block time is fixed at 10 seconds
}
nodeCtx := chain.Config().Location.Context()

// Temporary header values just to calc difficulty
diffheader := types.EmptyHeader()
diffheader.SetDifficulty(parent.Difficulty(nodeCtx))
diffheader.SetNumber(parent.Number(nodeCtx), nodeCtx)
diffheader.SetTime(time - 10)
diffheader.SetUncleHash(parent.UncleHash())
nodeLoc := chain.Config().Location
nodeCtx := nodeLoc.Context()

// Make new header
header := types.EmptyHeader()
header.SetUTXORoot(state.UTXORoot())
header.SetEVMRoot(state.IntermediateRoot(true))
header.SetParentHash(parent.Hash(), nodeCtx)
header.SetCoinbase(parent.Coinbase())
header.SetDifficulty(engine.CalcDifficulty(chain, diffheader))
header.SetDifficulty(engine.CalcDifficulty(chain, parent.Header()))
header.SetGasLimit(parent.GasLimit())
header.SetNumber(new(big.Int).Add(parent.Number(nodeCtx), common.Big1), nodeCtx)
header.SetTime(time)
header.SetBaseFee(misc.CalcBaseFee(chain.Config(), parent.Header()))

header.SetLocation(nodeLoc)

manifest := types.BlockManifest{parent.Hash()}
header.SetManifestHash(types.DeriveSha(manifest, trie.NewStackTrie(nil)), nodeCtx)

return header
}

Expand Down
25 changes: 23 additions & 2 deletions core/core.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,12 +85,18 @@ type IndexerConfig struct {
IndexAddressUtxos bool
}

type NewCoreFunction func(db ethdb.Database, config *Config, isLocalBlock func(block *types.Header) bool, txConfig *TxPoolConfig, txLookupLimit *uint64, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, indexerConfig *IndexerConfig, genesis *Genesis, logger *log.Logger) (*Core, error)

// NewCore constructs a Core backed by a real Slice (NewSlice) and then runs the
// setup shared with the test constructor via newCommonCore. It returns an error
// if the underlying Slice cannot be built.
func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.Header) bool, txConfig *TxPoolConfig, txLookupLimit *uint64, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, indexerConfig *IndexerConfig, genesis *Genesis, logger *log.Logger) (*Core, error) {
	slice, err := NewSlice(db, config, txConfig, txLookupLimit, isLocalBlock, chainConfig, slicesRunning, domClientUrl, subClientUrls, engine, cacheConfig, indexerConfig, vmConfig, genesis, logger)
	if err != nil {
		return nil, err
	}

	return newCommonCore(slice, engine, logger)
}

func newCommonCore(slice *Slice, engine consensus.Engine, logger *log.Logger) (*Core, error) {
c := &Core{
sl: slice,
engine: engine,
Expand All @@ -103,6 +109,23 @@ func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.H
// Initialize the sync target to current header parent entropy
c.syncTarget = c.CurrentHeader()

c.AppendQueueProcessCache()

return c, nil
}

// NewFakeCore is used in unit testing. It mirrors NewCore but builds the Core
// on top of a fake Slice (NewFakeSlice), so tests do not need live dom/sub
// RPC connections.
func NewFakeCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.Header) bool, txConfig *TxPoolConfig, txLookupLimit *uint64, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, indexerConfig *IndexerConfig, genesis *Genesis, logger *log.Logger) (*Core, error) {
	slice, err := NewFakeSlice(db, config, txConfig, txLookupLimit, isLocalBlock, chainConfig, slicesRunning, domClientUrl, subClientUrls, engine, cacheConfig, indexerConfig, vmConfig, genesis, logger)

	if err != nil {
		return nil, err
	}

	return newCommonCore(slice, engine, logger)
}

func (c *Core) AppendQueueProcessCache() {
appendQueue, _ := lru.New(c_maxAppendQueue)
c.appendQueue = appendQueue

Expand All @@ -115,8 +138,6 @@ func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.H
go c.updateAppendQueue()
go c.startStatsTimer()
go c.checkSyncTarget()

return c, nil
}

// InsertChain attempts to append a list of blocks to the slice, optionally
Expand Down
1 change: 1 addition & 0 deletions core/genesis.go
Original file line number Diff line number Diff line change
Expand Up @@ -271,6 +271,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
head.SetGasUsed(0)
head.SetCoinbase(common.Zero)
head.SetBaseFee(new(big.Int).SetUint64(params.InitialBaseFee))
head.SetCoinbase(g.Coinbase)
if g.GasLimit == 0 {
head.SetGasLimit(params.GenesisGasLimit)
}
Expand Down
51 changes: 36 additions & 15 deletions core/slice.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,23 @@ type Slice struct {

func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLookupLimit *uint64, isLocalBlock func(block *types.Header) bool, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, indexerConfig *IndexerConfig, vmConfig vm.Config, genesis *Genesis, logger *log.Logger) (*Slice, error) {
nodeCtx := chainConfig.Location.Context()
sl, err := newSliceCommon(db, config, txConfig, txLookupLimit, isLocalBlock, chainConfig, slicesRunning, domClientUrl, subClientUrls, engine, cacheConfig, indexerConfig, vmConfig, genesis, nodeCtx, logger)

if err != nil {
return nil, err
}

// only set domClient if the chain is not Prime.
if nodeCtx != common.PRIME_CTX {
go func() {
sl.domClient = makeDomClient(domClientUrl, logger)
}()
}

return sl, nil
}

func newSliceCommon(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLookupLimit *uint64, isLocalBlock func(block *types.Header) bool, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, indexerConfig *IndexerConfig, vmConfig vm.Config, genesis *Genesis, nodeCtx int, logger *log.Logger) (*Slice, error) {
sl := &Slice{
config: chainConfig,
engine: engine,
Expand Down Expand Up @@ -124,13 +141,6 @@ func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLooku
}()
}

// only set domClient if the chain is not Prime.
if nodeCtx != common.PRIME_CTX {
go func() {
sl.domClient = makeDomClient(domClientUrl, sl.logger)
}()
}

if err := sl.init(genesis); err != nil {
return nil, err
}
Expand All @@ -144,6 +154,24 @@ func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLooku
return sl, nil
}

// NewFakeSlice is used in unit testing. It mirrors NewSlice but wires the dom
// client to an in-process test RPC stub (quaiclient.TestRpcClient) instead of
// dialing a remote node, so tests can exercise the slice without a network.
func NewFakeSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLookupLimit *uint64, isLocalBlock func(block *types.Header) bool, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, indexerConfig *IndexerConfig, vmConfig vm.Config, genesis *Genesis, logger *log.Logger) (*Slice, error) {
	nodeCtx := chainConfig.Location.Context()
	sl, err := newSliceCommon(db, config, txConfig, txLookupLimit, isLocalBlock, chainConfig, slicesRunning, domClientUrl, subClientUrls, engine, cacheConfig, indexerConfig, vmConfig, genesis, nodeCtx, logger)
	if err != nil {
		return nil, err
	}

	// Only set domClient if the chain is not Prime. Unlike the production path
	// (which dials a remote node and therefore does so in a goroutine), the
	// test stub is constructed in-process and cannot block, so assign it
	// synchronously: writing sl.domClient from a spawned goroutine with no
	// synchronization is a data race with tests that read it right after
	// construction, and can leave domClient nil at first use.
	if nodeCtx != common.PRIME_CTX {
		sl.domClient = quaiclient.NewClient(&quaiclient.TestRpcClient{})
	}

	return sl, nil
}

// Append takes a proposed header and constructs a local block and attempts to hierarchically append it to the block graph.
// If this is called from a dominant context a domTerminus must be provided else a common.Hash{} should be used and domOrigin should be set to true.
// Return of this function is the Etxs generated in the Zone Block, subReorg bool that tells dom if should be mined on, setHead bool that determines if we should set the block as the current head and the error
Expand Down Expand Up @@ -307,15 +335,8 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do

var time8, time9 common.PrettyDuration
var bestPh types.PendingHeader
var exist bool
if nodeCtx == common.ZONE_CTX {
bestPh, exist = sl.readPhCache(sl.bestPhKey)
if !exist {
sl.WriteBestPhKey(sl.config.GenesisHash)
sl.writePhCache(block.Hash(), pendingHeaderWithTermini)
bestPh = types.EmptyPendingHeader()
sl.logger.WithField("key", sl.bestPhKey).Warn("BestPh Key does not exist")
}
bestPh, _ = sl.readPhCache(sl.bestPhKey)

time8 = common.PrettyDuration(time.Since(start))

Expand Down
58 changes: 58 additions & 0 deletions core/types/block.go
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,64 @@ type Header struct {
PowDigest atomic.Value
}

// Compare reports whether h1 and h2 are equal field by field. It returns nil
// when every field matches and a descriptive error naming the first
// mismatching field otherwise. Two nil headers compare equal; a nil header
// against a non-nil one is an error. Intended for tests, where a precise diff
// message is more useful than a bare boolean.
func (h1 *Header) Compare(h2 *Header) error {
	if h1 == nil || h2 == nil {
		if h1 == h2 {
			return nil
		}
		return fmt.Errorf("Headers are not equal expected %v, got %v", h1, h2)
	}

	// Scalar, hash, and slice-valued fields compared via reflect.DeepEqual.
	// NOTE(review): this includes the lazily-populated cache fields (hash,
	// sealHash, PowHash, PowDigest), so two otherwise-identical headers in
	// different cache states will compare unequal — confirm that is intended.
	fields := map[string][]interface{}{
		"parentHash":    {h1.parentHash, h2.parentHash},
		"uncleHash":     {h1.uncleHash, h2.uncleHash},
		"coinbase":      {h1.coinbase, h2.coinbase},
		"txHash":        {h1.txHash, h2.txHash},
		"etxHash":       {h1.etxHash, h2.etxHash},
		"etxRollupHash": {h1.etxRollupHash, h2.etxRollupHash},
		"manifestHash":  {h1.manifestHash, h2.manifestHash},
		"receiptHash":   {h1.receiptHash, h2.receiptHash},
		"difficulty":    {h1.difficulty, h2.difficulty},
		"number":        {h1.number, h2.number},
		"gasLimit":      {h1.gasLimit, h2.gasLimit},
		"gasUsed":       {h1.gasUsed, h2.gasUsed},
		"baseFee":       {h1.baseFee, h2.baseFee},
		"location":      {h1.location, h2.location},
		"time":          {h1.time, h2.time},
		"extra":         {h1.extra, h2.extra},
		"mixHash":       {h1.mixHash, h2.mixHash},
		"nonce":         {h1.nonce, h2.nonce},
		"hash":          {h1.hash, h2.hash},
		"sealHash":      {h1.sealHash, h2.sealHash},
		"PowHash":       {h1.PowHash, h2.PowHash},
		"PowDigest":     {h1.PowDigest, h2.PowDigest},
	}

	for fieldName, values := range fields {
		if !reflect.DeepEqual(values[0], values[1]) {
			return fmt.Errorf("Field %s is not equal expected %v, got %v", fieldName, values[0], values[1])
		}
	}

	// Big-int slices are compared element-wise with Cmp rather than DeepEqual,
	// which can report spurious mismatches for equal big.Ints with different
	// internal representations.
	if len(h1.parentEntropy) != len(h2.parentEntropy) {
		return fmt.Errorf("Field parentEntropy is not equal expected %v, got %v", h1.parentEntropy, h2.parentEntropy)
	}
	for i := range h1.parentEntropy {
		if h1.parentEntropy[i].Cmp(h2.parentEntropy[i]) != 0 {
			return fmt.Errorf("Field parentEntropy at index %d is not equal expected %v, got %v", i, h1.parentEntropy[i], h2.parentEntropy[i])
		}
	}
	// Fixed copy-paste bugs: the original reported "parentEntropy" (with
	// parentEntropy values) in this length-mismatch message, and compared
	// h1.parentEntropy[i] against h2.parentDeltaS[i] in the loop below.
	if len(h1.parentDeltaS) != len(h2.parentDeltaS) {
		return fmt.Errorf("Field parentDeltaS is not equal expected %v, got %v", h1.parentDeltaS, h2.parentDeltaS)
	}
	for i := range h1.parentDeltaS {
		if h1.parentDeltaS[i].Cmp(h2.parentDeltaS[i]) != 0 {
			return fmt.Errorf("Field parentDeltaS at index %d is not equal expected %v, got %v", i, h1.parentDeltaS[i], h2.parentDeltaS[i])
		}
	}
	return nil
}

// field type overrides for gencodec
type headerMarshaling struct {
Difficulty *hexutil.Big
Expand Down
1 change: 1 addition & 0 deletions log/logger.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ const (
defaultLogMaxBackups = 3 // maximum number of old log files to keep
defaultLogMaxAge = 28 // maximum number of days to retain old log files
defaultLogCompress = true // whether to compress the rotated log files using gzip
DebugLevel = logrus.DebugLevel
)

var (
Expand Down
Loading
Loading