From e0a573c0a205f504a4542ae3b9e6e42eb26524bf Mon Sep 17 00:00:00 2001 From: Rob Schleusner Date: Wed, 3 Jan 2024 15:19:21 -0600 Subject: [PATCH] Organizes several loggers to print to different files --- cmd/go-quai/start.go | 3 +- cmd/utils/cmd.go | 51 +++++---- cmd/utils/flags.go | 32 +++--- consensus/blake3pow/blake3pow.go | 9 +- consensus/blake3pow/consensus.go | 16 +-- consensus/blake3pow/sealer.go | 37 ++++--- consensus/progpow/consensus.go | 16 +-- consensus/progpow/progpow.go | 39 ++++--- consensus/progpow/sealer.go | 29 +++--- core/block_validator.go | 3 +- core/bloom_indexer.go | 11 +- core/bodydb.go | 5 +- core/chain_indexer.go | 19 ++-- core/chain_makers.go | 2 +- core/core.go | 37 ++++--- core/genesis.go | 20 ++-- core/headerchain.go | 31 +++--- core/miner.go | 7 +- core/rawdb/accessors_metadata.go | 6 +- core/rawdb/database.go | 32 +++--- core/rawdb/databases_64bit.go | 5 +- core/rawdb/freezer.go | 65 ++++++------ core/slice.go | 109 ++++++++++---------- core/state/pruner/pruner.go | 16 +-- core/state_processor.go | 50 ++++----- core/tx_journal.go | 12 ++- core/tx_pool.go | 171 ++++++++++++++++--------------- core/worker.go | 53 +++++----- ethdb/leveldb/leveldb.go | 39 +++---- ethdb/pebble/pebble.go | 9 +- log/interface.go | 57 +++++++---- log/logger.go | 56 ++++++---- log/options.go | 25 ++--- node/node.go | 15 +-- node/rpcstack.go | 22 ++-- p2p/node/events.go | 2 +- quai/backend.go | 49 ++++----- quai/gasprice/feehistory.go | 5 +- quai/gasprice/gasprice.go | 17 +-- quai/quaiconfig/config.go | 20 ++-- quaiclient/quaiclient.go | 8 +- 41 files changed, 648 insertions(+), 562 deletions(-) diff --git a/cmd/go-quai/start.go b/cmd/go-quai/start.go index ddbefa9681..91278077e4 100644 --- a/cmd/go-quai/start.go +++ b/cmd/go-quai/start.go @@ -68,8 +68,9 @@ func runStart(cmd *cobra.Command, args []string) error { log.Fatalf("error creating node: %s", err) } + logLevel := cmd.Flag(utils.LogLevelFlag.Name).Value.String() // create instance of consensus backend - consensus, err := utils.StartQuaiBackend() + consensus, err := utils.StartQuaiBackend(logLevel) if err != nil { log.Fatalf("error creating consensus backend: %s", err) } diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index d490ee41dd..7d786ab99f 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -2,6 +2,10 @@ package utils import ( "fmt" + "io" + "os" + "runtime" + "time" "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/core/vm" @@ -13,10 +17,6 @@ import ( "github.com/dominant-strategies/go-quai/quai/quaiconfig" "github.com/dominant-strategies/go-quai/quaistats" "github.com/spf13/viper" - "io" - "os" - "runtime" - "time" ) type quaistatsConfig struct { @@ -30,12 +30,15 @@ type quaiConfig struct { } // Create a new instance of the QuaiBackend consensus service -func StartQuaiBackend() (*quai.QuaiBackend, error) { - +func StartQuaiBackend(logLevel string) (*quai.QuaiBackend, error) { + var logger *log.LogWrapper // Make full node go func() { - log.Info("Starting Prime") - stackPrime := makeFullNode(nil) + // Create the prime logger with the log file path + // TODO: Save log level as global variable and set it + logger = log.New("nodelogs/prime.log", logLevel) + logger.Info("Starting Prime") + stackPrime := makeFullNode(nil, logger) defer stackPrime.Close() StartNode(stackPrime) stackPrime.Wait() @@ -44,8 +47,11 @@ func StartQuaiBackend() (*quai.QuaiBackend, error) { time.Sleep(2 * time.Second) go func() { - log.Info("Starting Region") - stackRegion := makeFullNode(common.Location{0}) + 
// Create the prime logger with the log file path + // TODO: Save log level as global variable and set it + logger = log.New("nodelogs/region-0.log", logLevel) + logger.Info("Starting Region") + stackRegion := makeFullNode(common.Location{0}, logger) defer stackRegion.Close() StartNode(stackRegion) stackRegion.Wait() @@ -54,8 +60,11 @@ func StartQuaiBackend() (*quai.QuaiBackend, error) { time.Sleep(2 * time.Second) go func() { + // Create the prime logger with the log file path + // TODO: Save log level as global variable and set it + logger = log.New("nodelogs/zone-0-0.log", logLevel) log.Info("Starting Zone") - stackZone := makeFullNode(common.Location{0, 0}) + stackZone := makeFullNode(common.Location{0, 0}, logger) defer stackZone.Close() StartNode(stackZone) stackZone.Wait() @@ -72,7 +81,7 @@ func StartNode(stack *node.Node) { } // makeConfigNode loads quai configuration and creates a blank node instance. -func makeConfigNode(nodeLocation common.Location) (*node.Node, quaiConfig) { +func makeConfigNode(nodeLocation common.Location, logger log.Logger) (*node.Node, quaiConfig) { // Load defaults. cfg := quaiConfig{ Quai: quaiconfig.Defaults, @@ -81,15 +90,15 @@ func makeConfigNode(nodeLocation common.Location) (*node.Node, quaiConfig) { // Apply flags. // set the node location - log.Info("Node", "Location", nodeLocation) + logger.Info("Node", "Location", nodeLocation) cfg.Node.NodeLocation = nodeLocation - SetNodeConfig(&cfg.Node, nodeLocation) - stack, err := node.New(&cfg.Node) + SetNodeConfig(&cfg.Node, nodeLocation, logger) + stack, err := node.New(&cfg.Node, logger) if err != nil { Fatalf("Failed to create the protocol stack: %v", err) } - SetQuaiConfig(stack, &cfg.Quai, nodeLocation) + SetQuaiConfig(stack, &cfg.Quai, nodeLocation, logger) // TODO: Apply stats if viper.IsSet(QuaiStatsURLFlag.Name) { @@ -114,9 +123,9 @@ func defaultNodeConfig() node.Config { } // makeFullNode loads quai configuration and creates the Quai backend. -func makeFullNode(nodeLocation common.Location) *node.Node { - stack, cfg := makeConfigNode(nodeLocation) - backend, _ := RegisterQuaiService(stack, cfg.Quai, cfg.Node.NodeLocation.Context()) +func makeFullNode(nodeLocation common.Location, logger log.Logger) *node.Node { + stack, cfg := makeConfigNode(nodeLocation, logger) + backend, _ := RegisterQuaiService(stack, cfg.Quai, cfg.Node.NodeLocation.Context(), logger) sendfullstats := viper.GetBool(SendFullStatsFlag.Name) // Add the Quai Stats daemon if requested. if cfg.Ethstats.URL != "" { @@ -128,8 +137,8 @@ func makeFullNode(nodeLocation common.Location) *node.Node { // RegisterQuaiService adds a Quai client to the stack. // The second return value is the full node instance, which may be nil if the // node is running as a light client. -func RegisterQuaiService(stack *node.Node, cfg quaiconfig.Config, nodeCtx int) (quaiapi.Backend, error) { - backend, err := quai.New(stack, &cfg, nodeCtx) +func RegisterQuaiService(stack *node.Node, cfg quaiconfig.Config, nodeCtx int, logger log.Logger) (quaiapi.Backend, error) { + backend, err := quai.New(stack, &cfg, nodeCtx, logger) if err != nil { Fatalf("Failed to register the Quai service: %v", err) } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index babc0af16b..3012bc17ff 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -939,7 +939,7 @@ func setWS(cfg *node.Config, nodeLocation common.Location) { } // setDomUrl sets the dominant chain websocket url. 
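The core of this change, visible in the cmd/utils/cmd.go hunks above, is that StartQuaiBackend now builds one file-backed logger per chain process with log.New("nodelogs/<chain>.log", logLevel) and hands it to makeFullNode, which threads the same instance through makeConfigNode, node.New, SetNodeConfig, SetQuaiConfig and RegisterQuaiService instead of relying on the package-level log functions. A minimal, self-contained sketch of that idea follows; the Logger interface and fileLogger type here are hypothetical stand-ins for go-quai's log.Logger and LogWrapper, whose internals are not part of this diff:

package main

import (
	"fmt"
	"log"
	"os"
)

// Logger is a simplified stand-in for the interface the patch threads through
// the constructors (the real type lives in go-quai's log package).
type Logger interface {
	Info(msg string, ctx ...interface{})
}

// fileLogger writes log lines to a dedicated file, one per chain process.
type fileLogger struct {
	inner *log.Logger
	level string
}

// newFileLogger mirrors the shape of log.New("nodelogs/prime.log", logLevel)
// used in StartQuaiBackend; the path and level come from the caller.
func newFileLogger(path, level string) (*fileLogger, error) {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
	if err != nil {
		return nil, err
	}
	return &fileLogger{inner: log.New(f, "", log.LstdFlags), level: level}, nil
}

func (l *fileLogger) Info(msg string, ctx ...interface{}) {
	l.inner.Println(append([]interface{}{"INFO", msg}, ctx...)...)
}

// startChain stands in for the makeFullNode/StartNode pair: the logger is an
// explicit dependency instead of a package-level global.
func startChain(name string, logger Logger) {
	logger.Info("Starting "+name, "pid", os.Getpid())
}

func main() {
	if err := os.MkdirAll("nodelogs", 0o755); err != nil {
		fmt.Println("mkdir failed:", err)
		return
	}
	chains := []struct{ name, path string }{
		{"Prime", "nodelogs/prime.log"},
		{"Region", "nodelogs/region-0.log"},
		{"Zone", "nodelogs/zone-0-0.log"},
	}
	for _, c := range chains {
		logger, err := newFileLogger(c.path, "info")
		if err != nil {
			fmt.Println("logger init failed:", err)
			return
		}
		startChain(c.name, logger)
	}
}

The trade-off, visible in the diffstat at the top of the patch, is that the logger becomes an explicit constructor argument across roughly forty files, but in exchange each chain's output can be separated into its own nodelogs file.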
-func setDomUrl(cfg *quaiconfig.Config, nodeLocation common.Location) { +func setDomUrl(cfg *quaiconfig.Config, nodeLocation common.Location, logger log.Logger) { // only set the dom url if the node is not prime if nodeLocation != nil { if len(nodeLocation) == 1 { @@ -948,7 +948,7 @@ func setDomUrl(cfg *quaiconfig.Config, nodeLocation common.Location) { cfg.DomUrl = "ws://127.0.0.1:8002" } } - log.Info("Node", "Location", nodeLocation, "domurl", cfg.DomUrl) + logger.Info("Node", "Location", nodeLocation, "domurl", cfg.DomUrl) } // setSubUrls sets the subordinate chain urls @@ -1069,7 +1069,7 @@ func MakePasswordList() []string { } // SetNodeConfig applies node-related command line flags to the config. -func SetNodeConfig(cfg *node.Config, nodeLocation common.Location) { +func SetNodeConfig(cfg *node.Config, nodeLocation common.Location, logger log.Logger) { setHTTP(cfg, nodeLocation) setWS(cfg, nodeLocation) setNodeUserIdent(cfg) @@ -1086,7 +1086,7 @@ func SetNodeConfig(cfg *node.Config, nodeLocation common.Location) { cfg.UseLightweightKDF = true } if viper.IsSet(NoUSBFlag.Name) || cfg.NoUSB { - log.Warn("Option nousb is deprecated and USB is deactivated by default. Use --usb to enable") + logger.Warn("Option nousb is deprecated and USB is deactivated by default. Use --usb to enable") } if viper.IsSet(USBFlag.Name) { cfg.USB = viper.GetBool(USBFlag.Name) @@ -1099,7 +1099,7 @@ func SetNodeConfig(cfg *node.Config, nodeLocation common.Location) { if dbEngine != "leveldb" && dbEngine != "pebble" { Fatalf("Invalid choice for db.engine '%s', allowed 'leveldb' or 'pebble'", dbEngine) } - log.Info(fmt.Sprintf("Using %s as db engine", dbEngine)) + logger.Info(fmt.Sprintf("Using %s as db engine", dbEngine)) cfg.DBEngine = dbEngine } } @@ -1329,7 +1329,7 @@ func CheckExclusive(args ...interface{}) { } // SetQuaiConfig applies quai-related command line flags to the config. 
-func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, nodeLocation common.Location) { +func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, nodeLocation common.Location, logger log.Logger) { // Avoid conflicting network flags CheckExclusive(ColosseumFlag, DeveloperFlag, GardenFlag, OrchardFlag, LocalFlag, LighthouseFlag) CheckExclusive(DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer @@ -1337,7 +1337,7 @@ func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, nodeLocation common if viper.GetString(GCModeFlag.Name) == "archive" && viper.GetUint64(TxLookupLimitFlag.Name) != 0 { // TODO: see what this is supposed to do viper.IsSet(TxLookupLimitFlag.Name) - log.Warn("Disable transaction unindexing for archive node") + logger.Warn("Disable transaction unindexing for archive node") } cfg.NodeLocation = nodeLocation @@ -1359,7 +1359,7 @@ func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, nodeLocation common setWhitelist(cfg) // set the dominant chain websocket url - setDomUrl(cfg, nodeLocation) + setDomUrl(cfg, nodeLocation, logger) // set the subordinate chain websocket urls setSubUrls(cfg, nodeLocation) @@ -1374,12 +1374,12 @@ func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, nodeLocation common mem, err := gopsutil.VirtualMemory() if err == nil { if 32<<(^uintptr(0)>>63) == 32 && mem.Total > 2*1024*1024*1024 { - log.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024) + logger.Warn("Lowering memory allowance on 32bit arch", "available", mem.Total/1024/1024, "addressable", 2*1024) mem.Total = 2 * 1024 * 1024 * 1024 } allowance := int(mem.Total / 1024 / 1024 / 3) if cache := viper.GetInt(CacheFlag.Name); cache > allowance { - log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) + logger.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) viper.GetViper().Set(CacheFlag.Name, strconv.Itoa(allowance)) } } @@ -1387,7 +1387,7 @@ func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, nodeLocation common cache := viper.GetInt(CacheFlag.Name) gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) - log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) + logger.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) godebug.SetGCPercent(int(gogc)) if viper.IsSet(NetworkIdFlag.Name) { @@ -1414,7 +1414,7 @@ func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, nodeLocation common cfg.Preimages = viper.GetBool(CachePreimagesFlag.Name) if cfg.NoPruning && !cfg.Preimages { cfg.Preimages = true - log.Info("Enabling recording of key preimages since archive mode is used") + logger.Info("Enabling recording of key preimages since archive mode is used") } if viper.IsSet(TxLookupLimitFlag.Name) { cfg.TxLookupLimit = viper.GetUint64(TxLookupLimitFlag.Name) @@ -1450,9 +1450,9 @@ func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, nodeLocation common cfg.RPCGasCap = viper.GetUint64(RPCGlobalGasCapFlag.Name) } if cfg.RPCGasCap != 0 { - log.Info("Set global gas cap", "cap", cfg.RPCGasCap) + logger.Info("Set global gas cap", "cap", cfg.RPCGasCap) } else { - log.Info("Global gas cap disabled") + logger.Info("Global gas cap disabled") } if viper.IsSet(RPCGlobalTxFeeCapFlag.Name) { cfg.RPCTxFeeCap = viper.GetFloat64(RPCGlobalTxFeeCapFlag.Name) @@ -1516,9 +1516,9 @@ func SetQuaiConfig(stack *node.Node, cfg *quaiconfig.Config, nodeLocation common cfg.Genesis.Nonce = 
viper.GetUint64(GenesisNonceFlag.Name) } - log.Info("Setting genesis Location", "node", nodeLocation) + logger.Info("Setting genesis Location", "node", nodeLocation) cfg.Genesis.Config.Location = nodeLocation - log.Info("Location after setting", "genesis", cfg.Genesis.Config.Location) + logger.Info("Location after setting", "genesis", cfg.Genesis.Config.Location) } func SplitTagsFlag(tagsFlag string) map[string]string { diff --git a/consensus/blake3pow/blake3pow.go b/consensus/blake3pow/blake3pow.go index 4b6643b775..fec0200ab0 100644 --- a/consensus/blake3pow/blake3pow.go +++ b/consensus/blake3pow/blake3pow.go @@ -21,7 +21,7 @@ func init() { sharedConfig := Config{ PowMode: ModeNormal, } - sharedBlake3pow = New(sharedConfig, nil, false) + sharedBlake3pow = New(sharedConfig, nil, false, log.New("nodelogs/shared-blake3pow.log", "info")) } // Mode defines the type and amount of PoW verification a blake3pow engine makes. @@ -71,15 +71,18 @@ type Blake3pow struct { lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields closeOnce sync.Once // Ensures exit channel will not be closed twice. + + logger log.Logger } // New creates a full sized blake3pow PoW scheme and starts a background thread for // remote mining, also optionally notifying a batch of remote services of new work // packages. -func New(config Config, notify []string, noverify bool) *Blake3pow { +func New(config Config, notify []string, noverify bool, logger log.Logger) *Blake3pow { blake3pow := &Blake3pow{ config: config, update: make(chan struct{}), + logger: logger, } if config.PowMode == ModeShared { blake3pow.shared = sharedBlake3pow @@ -91,7 +94,7 @@ func New(config Config, notify []string, noverify bool) *Blake3pow { // NewTester creates a small sized blake3pow PoW scheme useful only for testing // purposes. 
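Both PoW engines now keep a logger field that New fills from its new parameter, and the methods that follow (CalcDifficulty, Finalize, Seal, the remote sealer loop) log through it, so each engine instance reports into the log file of the chain that created it. A rough sketch of that construction pattern under simplified stand-in types; the Config, Logger and Engine below are not the real blake3pow/progpow types:

package main

import "fmt"

// Logger is a simplified stand-in for go-quai's log.Logger interface.
type Logger interface {
	Warn(msg string, ctx ...interface{})
	Info(msg string, ctx ...interface{})
}

type printLogger struct{ name string }

func (p printLogger) Warn(msg string, ctx ...interface{}) { fmt.Println(p.name, "WARN", msg, ctx) }
func (p printLogger) Info(msg string, ctx ...interface{}) { fmt.Println(p.name, "INFO", msg, ctx) }

// Config is a cut-down stand-in for the engine Config.
type Config struct{ CachesInMem int }

// Engine mirrors the pattern in the patch: the constructor stores the caller's
// logger on the struct, and every method logs through engine.logger instead of
// the package-level log functions.
type Engine struct {
	config Config
	logger Logger
}

func New(config Config, logger Logger) *Engine {
	if config.CachesInMem <= 0 {
		// Same shape as the warning in progpow.New: validation messages go to
		// the injected logger so they land in that chain's log file.
		logger.Warn("One cache must always be in memory", "requested", config.CachesInMem)
		config.CachesInMem = 1
	}
	return &Engine{config: config, logger: logger}
}

func (e *Engine) Seal() {
	e.logger.Info("Sealing block", "caches", e.config.CachesInMem)
}

func main() {
	engine := New(Config{CachesInMem: 0}, printLogger{name: "zone-0-0"})
	engine.Seal()
}

NewTester and the shared-engine initializer follow the same rule in the patch by constructing dedicated file loggers ("nodelogs/test-blake3pow.log", "nodelogs/shared-blake3pow.log") at the call site.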
func NewTester(notify []string, noverify bool) *Blake3pow { - return New(Config{PowMode: ModeTest}, notify, noverify) + return New(Config{PowMode: ModeTest}, notify, noverify, log.New("nodelogs/test-blake3pow.log", "info")) } // NewFaker creates a blake3pow consensus engine with a fake PoW scheme that accepts diff --git a/consensus/blake3pow/consensus.go b/consensus/blake3pow/consensus.go index 612556f6bc..68330e2f06 100644 --- a/consensus/blake3pow/consensus.go +++ b/consensus/blake3pow/consensus.go @@ -339,7 +339,7 @@ func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, pa nodeCtx := blake3pow.config.NodeLocation.Context() if nodeCtx != common.ZONE_CTX { - log.Error("Cannot CalcDifficulty for", "context", nodeCtx) + blake3pow.logger.Error("Cannot CalcDifficulty for", "context", nodeCtx) return nil } @@ -431,17 +431,17 @@ func (blake3pow *Blake3pow) Finalize(chain consensus.ChainHeaderReader, header * nodeLocation := blake3pow.config.NodeLocation nodeCtx := blake3pow.config.NodeLocation.Context() // Accumulate any block and uncle rewards and commit the final state root - accumulateRewards(chain.Config(), state, header, uncles) + accumulateRewards(chain.Config(), state, header, uncles, blake3pow.logger) if nodeCtx == common.ZONE_CTX && header.ParentHash(nodeCtx) == chain.Config().GenesisHash { - alloc := core.ReadGenesisAlloc("genallocs/gen_alloc_" + nodeLocation.Name() + ".json") - log.Info("Allocating genesis accounts", "num", len(alloc)) + alloc := core.ReadGenesisAlloc("genallocs/gen_alloc_"+nodeLocation.Name()+".json", blake3pow.logger) + blake3pow.logger.Info("Allocating genesis accounts", "num", len(alloc)) for addressString, account := range alloc { addr := common.HexToAddress(addressString, nodeLocation) internal, err := addr.InternalAddress() if err != nil { - log.Error("Provided address in genesis block is out of scope") + blake3pow.logger.Error("Provided address in genesis block is out of scope") } state.AddBalance(internal, account.Balance) state.SetCode(internal, account.Code) @@ -475,14 +475,14 @@ func (blake3pow *Blake3pow) ComputePowLight(header *types.Header) (common.Hash, // AccumulateRewards credits the coinbase of the given block with the mining // reward. The total reward consists of the static block reward and rewards for // included uncles. The coinbase of each uncle block is also rewarded. 
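accumulateRewards now receives the engine's logger as its final argument, and the two failure modes differ: an out-of-scope block coinbase aborts the reward step entirely, while an out-of-scope uncle is logged and skipped so the remaining uncles are still paid. A toy sketch of that control flow, assuming a simplified Logger interface and made-up account/balance types in place of the real header and state objects:

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// Logger is a simplified stand-in for go-quai's log.Logger interface.
type Logger interface {
	Error(msg string, ctx ...interface{})
}

type printLogger struct{}

func (printLogger) Error(msg string, ctx ...interface{}) { fmt.Println("ERROR", msg, ctx) }

// account is a toy stand-in for a coinbase address that may be out of scope
// for the local chain.
type account struct {
	addr    string
	inScope bool
}

func (a account) internalAddress() (string, error) {
	if !a.inScope {
		return "", errors.New("address out of scope")
	}
	return a.addr, nil
}

// accumulateRewards mirrors the control flow of the patched helper: an
// out-of-scope block coinbase aborts the whole reward step, while an
// out-of-scope uncle is logged and skipped without stopping the loop.
func accumulateRewards(balances map[string]*big.Int, coinbase account, uncles []account, reward *big.Int, logger Logger) {
	credit := func(addr string, amount *big.Int) {
		if balances[addr] == nil {
			balances[addr] = new(big.Int)
		}
		balances[addr].Add(balances[addr], amount)
	}
	addr, err := coinbase.internalAddress()
	if err != nil {
		logger.Error("Block has out of scope coinbase, skipping block reward", "Address", coinbase.addr)
		return
	}
	for _, uncle := range uncles {
		uaddr, err := uncle.internalAddress()
		if err != nil {
			logger.Error("Found uncle with out of scope coinbase, skipping reward", "Address", uncle.addr)
			continue
		}
		credit(uaddr, reward)
	}
	credit(addr, reward)
}

func main() {
	balances := map[string]*big.Int{}
	accumulateRewards(balances,
		account{addr: "0xabc", inScope: true},
		[]account{{addr: "0xdef", inScope: true}, {addr: "0x999", inScope: false}},
		big.NewInt(5), printLogger{})
	fmt.Println(balances["0xabc"], balances["0xdef"])
}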
-func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { +func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header, logger log.Logger) { nodeCtx := config.Location.Context() // Select the correct block reward based on chain progression blockReward := misc.CalculateReward(header) coinbase, err := header.Coinbase().InternalAddress() if err != nil { - log.Error("Block has out of scope coinbase, skipping block reward", "Address", header.Coinbase().String(), "Hash", header.Hash().String()) + logger.Error("Block has out of scope coinbase, skipping block reward", "Address", header.Coinbase().String(), "Hash", header.Hash().String()) return } @@ -492,7 +492,7 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header for _, uncle := range uncles { coinbase, err := uncle.Coinbase().InternalAddress() if err != nil { - log.Error("Found uncle with out of scope coinbase, skipping reward", "Address", uncle.Coinbase().String(), "Hash", uncle.Hash().String()) + logger.Error("Found uncle with out of scope coinbase, skipping reward", "Address", uncle.Coinbase().String(), "Hash", uncle.Hash().String()) continue } r.Add(uncle.Number(nodeCtx), big8) diff --git a/consensus/blake3pow/sealer.go b/consensus/blake3pow/sealer.go index 1f876d98df..724879a166 100644 --- a/consensus/blake3pow/sealer.go +++ b/consensus/blake3pow/sealer.go @@ -17,7 +17,6 @@ import ( "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/common/hexutil" "github.com/dominant-strategies/go-quai/core/types" - "github.com/dominant-strategies/go-quai/log" ) const ( @@ -40,7 +39,7 @@ func (blake3pow *Blake3pow) Seal(header *types.Header, results chan<- *types.Hea select { case results <- header: default: - log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", header.SealHash()) + blake3pow.logger.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", header.SealHash()) } return nil } @@ -91,14 +90,14 @@ func (blake3pow *Blake3pow) Seal(header *types.Header, results chan<- *types.Hea select { case results <- result: default: - log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", header.SealHash()) + blake3pow.logger.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", header.SealHash()) } close(abort) case <-blake3pow.update: // Thread count was changed on user request, restart close(abort) if err := blake3pow.Seal(header, results, stop); err != nil { - log.Error("Failed to restart sealing after update", "err", err) + blake3pow.logger.Error("Failed to restart sealing after update", "err", err) } } // Wait for all miners to terminate and return the block @@ -120,13 +119,13 @@ func (blake3pow *Blake3pow) mine(header *types.Header, id int, seed uint64, abor nonce = seed powBuffer = new(big.Int) ) - log.Trace("Started blake3pow search for new nonces", "seed", seed) + blake3pow.logger.Trace("Started blake3pow search for new nonces", "seed", seed) search: for { select { case <-abort: // Mining terminated, update stats and abort - log.Trace("Blake3pow nonce search aborted", "attempts", nonce-seed) + blake3pow.logger.Trace("Blake3pow nonce search aborted", "attempts", nonce-seed) break search default: @@ -145,9 +144,9 @@ search: // Seal and return a block (if still needed) select { case found <- header: - log.Trace("Blake3pow nonce found and reported", "attempts", nonce-seed, "nonce", nonce) 
+ blake3pow.logger.Trace("Blake3pow nonce found and reported", "attempts", nonce-seed, "nonce", nonce) case <-abort: - log.Trace("Blake3pow nonce found but discarded", "attempts", nonce-seed, "nonce", nonce) + blake3pow.logger.Trace("Blake3pow nonce found but discarded", "attempts", nonce-seed, "nonce", nonce) } break search } @@ -234,7 +233,7 @@ func startRemoteSealer(blake3pow *Blake3pow, urls []string, noverify bool) *remo func (s *remoteSealer) loop() { defer func() { - log.Trace("Blake3pow remote sealer is exiting") + s.blake3pow.logger.Trace("Blake3pow remote sealer is exiting") s.cancelNotify() s.reqWG.Wait() close(s.exitCh) @@ -350,7 +349,7 @@ func (s *remoteSealer) sendNotification(ctx context.Context, url string, json [] req, err := http.NewRequest("POST", url, bytes.NewReader(json)) if err != nil { - log.Warn("Can't create remote miner notification", "err", err) + s.blake3pow.logger.Warn("Can't create remote miner notification", "err", err) return } ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout) @@ -360,9 +359,9 @@ func (s *remoteSealer) sendNotification(ctx context.Context, url string, json [] resp, err := http.DefaultClient.Do(req) if err != nil { - log.Warn("Failed to notify remote miner", "err", err) + s.blake3pow.logger.Warn("Failed to notify remote miner", "err", err) } else { - log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2]) + s.blake3pow.logger.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2]) resp.Body.Close() } } @@ -372,14 +371,14 @@ func (s *remoteSealer) sendNotification(ctx context.Context, url string, json [] // any other error, like no pending work or stale mining result). func (s *remoteSealer) submitWork(nonce types.BlockNonce, sealhash common.Hash) bool { if s.currentHeader == nil { - log.Error("Pending work without block", "sealhash", sealhash) + s.blake3pow.logger.Error("Pending work without block", "sealhash", sealhash) return false } nodeCtx := s.blake3pow.config.NodeLocation.Context() // Make sure the work submitted is present header := s.works[sealhash] if header == nil { - log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentHeader.NumberU64(nodeCtx)) + s.blake3pow.logger.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentHeader.NumberU64(nodeCtx)) return false } // Verify the correctness of submitted result. @@ -388,10 +387,10 @@ func (s *remoteSealer) submitWork(nonce types.BlockNonce, sealhash common.Hash) start := time.Now() // Make sure the result channel is assigned. if s.results == nil { - log.Warn("Blake3pow result channel is empty, submitted mining result is rejected") + s.blake3pow.logger.Warn("Blake3pow result channel is empty, submitted mining result is rejected") return false } - log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) + s.blake3pow.logger.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) // Solutions seems to be valid, return to the miner and notify acceptance. 
solution := header @@ -400,14 +399,14 @@ func (s *remoteSealer) submitWork(nonce types.BlockNonce, sealhash common.Hash) if solution.NumberU64(nodeCtx)+staleThreshold > s.currentHeader.NumberU64(nodeCtx) { select { case s.results <- solution: - log.Debug("Work submitted is acceptable", "number", solution.NumberU64(nodeCtx), "sealhash", sealhash, "hash", solution.Hash()) + s.blake3pow.logger.Debug("Work submitted is acceptable", "number", solution.NumberU64(nodeCtx), "sealhash", sealhash, "hash", solution.Hash()) return true default: - log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash) + s.blake3pow.logger.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash) return false } } // The submitted block is too old to accept, drop it. - log.Warn("Work submitted is too old", "number", solution.NumberU64(nodeCtx), "sealhash", sealhash, "hash", solution.Hash()) + s.blake3pow.logger.Warn("Work submitted is too old", "number", solution.NumberU64(nodeCtx), "sealhash", sealhash, "hash", solution.Hash()) return false } diff --git a/consensus/progpow/consensus.go b/consensus/progpow/consensus.go index aa7b7d66c9..0a62c3d28c 100644 --- a/consensus/progpow/consensus.go +++ b/consensus/progpow/consensus.go @@ -338,7 +338,7 @@ func (progpow *Progpow) CalcDifficulty(chain consensus.ChainHeaderReader, parent nodeCtx := progpow.NodeLocation().Context() if nodeCtx != common.ZONE_CTX { - log.Error("Cannot CalcDifficulty for", "context", nodeCtx) + progpow.logger.Error("Cannot CalcDifficulty for", "context", nodeCtx) return nil } @@ -466,19 +466,19 @@ func (progpow *Progpow) Prepare(chain consensus.ChainHeaderReader, header *types // setting the final state on the header func (progpow *Progpow) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { // Accumulate any block and uncle rewards and commit the final state root - accumulateRewards(chain.Config(), state, header, uncles) + accumulateRewards(chain.Config(), state, header, uncles, progpow.logger) nodeLocation := progpow.NodeLocation() nodeCtx := progpow.NodeLocation().Context() if nodeCtx == common.ZONE_CTX && header.ParentHash(nodeCtx) == chain.Config().GenesisHash { - alloc := core.ReadGenesisAlloc("genallocs/gen_alloc_" + nodeLocation.Name() + ".json") - log.Info("Allocating genesis accounts", "num", len(alloc)) + alloc := core.ReadGenesisAlloc("genallocs/gen_alloc_"+nodeLocation.Name()+".json", progpow.logger) + progpow.logger.Info("Allocating genesis accounts", "num", len(alloc)) for addressString, account := range alloc { addr := common.HexToAddress(addressString, nodeLocation) internal, err := addr.InternalAddress() if err != nil { - log.Error("Provided address in genesis block is out of scope") + progpow.logger.Error("Provided address in genesis block is out of scope") } state.AddBalance(internal, account.Balance) state.SetCode(internal, account.Code) @@ -512,14 +512,14 @@ func (progpow *Progpow) NodeLocation() common.Location { // AccumulateRewards credits the coinbase of the given block with the mining // reward. The total reward consists of the static block reward and rewards for // included uncles. The coinbase of each uncle block is also rewarded. 
-func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { +func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header, logger log.Logger) { // Select the correct block reward based on chain progression blockReward := misc.CalculateReward(header) nodeCtx := config.Location.Context() coinbase, err := header.Coinbase().InternalAddress() if err != nil { - log.Error("Block has out-of-scope coinbase, skipping block reward: " + header.Hash().String()) + logger.Error("Block has out-of-scope coinbase, skipping block reward: " + header.Hash().String()) return } @@ -529,7 +529,7 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header for _, uncle := range uncles { coinbase, err := uncle.Coinbase().InternalAddress() if err != nil { - log.Error("Found uncle with out-of-scope coinbase, skipping reward: " + uncle.Hash().String()) + logger.Error("Found uncle with out-of-scope coinbase, skipping reward: " + uncle.Hash().String()) continue } r.Add(uncle.Number(nodeCtx), big8) diff --git a/consensus/progpow/progpow.go b/consensus/progpow/progpow.go index f7bb469eb8..86e62f964f 100644 --- a/consensus/progpow/progpow.go +++ b/consensus/progpow/progpow.go @@ -37,7 +37,7 @@ func init() { sharedConfig := Config{ PowMode: ModeNormal, } - sharedProgpow = New(sharedConfig, nil, false) + sharedProgpow = New(sharedConfig, nil, false, log.New("nodelogs/shared-progpow.log", "info")) } // isLittleEndian returns whether the local system is running in little or big @@ -186,23 +186,26 @@ type Progpow struct { lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields closeOnce sync.Once // Ensures exit channel will not be closed twice. + + logger log.Logger } // New creates a full sized progpow PoW scheme and starts a background thread for // remote mining, also optionally notifying a batch of remote services of new work // packages. -func New(config Config, notify []string, noverify bool) *Progpow { +func New(config Config, notify []string, noverify bool, logger log.Logger) *Progpow { if config.CachesInMem <= 0 { - log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) + logger.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem) config.CachesInMem = 1 } if config.CacheDir != "" && config.CachesOnDisk > 0 { - log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) + logger.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk) } progpow := &Progpow{ config: config, - caches: newlru("cache", config.CachesInMem, newCache), + caches: newlru("cache", config.CachesInMem, newCache, logger), update: make(chan struct{}), + logger: logger, } if config.PowMode == ModeShared { progpow.shared = sharedProgpow @@ -214,7 +217,7 @@ func New(config Config, notify []string, noverify bool) *Progpow { // NewTester creates a small sized progpow PoW scheme useful only for testing // purposes. 
func NewTester(notify []string, noverify bool) *Progpow { - return New(Config{PowMode: ModeTest}, notify, noverify) + return New(Config{PowMode: ModeTest}, notify, noverify, log.New("nodelogs/test-progpow.log", "info")) } // NewFaker creates a progpow consensus engine with a fake PoW scheme that accepts @@ -291,18 +294,20 @@ type lru struct { cache *simplelru.LRU future uint64 futureItem interface{} + + logger log.Logger } // newlru create a new least-recently-used cache for either the verification caches // or the mining datasets. -func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru { +func newlru(what string, maxItems int, new func(epoch uint64) interface{}, logger log.Logger) *lru { if maxItems <= 0 { maxItems = 1 } cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) { - log.Trace("Evicted ethash "+what, "epoch", key) + logger.Trace("Evicted ethash "+what, "epoch", key) }) - return &lru{what: what, new: new, cache: cache} + return &lru{what: what, new: new, cache: cache, logger: logger} } // get retrieves or creates an item for the given epoch. The first return value is always @@ -318,14 +323,14 @@ func (lru *lru) get(epoch uint64) (item, future interface{}) { if lru.future > 0 && lru.future == epoch { item = lru.futureItem } else { - log.Trace("Requiring new ethash "+lru.what, "epoch", epoch) + lru.logger.Trace("Requiring new ethash "+lru.what, "epoch", epoch) item = lru.new(epoch) } lru.cache.Add(epoch, item) } // Update the 'future item' if epoch is larger than previously seen. if epoch < maxEpoch-1 && lru.future < epoch+1 { - log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1) + lru.logger.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1) future = lru.new(epoch + 1) lru.future = epoch + 1 lru.futureItem = future @@ -350,7 +355,7 @@ func newCache(epoch uint64) interface{} { } // generate ensures that the cache content is generated before use. -func (c *cache) generate(dir string, limit int, lock bool, test bool) { +func (c *cache) generate(dir string, limit int, lock bool, test bool, logger log.Logger) { c.once.Do(func() { size := cacheSize(c.epoch*epochLength + 1) seed := seedHash(c.epoch*epochLength + 1) @@ -380,17 +385,17 @@ func (c *cache) generate(dir string, limit int, lock bool, test bool) { var err error c.dump, c.mmap, c.cache, err = memoryMap(path, lock) if err == nil { - log.Debug("Loaded old ethash cache from disk") + logger.Debug("Loaded old ethash cache from disk") c.cDag = make([]uint32, progpowCacheWords) generateCDag(c.cDag, c.cache, c.epoch) return } - log.Debug("Failed to load old ethash cache", "err", err) + logger.Debug("Failed to load old ethash cache", "err", err) // No previous cache available, create a new cache file to fill c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) }) if err != nil { - log.Error("Failed to generate mapped ethash cache", "err", err) + logger.Error("Failed to generate mapped ethash cache", "err", err) c.cache = make([]uint32, size/4) generateCache(c.cache, c.epoch, seed) @@ -424,12 +429,12 @@ func (progpow *Progpow) cache(block uint64) *cache { current := currentI.(*cache) // Wait for generation finish. 
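The lru helper in progpow now owns a logger as well: newlru receives it, stores it on the struct, and both the eviction callback and the cache-miss path in get log through it, so ethash cache churn is attributed to the right chain's log file. A rough, dependency-free sketch of the same idea using a toy FIFO-evicting cache; the real code wraps a third-party LRU, which is not reproduced here:

package main

import "fmt"

// Logger is a simplified stand-in for go-quai's log.Logger interface.
type Logger interface {
	Trace(msg string, ctx ...interface{})
}

type printLogger struct{}

func (printLogger) Trace(msg string, ctx ...interface{}) { fmt.Println("TRACE", msg, ctx) }

// epochCache mirrors the shape of the patched lru helper: the cache owns the
// logger it was constructed with, so eviction and miss messages are written to
// the owning engine's log file rather than through a global logger.
type epochCache struct {
	what   string
	max    int
	new    func(epoch uint64) interface{}
	items  map[uint64]interface{}
	order  []uint64 // insertion order, oldest first (toy FIFO eviction)
	logger Logger
}

func newEpochCache(what string, max int, new func(uint64) interface{}, logger Logger) *epochCache {
	if max <= 0 {
		max = 1
	}
	return &epochCache{what: what, max: max, new: new, items: map[uint64]interface{}{}, logger: logger}
}

func (c *epochCache) get(epoch uint64) interface{} {
	if item, ok := c.items[epoch]; ok {
		return item
	}
	c.logger.Trace("Requiring new "+c.what, "epoch", epoch)
	item := c.new(epoch)
	if len(c.order) >= c.max {
		oldest := c.order[0]
		c.order = c.order[1:]
		delete(c.items, oldest)
		c.logger.Trace("Evicted "+c.what, "epoch", oldest)
	}
	c.items[epoch] = item
	c.order = append(c.order, epoch)
	return item
}

func main() {
	cache := newEpochCache("ethash cache", 2,
		func(epoch uint64) interface{} { return epoch * 100 }, printLogger{})
	for _, e := range []uint64{1, 2, 3, 1} {
		fmt.Println("epoch", e, "->", cache.get(e))
	}
}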
- current.generate(progpow.config.CacheDir, progpow.config.CachesOnDisk, progpow.config.CachesLockMmap, progpow.config.PowMode == ModeTest) + current.generate(progpow.config.CacheDir, progpow.config.CachesOnDisk, progpow.config.CachesLockMmap, progpow.config.PowMode == ModeTest, progpow.logger) // If we need a new future cache, now's a good time to regenerate it. if futureI != nil { future := futureI.(*cache) - go future.generate(progpow.config.CacheDir, progpow.config.CachesOnDisk, progpow.config.CachesLockMmap, progpow.config.PowMode == ModeTest) + go future.generate(progpow.config.CacheDir, progpow.config.CachesOnDisk, progpow.config.CachesLockMmap, progpow.config.PowMode == ModeTest, progpow.logger) } return current } diff --git a/consensus/progpow/sealer.go b/consensus/progpow/sealer.go index 24c36fd99c..41b6ff9614 100644 --- a/consensus/progpow/sealer.go +++ b/consensus/progpow/sealer.go @@ -17,7 +17,6 @@ import ( "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/common/hexutil" "github.com/dominant-strategies/go-quai/core/types" - "github.com/dominant-strategies/go-quai/log" ) const ( @@ -40,7 +39,7 @@ func (progpow *Progpow) Seal(header *types.Header, results chan<- *types.Header, select { case results <- header: default: - log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", header.SealHash()) + progpow.logger.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", header.SealHash()) } return nil } @@ -91,14 +90,14 @@ func (progpow *Progpow) Seal(header *types.Header, results chan<- *types.Header, select { case results <- result: default: - log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", header.SealHash()) + progpow.logger.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", header.SealHash()) } close(abort) case <-progpow.update: // Thread count was changed on user request, restart close(abort) if err := progpow.Seal(header, results, stop); err != nil { - log.Error("Failed to restart sealing after update", "err", err) + progpow.logger.Error("Failed to restart sealing after update", "err", err) } } // Wait for all miners to terminate and return the block @@ -238,7 +237,7 @@ func startRemoteSealer(progpow *Progpow, urls []string, noverify bool) *remoteSe func (s *remoteSealer) loop() { defer func() { - log.Trace("Progpow remote sealer is exiting") + s.progpow.logger.Trace("Progpow remote sealer is exiting") s.cancelNotify() s.reqWG.Wait() close(s.exitCh) @@ -355,7 +354,7 @@ func (s *remoteSealer) sendNotification(ctx context.Context, url string, json [] req, err := http.NewRequest("POST", url, bytes.NewReader(json)) if err != nil { - log.Warn("Can't create remote miner notification", "err", err) + s.progpow.logger.Warn("Can't create remote miner notification", "err", err) return } ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout) @@ -365,9 +364,9 @@ func (s *remoteSealer) sendNotification(ctx context.Context, url string, json [] resp, err := http.DefaultClient.Do(req) if err != nil { - log.Warn("Failed to notify remote miner", "err", err) + s.progpow.logger.Warn("Failed to notify remote miner", "err", err) } else { - log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2]) + s.progpow.logger.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2]) resp.Body.Close() } } @@ -377,14 +376,14 @@ func (s *remoteSealer) sendNotification(ctx context.Context, url string, json [] // any other error, like 
no pending work or stale mining result). func (s *remoteSealer) submitWork(nonce types.BlockNonce, sealhash common.Hash) bool { if s.currentHeader == nil { - log.Error("Pending work without block", "sealhash", sealhash) + s.progpow.logger.Error("Pending work without block", "sealhash", sealhash) return false } nodeCtx := s.progpow.config.NodeLocation.Context() // Make sure the work submitted is present header := s.works[sealhash] if header == nil { - log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentHeader.NumberU64(nodeCtx)) + s.progpow.logger.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentHeader.NumberU64(nodeCtx)) return false } // Verify the correctness of submitted result. @@ -396,10 +395,10 @@ func (s *remoteSealer) submitWork(nonce types.BlockNonce, sealhash common.Hash) } // Make sure the result channel is assigned. if s.results == nil { - log.Warn("Progpow result channel is empty, submitted mining result is rejected") + s.progpow.logger.Warn("Progpow result channel is empty, submitted mining result is rejected") return false } - log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) + s.progpow.logger.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) // Solutions seems to be valid, return to the miner and notify acceptance. solution := header @@ -408,14 +407,14 @@ func (s *remoteSealer) submitWork(nonce types.BlockNonce, sealhash common.Hash) if solution.NumberU64(nodeCtx)+staleThreshold > s.currentHeader.NumberU64(nodeCtx) { select { case s.results <- solution: - log.Debug("Work submitted is acceptable", "number", solution.NumberU64(nodeCtx), "sealhash", sealhash, "hash", solution.Hash()) + s.progpow.logger.Debug("Work submitted is acceptable", "number", solution.NumberU64(nodeCtx), "sealhash", sealhash, "hash", solution.Hash()) return true default: - log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash) + s.progpow.logger.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash) return false } } // The submitted block is too old to accept, drop it. 
- log.Warn("Work submitted is too old", "number", solution.NumberU64(nodeCtx), "sealhash", sealhash, "hash", solution.Hash()) + s.progpow.logger.Warn("Work submitted is too old", "number", solution.NumberU64(nodeCtx), "sealhash", sealhash, "hash", solution.Hash()) return false } diff --git a/core/block_validator.go b/core/block_validator.go index 7f245e64e0..7819ef99e9 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -24,7 +24,6 @@ import ( "github.com/dominant-strategies/go-quai/consensus" "github.com/dominant-strategies/go-quai/core/state" "github.com/dominant-strategies/go-quai/core/types" - "github.com/dominant-strategies/go-quai/log" "github.com/dominant-strategies/go-quai/params" "github.com/dominant-strategies/go-quai/trie" ) @@ -137,7 +136,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD if etxHash := types.DeriveSha(emittedEtxs, trie.NewStackTrie(nil)); etxHash != header.EtxHash() { return fmt.Errorf("invalid etx hash (remote: %x local: %x)", header.EtxHash(), etxHash) } - log.Debug("times during validate state:", "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t5:", time5, "t6:", time6) + v.hc.logger.Debug("times during validate state:", "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t5:", time5, "t6:", time6) return nil } diff --git a/core/bloom_indexer.go b/core/bloom_indexer.go index ddd54fef57..30b6e57032 100644 --- a/core/bloom_indexer.go +++ b/core/bloom_indexer.go @@ -26,6 +26,7 @@ import ( "github.com/dominant-strategies/go-quai/core/rawdb" "github.com/dominant-strategies/go-quai/core/types" "github.com/dominant-strategies/go-quai/ethdb" + "github.com/dominant-strategies/go-quai/log" ) const ( @@ -42,18 +43,20 @@ type BloomIndexer struct { gen *bloombits.Generator // generator to rotate the bloom bits crating the bloom index section uint64 // Section is the section number being processed currently head common.Hash // Head is the hash of the last header processed + logger log.Logger } // NewBloomIndexer returns a chain indexer that generates bloom bits data for the // canonical chain for fast logs filtering. 
-func NewBloomIndexer(db ethdb.Database, size, confirms uint64, nodeCtx int) *ChainIndexer { +func NewBloomIndexer(db ethdb.Database, size, confirms uint64, nodeCtx int, logger log.Logger) *ChainIndexer { backend := &BloomIndexer{ - db: db, - size: size, + db: db, + size: size, + logger: logger, } table := rawdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix)) - return NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits", nodeCtx) + return NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits", nodeCtx, logger) } // Reset implements core.ChainIndexerBackend, starting a new bloombits index diff --git a/core/bodydb.go b/core/bodydb.go index 1e34da0e4f..24d6c1bd8f 100644 --- a/core/bodydb.go +++ b/core/bodydb.go @@ -42,6 +42,8 @@ type BodyDb struct { processor *StateProcessor slicesRunning []common.Location + + logger log.Logger } func NewBodyDb(db ethdb.Database, engine consensus.Engine, hc *HeaderChain, chainConfig *params.ChainConfig, cacheConfig *CacheConfig, txLookupLimit *uint64, vmConfig vm.Config, slicesRunning []common.Location) (*BodyDb, error) { @@ -52,6 +54,7 @@ func NewBodyDb(db ethdb.Database, engine consensus.Engine, hc *HeaderChain, chai engine: engine, db: db, slicesRunning: slicesRunning, + logger: hc.logger, } // Limiting the number of blocks to be stored in the cache in the case of @@ -99,7 +102,7 @@ func (bc *BodyDb) Append(block *types.Block, newInboundEtxs types.Transactions) } rawdb.WriteTxLookupEntriesByBlock(batch, block, nodeCtx) } - log.Debug("Time taken to", "apply state:", common.PrettyDuration(time.Since(stateApply))) + bc.logger.Debug("Time taken to", "apply state:", common.PrettyDuration(time.Since(stateApply))) if err = batch.Write(); err != nil { return nil, err } diff --git a/core/chain_indexer.go b/core/chain_indexer.go index 2dedf58c01..355032d956 100644 --- a/core/chain_indexer.go +++ b/core/chain_indexer.go @@ -93,14 +93,14 @@ type ChainIndexer struct { throttling time.Duration // Disk throttling to prevent a heavy upgrade from hogging resources - log log.Logger - lock sync.Mutex + logger log.Logger + lock sync.Mutex } // NewChainIndexer creates a new chain indexer to do background processing on // chain segments of a given size after certain number of confirmations passed. // The throttling parameter might be used to prevent database thrashing. 
-func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string, nodeCtx int) *ChainIndexer { +func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend ChainIndexerBackend, section, confirm uint64, throttling time.Duration, kind string, nodeCtx int, logger log.Logger) *ChainIndexer { c := &ChainIndexer{ chainDb: chainDb, indexDb: indexDb, @@ -110,6 +110,7 @@ func NewChainIndexer(chainDb ethdb.Database, indexDb ethdb.Database, backend Cha sectionSize: section, confirmsReq: confirm, throttling: throttling, + logger: logger, } // Initialize database dependent fields and start the updater c.loadValidSections() @@ -284,7 +285,7 @@ func (c *ChainIndexer) updateLoop(nodeCtx int) { if time.Since(updated) > 8*time.Second { if c.knownSections > c.storedSections+1 { updating = true - c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections) + c.logger.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections) } updated = time.Now() } @@ -304,7 +305,7 @@ func (c *ChainIndexer) updateLoop(nodeCtx int) { return default: } - c.log.Error("Section processing failed", "error", err) + c.logger.Error("Section processing failed", "error", err) } c.lock.Lock() @@ -314,16 +315,16 @@ func (c *ChainIndexer) updateLoop(nodeCtx int) { c.setValidSections(section + 1) if c.storedSections == c.knownSections && updating { updating = false - c.log.Info("Finished upgrading chain index") + c.logger.Info("Finished upgrading chain index") } c.cascadedHead = c.storedSections*c.sectionSize - 1 for _, child := range c.children { - c.log.Trace("Cascading chain index update", "head", c.cascadedHead) + c.logger.Trace("Cascading chain index update", "head", c.cascadedHead) child.newHead(c.cascadedHead, false) } } else { // If processing failed, don't retry until further notification - c.log.Debug("Chain index processing failed", "section", section, "err", err) + c.logger.Debug("Chain index processing failed", "section", section, "err", err) c.knownSections = c.storedSections } } @@ -346,7 +347,7 @@ func (c *ChainIndexer) updateLoop(nodeCtx int) { // held while processing, the continuity can be broken by a long reorg, in which // case the function returns with an error. 
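In core, the ChainIndexer's old log field is renamed to logger and is now supplied by the caller: NewBloomIndexer takes a logger, keeps a copy on its backend, and forwards the same instance to NewChainIndexer, so section-processing and upgrade-progress messages land in the owning chain's file. A small sketch of that constructor chain with simplified stand-in types, not the real BloomIndexer/ChainIndexer:

package main

import "fmt"

// Logger is a simplified stand-in for go-quai's log.Logger interface.
type Logger interface {
	Info(msg string, ctx ...interface{})
	Trace(msg string, ctx ...interface{})
}

type stdoutLogger struct{ chain string }

func (l stdoutLogger) Info(msg string, ctx ...interface{})  { fmt.Println(l.chain, "INFO", msg, ctx) }
func (l stdoutLogger) Trace(msg string, ctx ...interface{}) { fmt.Println(l.chain, "TRACE", msg, ctx) }

// ChainIndexer keeps its own logger field (the patch renames the old `log`
// field to `logger` and fills it from the constructor argument).
type ChainIndexer struct {
	kind   string
	logger Logger
}

func NewChainIndexer(kind string, logger Logger) *ChainIndexer {
	return &ChainIndexer{kind: kind, logger: logger}
}

func (c *ChainIndexer) processSection(section uint64) {
	c.logger.Trace("Processing new chain section", "kind", c.kind, "section", section)
}

// BloomIndexer forwards the same logger to the ChainIndexer it wraps,
// mirroring the NewBloomIndexer -> NewChainIndexer call in the patch.
type BloomIndexer struct {
	logger Logger
}

func NewBloomIndexer(logger Logger) *ChainIndexer {
	backend := &BloomIndexer{logger: logger}
	backend.logger.Info("bloom indexer backend created")
	return NewChainIndexer("bloombits", logger)
}

func main() {
	idx := NewBloomIndexer(stdoutLogger{chain: "zone-0-0"})
	idx.processSection(7)
}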
func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash, nodeCtx int) (common.Hash, error) { - c.log.Trace("Processing new chain section", "section", section) + c.logger.Trace("Processing new chain section", "section", section) // Reset and partial processing if err := c.backend.Reset(c.ctx, section, lastHead); err != nil { diff --git a/core/chain_makers.go b/core/chain_makers.go index 7987b784a7..c12e6a45ce 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -107,7 +107,7 @@ func (b *BlockGen) AddTxWithChain(hc *HeaderChain, tx *types.Transaction, etxRLi b.statedb.Prepare(tx.Hash(), len(b.txs)) coinbase := b.header.Coinbase() gasUsed := b.header.GasUsed() - receipt, err := ApplyTransaction(b.config, hc, &coinbase, b.gasPool, b.statedb, b.header, tx, &gasUsed, vm.Config{}, etxRLimit, etxPLimit) + receipt, err := ApplyTransaction(b.config, hc, &coinbase, b.gasPool, b.statedb, b.header, tx, &gasUsed, vm.Config{}, etxRLimit, etxPLimit, hc.logger) if err != nil { panic(err) } diff --git a/core/core.go b/core/core.go index ffbcc266b2..67d56cd33c 100644 --- a/core/core.go +++ b/core/core.go @@ -76,10 +76,12 @@ type Core struct { normalListBackoff uint64 // normalListBackoff is the multiple on c_normalListProcCounter which delays the proc on normal list quit chan struct{} // core quit channel + + logger log.Logger } -func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.Header) bool, txConfig *TxPoolConfig, txLookupLimit *uint64, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, genesis *Genesis) (*Core, error) { - slice, err := NewSlice(db, config, txConfig, txLookupLimit, isLocalBlock, chainConfig, slicesRunning, domClientUrl, subClientUrls, engine, cacheConfig, vmConfig, genesis) +func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.Header) bool, txConfig *TxPoolConfig, txLookupLimit *uint64, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, genesis *Genesis, logger log.Logger) (*Core, error) { + slice, err := NewSlice(db, config, txConfig, txLookupLimit, isLocalBlock, chainConfig, slicesRunning, domClientUrl, subClientUrls, engine, cacheConfig, vmConfig, genesis, logger) if err != nil { return nil, err } @@ -90,6 +92,7 @@ func NewCore(db ethdb.Database, config *Config, isLocalBlock func(block *types.H quit: make(chan struct{}), procCounter: 0, normalListBackoff: 1, + logger: logger, } // Initialize the sync target to current header parent entropy @@ -130,7 +133,7 @@ func (c *Core) InsertChain(blocks types.Blocks) (int, error) { if !c.processingCache.Contains(block.Hash()) { c.processingCache.Add(block.Hash(), 1) } else { - log.Info("Already processing block:", "Number:", block.Header().NumberArray(), "Hash:", block.Hash()) + c.logger.Info("Already processing block:", "Number:", block.Header().NumberArray(), "Hash:", block.Hash()) return idx, errors.New("Already in process of appending this block") } newPendingEtxs, _, _, err := c.sl.Append(block.Header(), types.EmptyHeader(), common.Hash{}, false, nil) @@ -145,7 +148,7 @@ func (c *Core) InsertChain(blocks types.Blocks) (int, error) { // Only send the pending Etxs to dom if valid, because in the case of running a slice, for the zones that the node doesn't run, it cannot have the etxs generated if 
pendingEtx.IsValid(trie.NewStackTrie(nil)) { if err := c.SendPendingEtxsToDom(pendingEtx); err != nil { - log.Error("failed to send ETXs to domclient", "block: ", block.Hash(), "err", err) + c.logger.Error("failed to send ETXs to domclient", "block: ", block.Hash(), "err", err) } } } @@ -158,9 +161,9 @@ func (c *Core) InsertChain(blocks types.Blocks) (int, error) { err.Error() == ErrSubNotSyncedToDom.Error() || err.Error() == ErrDomClientNotUp.Error() { if c.sl.CurrentInfo(block.Header()) { - log.Info("Cannot append yet.", "loc", c.NodeLocation().Name(), "number", block.Header().NumberArray(), "hash", block.Hash(), "err", err) + c.logger.Info("Cannot append yet.", "loc", c.NodeLocation().Name(), "number", block.Header().NumberArray(), "hash", block.Hash(), "err", err) } else { - log.Debug("Cannot append yet.", "loc", c.NodeLocation().Name(), "number", block.Header().NumberArray(), "hash", block.Hash(), "err", err) + c.logger.Debug("Cannot append yet.", "loc", c.NodeLocation().Name(), "number", block.Header().NumberArray(), "hash", block.Hash(), "err", err) } if err.Error() == ErrSubNotSyncedToDom.Error() || err.Error() == ErrPendingEtxNotFound.Error() { @@ -170,10 +173,10 @@ func (c *Core) InsertChain(blocks types.Blocks) (int, error) { } return idx, ErrPendingBlock } else if err.Error() != ErrKnownBlock.Error() { - log.Info("Append failed.", "hash", block.Hash(), "err", err) + c.logger.Info("Append failed.", "hash", block.Hash(), "err", err) } if err != nil && strings.Contains(err.Error(), "connection refused") { - log.Error("Append failed because of connection refused error") + c.logger.Error("Append failed because of connection refused error") } else { c.removeFromAppendQueue(block) } @@ -222,7 +225,7 @@ func (c *Core) procAppendQueue() { c.serviceBlocks(hashNumberPriorityList) if len(hashNumberPriorityList) > 0 { - log.Info("Size of hashNumberPriorityList", "len", len(hashNumberPriorityList), "first entry", hashNumberPriorityList[0].Number, "last entry", hashNumberPriorityList[len(hashNumberPriorityList)-1].Number) + c.logger.Info("Size of hashNumberPriorityList", "len", len(hashNumberPriorityList), "first entry", hashNumberPriorityList[0].Number, "last entry", hashNumberPriorityList[len(hashNumberPriorityList)-1].Number) } normalListProcCounter := c.normalListBackoff * c_normalListProcCounter @@ -230,7 +233,7 @@ func (c *Core) procAppendQueue() { c.procCounter = 0 c.serviceBlocks(hashNumberList) if len(hashNumberList) > 0 { - log.Info("Size of hashNumberList", "len", len(hashNumberList), "first entry", hashNumberList[0].Number, "last entry", hashNumberList[len(hashNumberList)-1].Number) + c.logger.Info("Size of hashNumberList", "len", len(hashNumberList), "first entry", hashNumberList[0].Number, "last entry", hashNumberList[len(hashNumberList)-1].Number) } } c.procCounter++ @@ -270,12 +273,12 @@ func (c *Core) serviceBlocks(hashNumberList []types.HashAndNumber) { // If parent header is dom, send a signal to dom to request for the block if it doesnt have it _, parentHeaderOrder, err := c.sl.engine.CalcOrder(parentBlock.Header()) if err != nil { - log.Warn("Error calculating the parent block order in serviceBlocks", "Hash", parentBlock.Hash(), "Number", parentBlock.Header().NumberArray()) + c.logger.Warn("Error calculating the parent block order in serviceBlocks", "Hash", parentBlock.Hash(), "Number", parentBlock.Header().NumberArray()) continue } nodeCtx := c.NodeLocation().Context() if parentHeaderOrder < nodeCtx && c.GetHeaderByHash(parentBlock.Hash()) == nil { - log.Info("Requesting 
the dom to get the block if it doesnt have and try to append", "Hash", parentBlock.Hash(), "Order", parentHeaderOrder) + c.logger.Info("Requesting the dom to get the block if it doesnt have and try to append", "Hash", parentBlock.Hash(), "Order", parentHeaderOrder) if c.sl.domClient != nil { // send a signal to the required dom to fetch the block if it doesnt have it, or its not in its appendqueue go c.sl.domClient.RequestDomToAppendOrFetch(context.Background(), parentBlock.Hash(), parentBlock.ParentEntropy(c.NodeCtx()), parentHeaderOrder) @@ -296,7 +299,7 @@ func (c *Core) serviceBlocks(hashNumberList []types.HashAndNumber) { c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: block.ParentHash(c.NodeCtx()), Entropy: block.ParentEntropy(c.NodeCtx())}) } } else { - log.Warn("Entry in the FH cache without being in the db: ", "Hash: ", hashAndNumber.Hash) + c.logger.Warn("Entry in the FH cache without being in the db: ", "Hash: ", hashAndNumber.Hash) } } } @@ -309,7 +312,7 @@ func (c *Core) RequestDomToAppendOrFetch(hash common.Hash, entropy *big.Int, ord // If prime all you can do it to ask for the block _, exists := c.appendQueue.Get(hash) if !exists { - log.Debug("Block sub asked doesnt exist in append queue, so request the peers for it", "Hash", hash, "Order", order) + c.logger.Debug("Block sub asked doesnt exist in append queue, so request the peers for it", "Hash", hash, "Order", order) block := c.GetBlockOrCandidateByHash(hash) if block == nil { c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: hash, Entropy: entropy}) // Using the missing parent feed to ask for the block @@ -325,7 +328,7 @@ func (c *Core) RequestDomToAppendOrFetch(hash common.Hash, entropy *big.Int, ord } _, exists := c.appendQueue.Get(hash) if !exists { - log.Debug("Block sub asked doesnt exist in append queue, so request the peers for it", "Hash", hash, "Order", order) + c.logger.Debug("Block sub asked doesnt exist in append queue, so request the peers for it", "Hash", hash, "Order", order) block := c.GetBlockByHash(hash) if block == nil { c.sl.missingBlockFeed.Send(types.BlockRequest{Hash: hash, Entropy: entropy}) // Using the missing parent feed to ask for the block @@ -453,13 +456,13 @@ func (c *Core) startStatsTimer() { // printStats displays stats on syncing, latestHeight, etc. func (c *Core) printStats() { - log.Info("Blocks waiting to be appended", "loc", c.NodeLocation().Name(), "len(appendQueue)", len(c.appendQueue.Keys())) + c.logger.Info("Blocks waiting to be appended", "loc", c.NodeLocation().Name(), "len(appendQueue)", len(c.appendQueue.Keys())) // Print hashes & heights of all queue entries. for _, hash := range c.appendQueue.Keys()[:math.Min(len(c.appendQueue.Keys()), c_appendQueuePrintSize)] { if value, exist := c.appendQueue.Peek(hash); exist { hashNumber := types.HashAndNumber{Hash: hash.(common.Hash), Number: value.(blockNumberAndRetryCounter).number} - log.Debug("AppendQueue entry. Number: " + strconv.FormatUint(hashNumber.Number, 10) + ". Hash: " + hashNumber.Hash.String()) + c.logger.Debug("AppendQueue entry. Number: " + strconv.FormatUint(hashNumber.Number, 10) + ". Hash: " + hashNumber.Hash.String()) } } diff --git a/core/genesis.go b/core/genesis.go index c29e3fdeae..8a90d73a1d 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -154,11 +154,11 @@ func (e *GenesisMismatchError) Error() string { // error is a *params.ConfigCompatError and the new, unwritten config is returned. // // The returned chain configuration is never nil. 
-func SetupGenesisBlock(db ethdb.Database, genesis *Genesis, nodeLocation common.Location) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlockWithOverride(db, genesis, nodeLocation) +func SetupGenesisBlock(db ethdb.Database, genesis *Genesis, nodeLocation common.Location, logger log.Logger) (*params.ChainConfig, common.Hash, error) { + return SetupGenesisBlockWithOverride(db, genesis, nodeLocation, logger) } -func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, nodeLocation common.Location) (*params.ChainConfig, common.Hash, error) { +func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, nodeLocation common.Location, logger log.Logger) (*params.ChainConfig, common.Hash, error) { if genesis != nil && genesis.Config == nil { return params.AllProgpowProtocolChanges, common.Hash{}, errGenesisNoConfig } @@ -166,10 +166,10 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, nodeLoca stored := rawdb.ReadCanonicalHash(db, 0) if (stored == common.Hash{}) { if genesis == nil { - log.Info("Writing default main-net genesis block") + logger.Info("Writing default main-net genesis block") genesis = DefaultGenesisBlock() } else { - log.Info("Writing custom genesis block") + logger.Info("Writing custom genesis block") } block, err := genesis.Commit(db, nodeLocation) if err != nil { @@ -206,7 +206,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, nodeLoca newcfg := genesis.configOrDefault(stored) storedcfg := rawdb.ReadChainConfig(db, stored) if storedcfg == nil { - log.Warn("Found genesis block without chain config") + logger.Warn("Found genesis block without chain config") rawdb.WriteChainConfig(db, stored, newcfg) return newcfg, stored, nil } @@ -441,17 +441,17 @@ func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis { } } -func ReadGenesisAlloc(filename string) map[string]GenesisAccount { +func ReadGenesisAlloc(filename string, logger log.Logger) map[string]GenesisAccount { jsonFile, err := os.Open(filename) if err != nil { - log.Error(err.Error()) + logger.Error(err.Error()) return nil } defer jsonFile.Close() // Read the file contents byteValue, err := ioutil.ReadAll(jsonFile) if err != nil { - log.Error(err.Error()) + logger.Error(err.Error()) return nil } @@ -459,7 +459,7 @@ func ReadGenesisAlloc(filename string) map[string]GenesisAccount { var data map[string]GenesisAccount err = json.Unmarshal(byteValue, &data) if err != nil { - log.Error(err.Error()) + logger.Error(err.Error()) return nil } diff --git a/core/headerchain.go b/core/headerchain.go index 37d6024334..352e67a7b3 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -72,11 +72,13 @@ type HeaderChain struct { headermu sync.RWMutex heads []*types.Header slicesRunning []common.Location + + logger log.Logger } // NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points // to the parent's interrupt semaphore. 
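genesis.go follows the same rule for free functions: SetupGenesisBlock, SetupGenesisBlockWithOverride and ReadGenesisAlloc take the logger as an argument and report every failure path through it rather than the global log package. A minimal sketch of ReadGenesisAlloc's shape under that convention; the Logger interface, GenesisAccount struct and alloc file path below are simplified or hypothetical stand-ins, not the real go-quai types:

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
	"os"
)

// Logger is a simplified stand-in for go-quai's log.Logger interface.
type Logger interface {
	Error(msg string, ctx ...interface{})
	Info(msg string, ctx ...interface{})
}

type printLogger struct{}

func (printLogger) Error(msg string, ctx ...interface{}) { fmt.Println("ERROR", msg, ctx) }
func (printLogger) Info(msg string, ctx ...interface{})  { fmt.Println("INFO", msg, ctx) }

// GenesisAccount is a cut-down stand-in for the real core.GenesisAccount.
type GenesisAccount struct {
	Balance *big.Int `json:"balance"`
}

// readGenesisAlloc follows the shape of the patched ReadGenesisAlloc: every
// failure path is reported through the caller-supplied logger and returns nil,
// so the message lands in the right per-chain log file.
func readGenesisAlloc(filename string, logger Logger) map[string]GenesisAccount {
	data, err := os.ReadFile(filename)
	if err != nil {
		logger.Error(err.Error())
		return nil
	}
	var alloc map[string]GenesisAccount
	if err := json.Unmarshal(data, &alloc); err != nil {
		logger.Error(err.Error())
		return nil
	}
	return alloc
}

func main() {
	logger := printLogger{}
	// The file name below is illustrative; in the patch it is built as
	// "genallocs/gen_alloc_" + nodeLocation.Name() + ".json".
	alloc := readGenesisAlloc("genallocs/gen_alloc_zone-0-0.json", logger)
	logger.Info("Allocating genesis accounts", "num", len(alloc))
}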
-func NewHeaderChain(db ethdb.Database, engine consensus.Engine, pEtxsRollupFetcher getPendingEtxsRollup, pEtxsFetcher getPendingEtxs, chainConfig *params.ChainConfig, cacheConfig *CacheConfig, txLookupLimit *uint64, vmConfig vm.Config, slicesRunning []common.Location) (*HeaderChain, error) { +func NewHeaderChain(db ethdb.Database, engine consensus.Engine, pEtxsRollupFetcher getPendingEtxsRollup, pEtxsFetcher getPendingEtxs, chainConfig *params.ChainConfig, cacheConfig *CacheConfig, txLookupLimit *uint64, vmConfig vm.Config, slicesRunning []common.Location, logger log.Logger) (*HeaderChain, error) { headerCache, _ := lru.New(headerCacheLimit) numberCache, _ := lru.New(numberCacheLimit) nodeCtx := chainConfig.Location.Context() @@ -90,6 +92,7 @@ func NewHeaderChain(db ethdb.Database, engine consensus.Engine, pEtxsRollupFetch slicesRunning: slicesRunning, fetchPEtxRollup: pEtxsRollupFetcher, fetchPEtx: pEtxsFetcher, + logger: logger, } pendingEtxsRollup, _ := lru.New(c_maxPendingEtxsRollup) @@ -111,7 +114,7 @@ func NewHeaderChain(db ethdb.Database, engine consensus.Engine, pEtxsRollupFetch if hc.genesisHeader.Hash() != chainConfig.GenesisHash { return nil, fmt.Errorf("genesis block mismatch: have %x, want %x", hc.genesisHeader.Hash(), chainConfig.GenesisHash) } - log.Info("Genesis", "Hash:", hc.genesisHeader.Hash()) + hc.logger.Info("Genesis", "Hash:", hc.genesisHeader.Hash()) if hc.genesisHeader == nil { return nil, ErrNoGenesis } @@ -191,7 +194,7 @@ func (hc *HeaderChain) GetPendingEtxs(hash common.Hash) (*types.PendingEtxs, err } else if res := rawdb.ReadPendingEtxs(hc.headerDb, hash); res != nil { pendingEtxs = *res } else { - log.Trace("unable to find pending etxs for hash in manifest", "hash:", hash.String()) + hc.logger.Trace("unable to find pending etxs for hash in manifest", "hash:", hash.String()) return nil, ErrPendingEtxNotFound } return &pendingEtxs, nil @@ -205,7 +208,7 @@ func (hc *HeaderChain) GetPendingEtxsRollup(hash common.Hash) (*types.PendingEtx } else if res := rawdb.ReadPendingEtxsRollup(hc.headerDb, hash); res != nil { rollups = *res } else { - log.Trace("unable to find pending etxs rollups for hash in manifest", "hash:", hash.String()) + hc.logger.Trace("unable to find pending etxs rollups for hash in manifest", "hash:", hash.String()) return nil, ErrPendingEtxRollupNotFound } return &rollups, nil @@ -220,7 +223,7 @@ func (hc *HeaderChain) GetBloom(hash common.Hash) (*types.Bloom, error) { } else if res := rawdb.ReadBloom(hc.headerDb, hash); res != nil { bloom = *res } else { - log.Debug("unable to find bloom for hash in database", "hash:", hash.String()) + hc.logger.Debug("unable to find bloom for hash in database", "hash:", hash.String()) return nil, ErrBloomNotFound } return &bloom, nil @@ -270,7 +273,7 @@ func (hc *HeaderChain) collectInclusiveEtxRollup(b *types.Block) (types.Transact // Append func (hc *HeaderChain) AppendHeader(header *types.Header) error { nodeCtx := hc.NodeCtx() - log.Debug("HeaderChain Append:", "Header information: Hash:", header.Hash(), "header header hash:", header.Hash(), "Number:", header.NumberU64(nodeCtx), "Location:", header.Location, "Parent:", header.ParentHash(nodeCtx)) + hc.logger.Debug("HeaderChain Append:", "Header information: Hash:", header.Hash(), "header header hash:", header.Hash(), "Number:", header.NumberU64(nodeCtx), "Location:", header.Location, "Parent:", header.ParentHash(nodeCtx)) err := hc.engine.VerifyHeader(hc, header) if err != nil { @@ -306,7 +309,7 @@ func (hc *HeaderChain) AppendBlock(block *types.Block, 
newInboundEtxs types.Tran if err != nil { return err } - log.Debug("Time taken to", "Append in bc", common.PrettyDuration(time.Since(blockappend))) + hc.logger.Debug("Time taken to", "Append in bc", common.PrettyDuration(time.Since(blockappend))) hc.bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs}) if len(logs) > 0 { @@ -329,7 +332,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) error { // write the head block hash to the db rawdb.WriteHeadBlockHash(hc.headerDb, head.Hash()) - log.Info("Setting the current header", "Hash", head.Hash(), "Number", head.NumberArray()) + hc.logger.Info("Setting the current header", "Hash", head.Hash(), "Number", head.NumberArray()) hc.currentHeader.Store(head) // If head is the normal extension of canonical head, we can return by just wiring the canonical hash. @@ -453,10 +456,10 @@ func (hc *HeaderChain) findCommonAncestor(header *types.Header) *types.Header { func (hc *HeaderChain) AddPendingEtxs(pEtxs types.PendingEtxs) error { if !pEtxs.IsValid(trie.NewStackTrie(nil)) { - log.Info("PendingEtx is not valid") + hc.logger.Info("PendingEtx is not valid") return ErrPendingEtxNotValid } - log.Debug("Received pending ETXs", "block: ", pEtxs.Header.Hash()) + hc.logger.Debug("Received pending ETXs", "block: ", pEtxs.Header.Hash()) // Only write the pending ETXs if we have not seen them before if !hc.pendingEtxs.Contains(pEtxs.Header.Hash()) { // Write to pending ETX database @@ -498,7 +501,7 @@ func (hc *HeaderChain) loadLastState() error { } } else { // Recover the current header - log.Warn("Recovering Current Header") + hc.logger.Warn("Recovering Current Header") recoverdHeader := hc.RecoverCurrentHeader() rawdb.WriteHeadBlockHash(hc.headerDb, recoverdHeader.Hash()) hc.currentHeader.Store(recoverdHeader) @@ -534,7 +537,7 @@ func (hc *HeaderChain) Stop() { if hc.NodeCtx() == common.ZONE_CTX && hc.ProcessingState() { hc.bc.processor.Stop() } - log.Info("headerchain stopped") + hc.logger.Info("headerchain stopped") } // Empty checks if the headerchain is empty. 
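The change in this file, and in most of the files that follow, is the same mechanical refactor: package-level log calls are replaced by an instance logger that is handed in through the constructor and stored on the struct, so each running chain can route its output to its own sink. Below is a minimal, self-contained sketch of that shape; the Logger interface, headerChain, and newHeaderChain names are illustrative stand-ins rather than the actual go-quai types, and the standard library logger is used only as a placeholder output.

package main

import "log"

// Logger is a minimal stand-in for the interface injected throughout this
// patch; only the methods exercised below are declared.
type Logger interface {
	Info(msg string, ctx ...interface{})
	Warn(msg string, ctx ...interface{})
}

// stdLogger adapts the standard library logger to the interface above.
type stdLogger struct{}

func (stdLogger) Info(msg string, ctx ...interface{}) { log.Println("INFO", msg, ctx) }
func (stdLogger) Warn(msg string, ctx ...interface{}) { log.Println("WARN", msg, ctx) }

// headerChain mirrors the shape of the change: the component keeps the
// injected logger in a field instead of calling a package-level logger.
type headerChain struct {
	logger Logger
}

// newHeaderChain receives the logger from its caller, the way the real
// constructors in this patch gain a trailing `logger log.Logger` argument.
func newHeaderChain(logger Logger) *headerChain {
	return &headerChain{logger: logger}
}

func (hc *headerChain) stop() {
	// The instance logger is used instead of a package-level one, so the
	// message lands in whatever sink this particular chain was given.
	hc.logger.Info("headerchain stopped")
}

func main() {
	hc := newHeaderChain(stdLogger{})
	hc.stop()
}

In the patch itself the injected value is the go-quai log.Logger, but the wiring is the same: construct the logger once, pass it down through the New* constructors, store it on the struct, and call the field instead of the package-level functions.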
@@ -703,7 +706,7 @@ func (hc *HeaderChain) RecoverCurrentHeader() *types.Header { } } header := hc.GetHeaderByNumber(high) - log.Info("Header Recovered: ", "hash", header.Hash().String()) + hc.logger.Info("Header Recovered: ", "hash", header.Hash().String()) return header } @@ -834,7 +837,7 @@ func (hc *HeaderChain) ExportN(w io.Writer, first uint64, last uint64) error { if first > last { return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) } - log.Info("Exporting batch of blocks", "count", last-first+1) + hc.logger.Info("Exporting batch of blocks", "count", last-first+1) for nr := first; nr <= last; nr++ { block := hc.GetBlockByNumber(nr) diff --git a/core/miner.go b/core/miner.go index 57e4a8e124..3aedb8a21b 100644 --- a/core/miner.go +++ b/core/miner.go @@ -41,15 +41,16 @@ type Miner struct { engine consensus.Engine startCh chan common.Address stopCh chan struct{} + logger log.Logger } -func New(hc *HeaderChain, txPool *TxPool, config *Config, db ethdb.Database, chainConfig *params.ChainConfig, engine consensus.Engine, isLocalBlock func(block *types.Header) bool, processingState bool) *Miner { +func New(hc *HeaderChain, txPool *TxPool, config *Config, db ethdb.Database, chainConfig *params.ChainConfig, engine consensus.Engine, isLocalBlock func(block *types.Header) bool, processingState bool, logger log.Logger) *Miner { miner := &Miner{ hc: hc, engine: engine, startCh: make(chan common.Address), stopCh: make(chan struct{}), - worker: newWorker(config, chainConfig, db, engine, hc, txPool, isLocalBlock, true, processingState), + worker: newWorker(config, chainConfig, db, engine, hc, txPool, isLocalBlock, true, processingState, logger), coinbase: config.Etherbase, } go miner.update() @@ -123,7 +124,7 @@ func (miner *Miner) MakeExtraData(extra []byte) []byte { }) } if uint64(len(extra)) > params.MaximumExtraDataSize { - log.Warn("Miner extra data exceed limit", "extra", hexutil.Bytes(extra), "limit", params.MaximumExtraDataSize) + miner.logger.Warn("Miner extra data exceed limit", "extra", hexutil.Bytes(extra), "limit", params.MaximumExtraDataSize) extra = nil } return extra diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 978502b42b..dfd95d5c2f 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -94,11 +94,11 @@ const crashesToKeep = 10 // the previous data // - a list of timestamps // - a count of how many old unclean-shutdowns have been discarded -func PushUncleanShutdownMarker(db ethdb.KeyValueStore) ([]uint64, uint64, error) { +func PushUncleanShutdownMarker(db ethdb.KeyValueStore, logger log.Logger) ([]uint64, uint64, error) { var uncleanShutdowns crashList // Read old data if data, err := db.Get(uncleanShutdownKey); err != nil { - log.Warn("Error reading unclean shutdown markers", "error", err) + logger.Warn("Error reading unclean shutdown markers", "error", err) } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil { return nil, 0, err } @@ -115,7 +115,7 @@ func PushUncleanShutdownMarker(db ethdb.KeyValueStore) ([]uint64, uint64, error) // And save it again data, _ := rlp.EncodeToBytes(uncleanShutdowns) if err := db.Put(uncleanShutdownKey, data); err != nil { - log.Warn("Failed to write unclean-shutdown marker", "err", err) + logger.Warn("Failed to write unclean-shutdown marker", "err", err) return nil, 0, err } return previous, discarded, nil diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 7eea270dbe..f184df16c7 100644 --- 
a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -126,9 +126,9 @@ func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { // NewDatabaseWithFreezer creates a high level database on top of a given key- // value data store with a freezer moving immutable chain segments into cold // storage. -func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool, nodeCtx int) (ethdb.Database, error) { +func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool, nodeCtx int, logger log.Logger) (ethdb.Database, error) { // Create the idle freezer instance - frdb, err := newFreezer(freezer, namespace, readonly) + frdb, err := newFreezer(freezer, namespace, readonly, logger) if err != nil { return nil, err } @@ -225,12 +225,12 @@ func NewMemoryDatabaseWithCap(size int) ethdb.Database { // NewLevelDBDatabase creates a persistent key-value database without a freezer // moving immutable chain segments into cold storage. -func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { - db, err := leveldb.New(file, cache, handles, namespace, readonly) +func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool, logger log.Logger) (ethdb.Database, error) { + db, err := leveldb.New(file, cache, handles, namespace, readonly, logger) if err != nil { return nil, err } - log.Info("Using LevelDB as the backing database") + logger.Info("Using LevelDB as the backing database") return NewDatabase(db), nil } @@ -273,15 +273,15 @@ type OpenOptions struct { // +---------------------------------------- // db is non-existent | leveldb default | specified type // db is existent | from db | specified type (if compatible) -func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { +func openKeyValueDatabase(o OpenOptions, logger log.Logger) (ethdb.Database, error) { existingDb := hasPreexistingDb(o.Directory) if len(existingDb) != 0 && len(o.Type) != 0 && o.Type != existingDb { return nil, fmt.Errorf("db.engine choice was %v but found pre-existing %v database in specified data directory", o.Type, existingDb) } if o.Type == dbPebble || existingDb == dbPebble { if PebbleEnabled { - log.Info("Using pebble as the backing database") - return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + logger.Info("Using pebble as the backing database") + return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, logger) } else { return nil, errors.New("db.engine 'pebble' not supported on this platform") } @@ -289,9 +289,9 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { if len(o.Type) != 0 && o.Type != dbLeveldb { return nil, fmt.Errorf("unknown db.engine %v", o.Type) } - log.Info("Using leveldb as the backing database") + logger.Info("Using leveldb as the backing database") // Use leveldb, either as default (no explicit choice), or pre-existing, or chosen explicitly - return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, logger) } // Open opens both a disk-based key-value database such as leveldb or pebble, but also @@ -299,15 +299,15 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { // set on the provided OpenOptions. // The passed o.AncientDir indicates the path of root ancient directory where // the chain freezer can be opened. 
-func Open(o OpenOptions, nodeCtx int) (ethdb.Database, error) { - kvdb, err := openKeyValueDatabase(o) +func Open(o OpenOptions, nodeCtx int, logger log.Logger) (ethdb.Database, error) { + kvdb, err := openKeyValueDatabase(o, logger) if err != nil { return nil, err } if len(o.AncientsDirectory) == 0 { return kvdb, nil } - frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, nodeCtx) + frdb, err := NewDatabaseWithFreezer(kvdb, o.AncientsDirectory, o.Namespace, o.ReadOnly, nodeCtx, logger) if err != nil { kvdb.Close() return nil, err @@ -347,7 +347,7 @@ func (s *stat) Count() string { // InspectDatabase traverses the entire database and checks the size // of all different categories of data. -func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { +func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte, logger log.Logger) error { it := db.NewIterator(keyPrefix, keyStart) defer it.Release() @@ -455,7 +455,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { } count++ if count%1000 == 0 && time.Since(logged) > 8*time.Second { - log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) + logger.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start))) logged = time.Now() } } @@ -503,7 +503,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { table.Render() if unaccounted.size > 0 { - log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count) + logger.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count) } return nil diff --git a/core/rawdb/databases_64bit.go b/core/rawdb/databases_64bit.go index d5e3a17a70..e5eee20c92 100644 --- a/core/rawdb/databases_64bit.go +++ b/core/rawdb/databases_64bit.go @@ -21,6 +21,7 @@ package rawdb import ( "github.com/dominant-strategies/go-quai/ethdb" "github.com/dominant-strategies/go-quai/ethdb/pebble" + "github.com/dominant-strategies/go-quai/log" ) // Pebble is unsuported on 32bit architecture @@ -28,8 +29,8 @@ const PebbleEnabled = true // NewPebbleDBDatabase creates a persistent key-value database without a freezer // moving immutable chain segments into cold storage. -func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { - db, err := pebble.New(file, cache, handles, namespace, readonly) +func NewPebbleDBDatabase(file string, cache int, handles int, namespace string, readonly bool, logger log.Logger) (ethdb.Database, error) { + db, err := pebble.New(file, cache, handles, namespace, readonly, logger) if err != nil { return nil, err } diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index 4442c4bed3..bcf43777e2 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -85,16 +85,18 @@ type freezer struct { quit chan struct{} wg sync.WaitGroup closeOnce sync.Once + + logger log.Logger } // newFreezer creates a chain freezer that moves ancient chain data into // append-only flat file containers. -func newFreezer(datadir string, namespace string, readonly bool) (*freezer, error) { +func newFreezer(datadir string, namespace string, readonly bool, logger log.Logger) (*freezer, error) { // Create the initial freezer object // Ensure the datadir is not a symbolic link if it exists. 
if info, err := os.Lstat(datadir); !os.IsNotExist(err) { if info.Mode()&os.ModeSymlink != 0 { - log.Warn("Symbolic link ancient database is not supported", "path", datadir) + logger.Warn("Symbolic link ancient database is not supported", "path", datadir) return nil, errSymlinkDatadir } } @@ -112,6 +114,7 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro instanceLock: lock, trigger: make(chan chan struct{}), quit: make(chan struct{}), + logger: logger, } for name, disableSnappy := range FreezerNoSnappy { table, err := newTable(datadir, name, disableSnappy) @@ -131,7 +134,7 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro lock.Release() return nil, err } - log.Info("Opened ancient database", "database", datadir, "readonly", readonly) + logger.Info("Opened ancient database", "database", datadir, "readonly", readonly) return freezer, nil } @@ -207,30 +210,30 @@ func (f *freezer) AppendAncient(number uint64, hash, header, body, receipts, etx if err != nil { rerr := f.repair() if rerr != nil { - log.Fatal("Failed to repair freezer", "err", rerr) + f.logger.Fatal("Failed to repair freezer", "err", rerr) } - log.Info("Append ancient failed", "number", number, "err", err) + f.logger.Info("Append ancient failed", "number", number, "err", err) } }() // Inject all the components into the relevant data tables if err := f.tables[freezerHashTable].Append(f.frozen, hash[:]); err != nil { - log.Error("Failed to append ancient hash", "number", f.frozen, "hash", hash, "err", err) + f.logger.Error("Failed to append ancient hash", "number", f.frozen, "hash", hash, "err", err) return err } if err := f.tables[freezerHeaderTable].Append(f.frozen, header); err != nil { - log.Error("Failed to append ancient header", "number", f.frozen, "hash", hash, "err", err) + f.logger.Error("Failed to append ancient header", "number", f.frozen, "hash", hash, "err", err) return err } if err := f.tables[freezerBodiesTable].Append(f.frozen, body); err != nil { - log.Error("Failed to append ancient body", "number", f.frozen, "hash", hash, "err", err) + f.logger.Error("Failed to append ancient body", "number", f.frozen, "hash", hash, "err", err) return err } if err := f.tables[freezerReceiptTable].Append(f.frozen, receipts); err != nil { - log.Error("Failed to append ancient receipts", "number", f.frozen, "hash", hash, "err", err) + f.logger.Error("Failed to append ancient receipts", "number", f.frozen, "hash", hash, "err", err) return err } if err := f.tables[freezerEtxSetsTable].Append(f.frozen, etxSet); err != nil { - log.Error("Failed to append ancient etx set", "number", f.frozen, "hash", hash, "err", err) + f.logger.Error("Failed to append ancient etx set", "number", f.frozen, "hash", hash, "err", err) return err } atomic.AddUint64(&f.frozen, 1) // Only modify atomically @@ -283,7 +286,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { for { select { case <-f.quit: - log.Info("Freezer shutting down") + f.logger.Info("Freezer shutting down") return default: } @@ -305,7 +308,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { // Retrieve the freezing threshold. 
hash := ReadHeadBlockHash(nfdb) if hash == (common.Hash{}) { - log.Debug("Current full block hash unavailable") // new chain, empty database + f.logger.Debug("Current full block hash unavailable") // new chain, empty database backoff = true continue } @@ -314,23 +317,23 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { switch { case number == nil: - log.Error("Current full block number unavailable", "hash", hash) + f.logger.Error("Current full block number unavailable", "hash", hash) backoff = true continue case *number < threshold: - log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold) + f.logger.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold) backoff = true continue case *number-threshold <= f.frozen: - log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen) + f.logger.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen) backoff = true continue } head := ReadHeader(nfdb, hash, *number) if head == nil { - log.Error("Current full block unavailable", "number", *number, "hash", hash) + f.logger.Error("Current full block unavailable", "number", *number, "hash", hash) backoff = true continue } @@ -348,30 +351,30 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { // Retrieves all the components of the canonical block hash := ReadCanonicalHash(nfdb, f.frozen) if hash == (common.Hash{}) { - log.Error("Canonical hash missing, can't freeze", "number", f.frozen) + f.logger.Error("Canonical hash missing, can't freeze", "number", f.frozen) break } header := ReadHeaderRLP(nfdb, hash, f.frozen) if len(header) == 0 { - log.Error("Block header missing, can't freeze", "number", f.frozen, "hash", hash) + f.logger.Error("Block header missing, can't freeze", "number", f.frozen, "hash", hash) break } body := ReadBodyRLP(nfdb, hash, f.frozen) if len(body) == 0 { - log.Error("Block body missing, can't freeze", "number", f.frozen, "hash", hash) + f.logger.Error("Block body missing, can't freeze", "number", f.frozen, "hash", hash) break } receipts := ReadReceiptsRLP(nfdb, hash, f.frozen) if len(receipts) == 0 { - log.Error("Block receipts missing, can't freeze", "number", f.frozen, "hash", hash) + f.logger.Error("Block receipts missing, can't freeze", "number", f.frozen, "hash", hash) break } etxSet := ReadEtxSetRLP(nfdb, hash, f.frozen) if len(etxSet) == 0 { - log.Error("Total etxset missing, can't freeze", "number", f.frozen, "hash", hash) + f.logger.Error("Total etxset missing, can't freeze", "number", f.frozen, "hash", hash) break } - log.Trace("Deep froze ancient block", "number", f.frozen, "hash", hash) + f.logger.Trace("Deep froze ancient block", "number", f.frozen, "hash", hash) // Inject all the components into the relevant data tables if err := f.AppendAncient(f.frozen, hash[:], header, body, receipts, etxSet); err != nil { break @@ -380,7 +383,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { } // Batch of blocks have been frozen, flush them before wiping from leveldb if err := f.Sync(); err != nil { - log.Fatal("Failed to flush frozen tables", "err", err) + f.logger.Fatal("Failed to flush frozen tables", "err", err) } // Wipe out all data from the active database batch := db.NewBatch() @@ -392,7 +395,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { } } if err := batch.Write(); err != nil { - log.Fatal("Failed to delete frozen canonical blocks", "err", err) + 
f.logger.Fatal("Failed to delete frozen canonical blocks", "err", err) } batch.Reset() @@ -403,13 +406,13 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { if number != 0 { dangling = ReadAllHashes(db, number) for _, hash := range dangling { - log.Trace("Deleting side chain", "number", number, "hash", hash) + f.logger.Trace("Deleting side chain", "number", number, "hash", hash) DeleteBlock(batch, hash, number) } } } if err := batch.Write(); err != nil { - log.Fatal("Failed to delete frozen side blocks", "err", err) + f.logger.Fatal("Failed to delete frozen side blocks", "err", err) } batch.Reset() @@ -419,7 +422,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { for len(dangling) > 0 { drop := make(map[common.Hash]struct{}) for _, hash := range dangling { - log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash) + f.logger.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash) drop[hash] = struct{}{} } children := ReadAllHashes(db, tip) @@ -427,7 +430,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { // Dig up the child and ensure it's dangling child := ReadHeader(nfdb, children[i], tip) if child == nil { - log.Error("Missing dangling header", "number", tip, "hash", children[i]) + f.logger.Error("Missing dangling header", "number", tip, "hash", children[i]) continue } if _, ok := drop[child.ParentHash(nodeCtx)]; !ok { @@ -436,14 +439,14 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { continue } // Delete all block data associated with the child - log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash(nodeCtx)) + f.logger.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash(nodeCtx)) DeleteBlock(batch, children[i], tip) } dangling = children tip++ } if err := batch.Write(); err != nil { - log.Fatal("Failed to delete dangling side blocks", "err", err) + f.logger.Fatal("Failed to delete dangling side blocks", "err", err) } } // Log something friendly for the user @@ -453,7 +456,7 @@ func (f *freezer) freeze(db ethdb.KeyValueStore, nodeCtx int) { if n := len(ancients); n > 0 { context = append(context, []interface{}{"hash", ancients[n-1]}...) 
} - log.Info("Deep froze chain segment", context) + f.logger.Info("Deep froze chain segment", context) // Avoid database thrashing with tiny writes if f.frozen-first < freezerBatchLimit { diff --git a/core/slice.go b/core/slice.go index 1972708d17..6cde909b04 100644 --- a/core/slice.go +++ b/core/slice.go @@ -82,9 +82,10 @@ type Slice struct { reorgMu sync.RWMutex badHashesCache map[common.Hash]bool + logger log.Logger } -func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLookupLimit *uint64, isLocalBlock func(block *types.Header) bool, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, genesis *Genesis) (*Slice, error) { +func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLookupLimit *uint64, isLocalBlock func(block *types.Header) bool, chainConfig *params.ChainConfig, slicesRunning []common.Location, domClientUrl string, subClientUrls []string, engine consensus.Engine, cacheConfig *CacheConfig, vmConfig vm.Config, genesis *Genesis, logger log.Logger) (*Slice, error) { nodeCtx := chainConfig.Location.Context() sl := &Slice{ config: chainConfig, @@ -92,10 +93,11 @@ func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLooku sliceDb: db, quit: make(chan struct{}), badHashesCache: make(map[common.Hash]bool), + logger: logger, } var err error - sl.hc, err = NewHeaderChain(db, engine, sl.GetPEtxRollupAfterRetryThreshold, sl.GetPEtxAfterRetryThreshold, chainConfig, cacheConfig, txLookupLimit, vmConfig, slicesRunning) + sl.hc, err = NewHeaderChain(db, engine, sl.GetPEtxRollupAfterRetryThreshold, sl.GetPEtxAfterRetryThreshold, chainConfig, cacheConfig, txLookupLimit, vmConfig, slicesRunning, logger) if err != nil { return nil, err } @@ -104,10 +106,10 @@ func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLooku // tx pool is only used in zone if nodeCtx == common.ZONE_CTX && sl.ProcessingState() { - sl.txPool = NewTxPool(*txConfig, chainConfig, sl.hc) + sl.txPool = NewTxPool(*txConfig, chainConfig, sl.hc, logger) sl.hc.pool = sl.txPool } - sl.miner = New(sl.hc, sl.txPool, config, db, chainConfig, engine, isLocalBlock, sl.ProcessingState()) + sl.miner = New(sl.hc, sl.txPool, config, db, chainConfig, engine, isLocalBlock, sl.ProcessingState(), sl.logger) sl.phCache, _ = lru.New(c_phCacheSize) @@ -119,14 +121,14 @@ func NewSlice(db ethdb.Database, config *Config, txConfig *TxPoolConfig, txLooku sl.subClients = make([]*quaiclient.Client, 3) if nodeCtx != common.ZONE_CTX { go func() { - sl.subClients = makeSubClients(subClientUrls) + sl.subClients = makeSubClients(subClientUrls, sl.logger) }() } // only set domClient if the chain is not Prime. 
if nodeCtx != common.PRIME_CTX { go func() { - sl.domClient = makeDomClient(domClientUrl) + sl.domClient = makeDomClient(domClientUrl, sl.logger) }() } @@ -157,9 +159,9 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do // Only print in Info level if block is c_startingPrintLimit behind or less if sl.CurrentInfo(header) { - log.Info("Starting slice append", "hash", header.Hash(), "number", header.NumberArray(), "location", header.Location(), "parent hash", header.ParentHash(nodeCtx)) + sl.logger.Info("Starting slice append", "hash", header.Hash(), "number", header.NumberArray(), "location", header.Location(), "parent hash", header.ParentHash(nodeCtx)) } else { - log.Debug("Starting slice append", "hash", header.Hash(), "number", header.NumberArray(), "location", header.Location(), "parent hash", header.ParentHash(nodeCtx)) + sl.logger.Debug("Starting slice append", "hash", header.Hash(), "number", header.NumberArray(), "location", header.Location(), "parent hash", header.ParentHash(nodeCtx)) } time0_1 := common.PrettyDuration(time.Since(start)) @@ -176,7 +178,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do } // Don't append the block which already exists in the database. if sl.hc.HasHeader(header.Hash(), header.NumberU64(nodeCtx)) && (sl.hc.GetTerminiByHash(header.Hash()) != nil) { - log.Debug("Block has already been appended: ", "Hash: ", header.Hash()) + sl.logger.Debug("Block has already been appended: ", "Hash: ", header.Hash()) return nil, false, false, nil } time1 := common.PrettyDuration(time.Since(start)) @@ -193,7 +195,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do if err != nil { return nil, false, false, err } - log.Debug("PCRC done", "hash", header.Hash(), "number", header.NumberArray(), "termini", newTermini) + sl.logger.Debug("PCRC done", "hash", header.Hash(), "number", header.NumberArray(), "termini", newTermini) time2 := common.PrettyDuration(time.Since(start)) // Append the new block @@ -230,7 +232,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do } else { newInboundEtxs, _, err = sl.CollectNewlyConfirmedEtxs(block, block.Location()) if err != nil { - log.Trace("Error collecting newly confirmed etxs: ", "err", err) + sl.logger.Trace("Error collecting newly confirmed etxs: ", "err", err) // Keeping track of the number of times pending etx fails and if it crossed the retry threshold // ask the sub for the pending etx/rollup data val, exist := sl.pEtxRetryCache.Get(block.Hash()) @@ -295,7 +297,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do sl.WriteBestPhKey(sl.config.GenesisHash) sl.writePhCache(block.Hash(), pendingHeaderWithTermini) bestPh = types.EmptyPendingHeader() - log.Error("BestPh Key does not exist for", "key", sl.bestPhKey) + sl.logger.Error("BestPh Key does not exist for", "key", sl.bestPhKey) } time8 = common.PrettyDuration(time.Since(start)) @@ -318,7 +320,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do if subReorg || (sl.hc.CurrentHeader().NumberU64(nodeCtx) < block.NumberU64(nodeCtx)+c_currentStateComputeWindow) { err := sl.hc.SetCurrentState(block.Header()) if err != nil { - log.Error("Error setting current state", "err", err, "Hash", block.Hash()) + sl.logger.Error("Error setting current state", "err", err, "Hash", block.Hash()) return nil, false, false, err } } @@ -337,7 +339,7 @@ func (sl *Slice) Append(header *types.Header, 
domPendingHeader *types.Header, do if order == common.ZONE_CTX && pendingHeaderWithTermini.Termini().DomTerminus(sl.NodeLocation()) != bestPh.Termini().DomTerminus(sl.NodeLocation()) { updateDom = true } - log.Info("Choosing phHeader Append:", "NumberArray:", pendingHeaderWithTermini.Header().NumberArray(), "Number:", pendingHeaderWithTermini.Header().Number(nodeCtx), "ParentHash:", pendingHeaderWithTermini.Header().ParentHash(nodeCtx), "Terminus:", pendingHeaderWithTermini.Termini().DomTerminus(sl.NodeLocation())) + sl.logger.Info("Choosing phHeader Append:", "NumberArray:", pendingHeaderWithTermini.Header().NumberArray(), "Number:", pendingHeaderWithTermini.Header().Number(nodeCtx), "ParentHash:", pendingHeaderWithTermini.Header().ParentHash(nodeCtx), "Terminus:", pendingHeaderWithTermini.Termini().DomTerminus(sl.NodeLocation())) sl.WriteBestPhKey(pendingHeaderWithTermini.Termini().DomTerminus(sl.NodeLocation())) block.SetAppendTime(time.Duration(time9)) } @@ -359,9 +361,9 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do sl.relayPh(block, pendingHeaderWithTermini, domOrigin, block.Location(), subReorg) time10 := common.PrettyDuration(time.Since(start)) - log.Info("Times during append:", "t0_1", time0_1, "t0_2", time0_2, "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t5:", time5, "t6:", time6, "t7:", time7, "t8:", time8, "t9:", time9, "t10:", time10) - log.Debug("Times during sub append:", "t6_1:", time6_1, "t6_2:", time6_2, "t6_3:", time6_3) - log.Info("Appended new block", "number", block.Header().NumberArray(), "hash", block.Hash(), + sl.logger.Info("Times during append:", "t0_1", time0_1, "t0_2", time0_2, "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t5:", time5, "t6:", time6, "t7:", time7, "t8:", time8, "t9:", time9, "t10:", time10) + sl.logger.Debug("Times during sub append:", "t6_1:", time6_1, "t6_2:", time6_2, "t6_3:", time6_3) + sl.logger.Info("Appended new block", "number", block.Header().NumberArray(), "hash", block.Hash(), "difficulty", block.Header().Difficulty(), "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "etxs", len(block.ExtTransactions()), "gas", block.GasUsed(), "gasLimit", block.GasLimit(), "root", block.Root(), @@ -371,7 +373,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do if nodeCtx == common.ZONE_CTX { if updateDom { - log.Info("Append updateDom", "oldTermini():", bestPh.Termini().DomTerminus(sl.NodeLocation()), "newTermini():", pendingHeaderWithTermini.Termini().DomTerminus(sl.NodeLocation()), "location:", sl.NodeLocation()) + sl.logger.Info("Append updateDom", "oldTermini():", bestPh.Termini().DomTerminus(sl.NodeLocation()), "newTermini():", pendingHeaderWithTermini.Termini().DomTerminus(sl.NodeLocation()), "location:", sl.NodeLocation()) if sl.domClient != nil { go sl.domClient.UpdateDom(context.Background(), bestPh.Termini().DomTerminus(sl.NodeLocation()), pendingHeaderWithTermini, sl.NodeLocation()) } @@ -406,7 +408,7 @@ func (sl *Slice) relayPh(block *types.Block, pendingHeaderWithTermini types.Pend sl.miner.worker.pendingHeaderFeed.Send(bestPh.Header()) return } else { - log.Warn("Pending Header for Best ph key does not exist", "best ph key", sl.bestPhKey) + sl.logger.Warn("Pending Header for Best ph key does not exist", "best ph key", sl.bestPhKey) } } else if !domOrigin && subReorg { for _, i := range sl.randomRelayArray() { @@ -426,23 +428,23 @@ func (sl *Slice) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingH defer 
sl.phCacheMu.Unlock() newDomTermini := sl.hc.GetTerminiByHash(pendingHeader.Termini().DomTerminiAtIndex(location.SubIndex(nodeLocation))) if newDomTermini == nil { - log.Warn("New Dom Termini doesn't exists in the database for", "hash", pendingHeader.Termini().DomTerminiAtIndex(location.SubIndex(nodeLocation))) + sl.logger.Warn("New Dom Termini doesn't exists in the database for", "hash", pendingHeader.Termini().DomTerminiAtIndex(location.SubIndex(nodeLocation))) return } newDomTerminus := newDomTermini.DomTerminus(nodeLocation) oldDomTermini := sl.hc.GetTerminiByHash(oldTerminus) if oldDomTermini == nil { - log.Warn("Old Dom Termini doesn't exists in the database for", "hash", oldTerminus) + sl.logger.Warn("Old Dom Termini doesn't exists in the database for", "hash", oldTerminus) return } oldDomTerminus := oldDomTermini.DomTerminus(nodeLocation) // Find the dom TerminusHash with the newTerminus newPh, newDomTerminiExists := sl.readPhCache(newDomTerminus) if !newDomTerminiExists { - log.Warn("Update Dom:", "newTerminus does not exist:", newDomTerminus) + sl.logger.Warn("Update Dom:", "newTerminus does not exist:", newDomTerminus) return } - log.Debug("UpdateDom:", "NewDomTerminus:", newDomTerminus, "OldDomTerminus:", oldDomTerminus, "NewDomTermini:", pendingHeader.Termini().DomTermini(), "Location") + sl.logger.Debug("UpdateDom:", "NewDomTerminus:", newDomTerminus, "OldDomTerminus:", oldDomTerminus, "NewDomTermini:", pendingHeader.Termini().DomTermini(), "Location") if nodeCtx == common.REGION_CTX && oldDomTerminus == newPh.Termini().DomTerminus(nodeLocation) { // Can update sl.WriteBestPhKey(newDomTerminus) @@ -450,17 +452,17 @@ func (sl *Slice) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingH if exists { for _, i := range sl.randomRelayArray() { if sl.subClients[i] != nil { - log.Info("SubRelay in UpdateDom", "parent Hash:", newPh.Header().ParentHash(nodeCtx), "Number", newPh.Header().NumberArray(), "newTermini:", newPh.Termini().SubTerminiAtIndex(i)) + sl.logger.Info("SubRelay in UpdateDom", "parent Hash:", newPh.Header().ParentHash(nodeCtx), "Number", newPh.Header().NumberArray(), "newTermini:", newPh.Termini().SubTerminiAtIndex(i)) sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.Header().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx) } } } else { - log.Warn("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus) + sl.logger.Warn("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus) } return } else { // need to update dom - log.Info("UpdateDom needs to updateDom", "oldDomTermini:", oldDomTerminus, "newDomTermini:", newPh.Termini(), "location:", location) + sl.logger.Info("UpdateDom needs to updateDom", "oldDomTermini:", oldDomTerminus, "newDomTermini:", newPh.Termini(), "location:", location) if sl.domClient != nil { go sl.domClient.UpdateDom(context.Background(), oldDomTerminus, types.NewPendingHeader(pendingHeader.Header(), newPh.Termini()), location) } else { @@ -470,12 +472,12 @@ func (sl *Slice) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingH if exists { for _, i := range sl.randomRelayArray() { if sl.subClients[i] != nil { - log.Info("SubRelay in UpdateDom:", "Parent Hash:", newPh.Header().ParentHash(nodeCtx), "Number", newPh.Header().NumberArray(), "NewTermini:", newPh.Termini().SubTerminiAtIndex(i)) + sl.logger.Info("SubRelay in UpdateDom:", "Parent Hash:", newPh.Header().ParentHash(nodeCtx), "Number", newPh.Header().NumberArray(), "NewTermini:", 
newPh.Termini().SubTerminiAtIndex(i)) sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.Header().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx) } } } else { - log.Warn("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus) + sl.logger.Warn("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus) } return } @@ -602,7 +604,7 @@ func (sl *Slice) CollectNewlyConfirmedEtxs(block *types.Block, location common.L rollup, exists := sl.hc.subRollupCache.Get(block.Hash()) if exists && rollup != nil { subRollup = rollup.(types.Transactions) - log.Info("Found the rollup in cache", "Hash", block.Hash(), "len", len(subRollup)) + sl.logger.Info("Found the rollup in cache", "Hash", block.Hash(), "len", len(subRollup)) } else { subRollup, err = sl.hc.CollectSubRollup(block) if err != nil { @@ -663,7 +665,7 @@ func (sl *Slice) pcrc(batch ethdb.Batch, header *types.Header, domTerminus commo nodeCtx := sl.NodeCtx() location := header.Location() - log.Debug("PCRC:", "Parent Hash:", header.ParentHash(nodeCtx), "Number", header.Number, "Location:", header.Location()) + sl.logger.Debug("PCRC:", "Parent Hash:", header.ParentHash(nodeCtx), "Number", header.Number, "Location:", header.Location()) termini := sl.hc.GetTerminiByHash(header.ParentHash(nodeCtx)) if !termini.IsValid() { @@ -686,7 +688,7 @@ func (sl *Slice) pcrc(batch ethdb.Batch, header *types.Header, domTerminus commo // Check for a graph cyclic reference if domOrigin { if termini.DomTerminus(nodeLocation) != domTerminus { - log.Warn("Cyclic Block:", "block number", header.NumberArray(), "hash", header.Hash(), "terminus", domTerminus, "termini", termini.DomTerminus(nodeLocation)) + sl.logger.Warn("Cyclic Block:", "block number", header.NumberArray(), "hash", header.Hash(), "terminus", domTerminus, "termini", termini.DomTerminus(nodeLocation)) return common.Hash{}, types.EmptyTermini(), errors.New("termini do not match, block rejected due to cyclic reference") } } @@ -703,7 +705,7 @@ func (sl *Slice) pcrc(batch ethdb.Batch, header *types.Header, domTerminus commo // POEM compares externS to the currentHead S and returns true if externS is greater func (sl *Slice) poem(externS *big.Int, currentS *big.Int) bool { - log.Debug("POEM:", "currentS:", common.BigBitsToBits(currentS), "externS:", common.BigBitsToBits(externS)) + sl.logger.Debug("POEM:", "currentS:", common.BigBitsToBits(currentS), "externS:", common.BigBitsToBits(externS)) reorg := currentS.Cmp(externS) <= 0 return reorg } @@ -857,7 +859,7 @@ func (sl *Slice) computePendingHeader(localPendingHeaderWithTermini types.Pendin var newPh *types.Header if exists { - log.Debug("computePendingHeader:", "hash:", hash, "pendingHeader:", cachedPendingHeaderWithTermini, "termini:", cachedPendingHeaderWithTermini.Termini()) + sl.logger.Debug("computePendingHeader:", "hash:", hash, "pendingHeader:", cachedPendingHeaderWithTermini, "termini:", cachedPendingHeaderWithTermini.Termini()) if domOrigin { newPh = sl.combinePendingHeader(localPendingHeaderWithTermini.Header(), domPendingHeader, nodeCtx, true) return types.NewPendingHeader(types.CopyHeader(newPh), localPendingHeaderWithTermini.Termini()) @@ -901,7 +903,7 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini bestPh, exists := sl.readPhCache(sl.bestPhKey) if nodeCtx == common.ZONE_CTX && exists && sl.bestPhKey != localPendingHeader.Termini().DomTerminus(nodeLocation) && !sl.poem(newEntropy, bestPh.Header().ParentEntropy(nodeCtx)) { - 
log.Warn("Subrelay Rejected", "local dom terminus", localPendingHeader.Termini().DomTerminus(nodeLocation), "Number", combinedPendingHeader.NumberArray(), "best ph key", sl.bestPhKey, "number", bestPh.Header().NumberArray(), "newentropy", newEntropy) + sl.logger.Warn("Subrelay Rejected", "local dom terminus", localPendingHeader.Termini().DomTerminus(nodeLocation), "Number", combinedPendingHeader.NumberArray(), "best ph key", sl.bestPhKey, "number", bestPh.Header().NumberArray(), "newentropy", newEntropy) sl.updatePhCache(types.NewPendingHeader(combinedPendingHeader, localTermini), false, nil, sl.poem(newEntropy, localPendingHeader.Header().ParentEntropy(nodeCtx)), location) go sl.domClient.UpdateDom(context.Background(), localPendingHeader.Termini().DomTerminus(nodeLocation), bestPh, sl.NodeLocation()) return nil @@ -909,7 +911,7 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini // Pick the head if subReorg { if (localPendingHeader.Header().Root() != types.EmptyRootHash && nodeCtx == common.ZONE_CTX) || nodeCtx == common.REGION_CTX { - log.Info("Choosing phHeader pickPhHead:", "NumberArray:", combinedPendingHeader.NumberArray(), "Number:", combinedPendingHeader.Number(nodeCtx), "ParentHash:", combinedPendingHeader.ParentHash(nodeCtx), "Terminus:", localPendingHeader.Termini().DomTerminus(nodeLocation)) + sl.logger.Info("Choosing phHeader pickPhHead:", "NumberArray:", combinedPendingHeader.NumberArray(), "Number:", combinedPendingHeader.Number(nodeCtx), "ParentHash:", combinedPendingHeader.ParentHash(nodeCtx), "Terminus:", localPendingHeader.Termini().DomTerminus(nodeLocation)) sl.WriteBestPhKey(localPendingHeader.Termini().DomTerminus(nodeLocation)) } else { block := sl.hc.GetBlockByHash(localPendingHeader.Header().ParentHash(nodeCtx)) @@ -918,19 +920,19 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini // after mining this block since the state will already be computed err := sl.hc.SetCurrentState(block.Header()) if err != nil { - log.Error("Error setting current state", "err", err, "Hash", block.Hash()) + sl.logger.Error("Error setting current state", "err", err, "Hash", block.Hash()) return nil } newPendingHeader, err := sl.generateSlicePendingHeader(block, localPendingHeader.Termini(), combinedPendingHeader, true, true, false) if err != nil { - log.Error("Error generating slice pending header", "err", err) + sl.logger.Error("Error generating slice pending header", "err", err) return err } combinedPendingHeader = types.CopyHeader(newPendingHeader.Header()) - log.Info("Choosing phHeader pickPhHead:", "NumberArray:", combinedPendingHeader.NumberArray(), "ParentHash:", combinedPendingHeader.ParentHash(nodeCtx), "Terminus:", localPendingHeader.Termini().DomTerminus(nodeLocation)) + sl.logger.Info("Choosing phHeader pickPhHead:", "NumberArray:", combinedPendingHeader.NumberArray(), "ParentHash:", combinedPendingHeader.ParentHash(nodeCtx), "Terminus:", localPendingHeader.Termini().DomTerminus(nodeLocation)) sl.WriteBestPhKey(localPendingHeader.Termini().DomTerminus(nodeLocation)) } else { - log.Warn("unable to set the current header after the cord update", "Hash", localPendingHeader.Header().ParentHash(nodeCtx)) + sl.logger.Warn("unable to set the current header after the cord update", "Hash", localPendingHeader.Header().ParentHash(nodeCtx)) } } } @@ -939,7 +941,7 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini return nil } - log.Warn("no pending header found for", "terminus", hash, 
"pendingHeaderNumber", pendingHeader.Header().NumberArray(), "Hash", pendingHeader.Header().ParentHash(nodeCtx), "Termini index", terminiIndex, "indices", indices) + sl.logger.Warn("no pending header found for", "terminus", hash, "pendingHeaderNumber", pendingHeader.Header().NumberArray(), "Hash", pendingHeader.Header().ParentHash(nodeCtx), "Termini index", terminiIndex, "indices", indices) return errors.New("pending header not found in cache") } @@ -1010,7 +1012,7 @@ func (sl *Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inS if subReorg || !exists { sl.writePhCache(deepCopyPendingHeaderWithTermini.Termini().DomTerminus(nodeLocation), deepCopyPendingHeaderWithTermini) - log.Info("PhCache update:", "new terminus?:", !exists, "inSlice:", inSlice, "Ph Number:", deepCopyPendingHeaderWithTermini.Header().NumberArray(), "Termini:", deepCopyPendingHeaderWithTermini.Termini()) + sl.logger.Info("PhCache update:", "new terminus?:", !exists, "inSlice:", inSlice, "Ph Number:", deepCopyPendingHeaderWithTermini.Header().NumberArray(), "Termini:", deepCopyPendingHeaderWithTermini.Termini()) } } @@ -1091,7 +1093,7 @@ func (sl *Slice) ConstructLocalBlock(header *types.Header) (*types.Block, error) uncles := make([]*types.Header, len(pendingBlockBody.Uncles)) for i, uncle := range pendingBlockBody.Uncles { uncles[i] = uncle - log.Debug("Pending Block uncle", "hash: ", uncle.Hash()) + sl.logger.Debug("Pending Block uncle", "hash: ", uncle.Hash()) } etxs := make([]*types.Transaction, len(pendingBlockBody.ExtTransactions)) for i, etx := range pendingBlockBody.ExtTransactions { @@ -1131,7 +1133,7 @@ func (sl *Slice) ConstructLocalMinedBlock(header *types.Header) (*types.Block, e uncles := make([]*types.Header, len(pendingBlockBody.Uncles)) for i, uncle := range pendingBlockBody.Uncles { uncles[i] = uncle - log.Debug("Pending Block uncle", "hash: ", uncle.Hash()) + sl.logger.Debug("Pending Block uncle", "hash: ", uncle.Hash()) } etxs := make([]*types.Transaction, len(pendingBlockBody.ExtTransactions)) for i, etx := range pendingBlockBody.ExtTransactions { @@ -1185,7 +1187,6 @@ func (sl *Slice) NewGenesisPendingHeader(domPendingHeader *types.Header) { if nodeCtx == common.PRIME_CTX { time.Sleep(10 * time.Second) } - genesisHash := sl.config.GenesisHash // Upate the local pending header localPendingHeader, err := sl.miner.worker.GeneratePendingHeader(sl.hc.GetBlockByHash(genesisHash), false) @@ -1232,25 +1233,25 @@ func (sl *Slice) SubscribeMissingBlockEvent(ch chan<- types.BlockRequest) event. 
} // MakeDomClient creates the quaiclient for the given domurl -func makeDomClient(domurl string) *quaiclient.Client { +func makeDomClient(domurl string, logger log.Logger) *quaiclient.Client { if domurl == "" { - log.Fatal("dom client url is empty") + logger.Fatal("dom client url is empty") } - domClient, err := quaiclient.Dial(domurl) + domClient, err := quaiclient.Dial(domurl, logger) if err != nil { - log.Fatal("Error connecting to the dominant go-quai client", "err", err) + logger.Fatal("Error connecting to the dominant go-quai client", "err", err) } return domClient } // MakeSubClients creates the quaiclient for the given suburls -func makeSubClients(suburls []string) []*quaiclient.Client { +func makeSubClients(suburls []string, logger log.Logger) []*quaiclient.Client { subClients := make([]*quaiclient.Client, 3) for i, suburl := range suburls { if suburl != "" { - subClient, err := quaiclient.Dial(suburl) + subClient, err := quaiclient.Dial(suburl, logger) if err != nil { - log.Fatal("Error connecting to the subordinate go-quai client for index", "index", i, " err ", err) + logger.Fatal("Error connecting to the subordinate go-quai client for index", "index", i, " err ", err) } subClients[i] = subClient } @@ -1333,11 +1334,11 @@ func (sl *Slice) AddPendingEtxs(pEtxs types.PendingEtxs) error { func (sl *Slice) AddPendingEtxsRollup(pEtxsRollup types.PendingEtxsRollup) error { if !pEtxsRollup.IsValid(trie.NewStackTrie(nil)) { - log.Info("PendingEtxRollup is invalid") + sl.logger.Info("PendingEtxRollup is invalid") return ErrPendingEtxRollupNotValid } nodeCtx := sl.NodeLocation().Context() - log.Debug("Received pending ETXs Rollup", "header: ", pEtxsRollup.Header.Hash(), "Len of Rollup", len(pEtxsRollup.Manifest)) + sl.logger.Debug("Received pending ETXs Rollup", "header: ", pEtxsRollup.Header.Hash(), "Len of Rollup", len(pEtxsRollup.Manifest)) // Only write the pending ETXs if we have not seen them before if !sl.hc.pendingEtxsRollup.Contains(pEtxsRollup.Header.Hash()) { // Also write to cache for faster access @@ -1495,7 +1496,7 @@ func (sl *Slice) ComputeRecoveryPendingHeader(hash common.Hash) types.PendingHea block := sl.hc.GetBlockByHash(hash) pendingHeader, err := sl.miner.worker.GeneratePendingHeader(block, false) if err != nil { - log.Error("Error generating pending header during the checkpoint recovery process") + sl.logger.Error("Error generating pending header during the checkpoint recovery process") return types.PendingHeader{} } termini := sl.hc.GetTerminiByHash(hash) diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 9da3ce0aa2..6349443fff 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -82,10 +82,11 @@ type Pruner struct { trieCachePath string headHeader *types.Header snaptree *snapshot.Tree + logger log.Logger } // NewPruner creates the pruner instance. -func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) { +func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint64, logger log.Logger) (*Pruner, error) { headBlock := rawdb.ReadHeadBlock(db) if headBlock == nil { return nil, errors.New("Failed to load head block") @@ -96,7 +97,7 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint6 } // Sanitize the bloom filter size if it's too small. 
if bloomSize < 256 { - log.Warn("Sanitizing bloomfilter size", "provided(MB)", bloomSize, "updated(MB)", 256) + logger.Warn("Sanitizing bloomfilter size", "provided(MB)", bloomSize, "updated(MB)", 256) bloomSize = 256 } stateBloom, err := newStateBloomWithSize(bloomSize) @@ -110,6 +111,7 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint6 trieCachePath: trieCachePath, headHeader: headBlock.Header(), snaptree: snaptree, + logger: logger, }, nil } @@ -283,7 +285,7 @@ func (p *Pruner) Prune(root common.Hash) error { if blob := rawdb.ReadTrieNode(p.db, layers[i].Root()); len(blob) != 0 { root = layers[i].Root() found = true - log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i) + p.logger.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i) break } } @@ -295,9 +297,9 @@ func (p *Pruner) Prune(root common.Hash) error { } } else { if len(layers) > 0 { - log.Info("Selecting bottom-most difflayer as the pruning target", "root", root, "height", p.headHeader.NumberU64(common.ZONE_CTX)-127) + p.logger.Info("Selecting bottom-most difflayer as the pruning target", "root", root, "height", p.headHeader.NumberU64(common.ZONE_CTX)-127) } else { - log.Info("Selecting user-specified state as the pruning target", "root", root) + p.logger.Info("Selecting user-specified state as the pruning target", "root", root) } } // Before start the pruning, delete the clean trie cache first. @@ -328,11 +330,11 @@ func (p *Pruner) Prune(root common.Hash) error { } filterName := bloomFilterName(p.datadir, root) - log.Info("Writing state bloom to disk", "name", filterName) + p.logger.Info("Writing state bloom to disk", "name", filterName) if err := p.stateBloom.Commit(filterName, filterName+stateBloomFileTempSuffix); err != nil { return err } - log.Info("State bloom filter committed", "name", filterName) + p.logger.Info("State bloom filter committed", "name", filterName) return prune(p.snaptree, root, p.db, p.stateBloom, filterName, middleRoots, start) } diff --git a/core/state_processor.go b/core/state_processor.go index e7f229864a..1659fdd1ec 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -120,6 +120,7 @@ type StateProcessor struct { snaps *snapshot.Tree triegc *prque.Prque // Priority queue mapping block numbers to tries to gc gcproc time.Duration // Accumulates canonical block processing for trie dumping + logger log.Logger } // NewStateProcessor initialises a new StateProcessor. @@ -146,6 +147,7 @@ func NewStateProcessor(config *params.ChainConfig, hc *HeaderChain, engine conse engine: engine, triegc: prque.New(nil), quit: make(chan struct{}), + logger: hc.logger, } sp.validator = NewBlockValidator(config, hc, engine) @@ -161,7 +163,7 @@ func NewStateProcessor(config *params.ChainConfig, hc *HeaderChain, engine conse // If periodic cache journal is required, spin it up. 
if sp.cacheConfig.TrieCleanRejournal > 0 { if sp.cacheConfig.TrieCleanRejournal < time.Minute { - log.Warn("Sanitizing invalid trie cache journal time", "provided", sp.cacheConfig.TrieCleanRejournal, "updated", time.Minute) + sp.logger.Warn("Sanitizing invalid trie cache journal time", "provided", sp.cacheConfig.TrieCleanRejournal, "updated", time.Minute) sp.cacheConfig.TrieCleanRejournal = time.Minute } triedb := sp.stateCache.TrieDB() @@ -262,7 +264,7 @@ func (p *StateProcessor) Process(block *types.Block, etxSet types.EtxSet) (types return nil, nil, nil, 0, fmt.Errorf("invalid external transaction: etx %x not found in unspent etx set", tx.Hash()) } prevZeroBal := prepareApplyETX(statedb, &etxEntry.ETX) - receipt, err = applyTransaction(msg, p.config, p.hc, nil, gp, statedb, blockNumber, blockHash, &etxEntry.ETX, usedGas, vmenv, &etxRLimit, &etxPLimit) + receipt, err = applyTransaction(msg, p.config, p.hc, nil, gp, statedb, blockNumber, blockHash, &etxEntry.ETX, usedGas, vmenv, &etxRLimit, &etxPLimit, p.logger) statedb.SetBalance(common.ZeroInternal, prevZeroBal) // Reset the balance to what it previously was. Residual balance will be lost if err != nil { @@ -276,7 +278,7 @@ func (p *StateProcessor) Process(block *types.Block, etxSet types.EtxSet) (types } else if tx.Type() == types.InternalTxType || tx.Type() == types.InternalToExternalTxType { startTimeTx := time.Now() - receipt, err = applyTransaction(msg, p.config, p.hc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, &etxRLimit, &etxPLimit) + receipt, err = applyTransaction(msg, p.config, p.hc, nil, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv, &etxRLimit, &etxPLimit, p.logger) if err != nil { return nil, nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } @@ -296,15 +298,15 @@ func (p *StateProcessor) Process(block *types.Block, etxSet types.EtxSet) (types p.engine.Finalize(p.hc, header, statedb, block.Transactions(), block.Uncles()) time5 := common.PrettyDuration(time.Since(start)) - log.Debug("Total Tx Processing Time", "signing time", common.PrettyDuration(timeSign), "prepare state time", common.PrettyDuration(timePrepare), "etx time", common.PrettyDuration(timeEtx), "tx time", common.PrettyDuration(timeTx)) - log.Debug("Time taken in Process", "time1", time1, "time2", time2, "time3", time3, "time4", time4, "time5", time5) + p.logger.Debug("Total Tx Processing Time", "signing time", common.PrettyDuration(timeSign), "prepare state time", common.PrettyDuration(timePrepare), "etx time", common.PrettyDuration(timeEtx), "tx time", common.PrettyDuration(timeTx)) + p.logger.Debug("Time taken in Process", "time1", time1, "time2", time2, "time3", time3, "time4", time4, "time5", time5) - log.Debug("Total Tx Processing Time", "signing time", common.PrettyDuration(timeSign), "senders cache time", common.PrettyDuration(timeSenders), "percent cached internal txs", fmt.Sprintf("%.2f", float64(len(senders))/float64(numInternalTxs)*100), "prepare state time", common.PrettyDuration(timePrepare), "etx time", common.PrettyDuration(timeEtx), "tx time", common.PrettyDuration(timeTx)) + p.logger.Debug("Total Tx Processing Time", "signing time", common.PrettyDuration(timeSign), "senders cache time", common.PrettyDuration(timeSenders), "percent cached internal txs", fmt.Sprintf("%.2f", float64(len(senders))/float64(numInternalTxs)*100), "prepare state time", common.PrettyDuration(timePrepare), "etx time", common.PrettyDuration(timeEtx), "tx time", common.PrettyDuration(timeTx)) return receipts, 
allLogs, statedb, *usedGas, nil } -func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM, etxRLimit, etxPLimit *int) (*types.Receipt, error) { +func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM, etxRLimit, etxPLimit *int, logger log.Logger) (*types.Receipt, error) { nodeLocation := config.Location // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) @@ -347,7 +349,7 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: *usedGas, Etxs: result.Etxs} if result.Failed() { receipt.Status = types.ReceiptStatusFailed - log.Debug(result.Err.Error()) + logger.Debug(result.Err.Error()) } else { receipt.Status = types.ReceiptStatusSuccessful } @@ -392,7 +394,7 @@ func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.Block, newInbound return nil, err } if block.Hash() != blockHash { - log.Warn("Block hash changed after Processing the block", "old hash", blockHash, "new hash", block.Hash()) + p.logger.Warn("Block hash changed after Processing the block", "old hash", blockHash, "new hash", block.Hash()) } time3 := common.PrettyDuration(time.Since(start)) err = p.validator.ValidateState(block, statedb, receipts, usedGas) @@ -448,12 +450,12 @@ func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.Block, newInbound // diff sidechain. Suspend committing until this operation is completed. header := p.hc.GetHeaderByNumber(chosen) if header == nil { - log.Warn("Reorg in progress, trie commit postponed", "number", chosen) + p.logger.Warn("Reorg in progress, trie commit postponed", "number", chosen) } else { // If we're exceeding limits but haven't reached a large enough memory gap, // warn the user that the system is becoming unstable. 
if chosen < lastWrite+TriesInMemory && p.gcproc >= 2*p.cacheConfig.TrieTimeLimit { - log.Info("State in memory for too long, committing", "time", p.gcproc, "allowance", p.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory) + p.logger.Info("State in memory for too long, committing", "time", p.gcproc, "allowance", p.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/TriesInMemory) } // Flush an entire trie and restart the counters triedb.Commit(header.Root(), true, nil) @@ -477,7 +479,7 @@ func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.Block, newInbound rawdb.WriteEtxSet(batch, header.Hash(), header.NumberU64(nodeCtx), etxSet) time12 := common.PrettyDuration(time.Since(start)) - log.Debug("times during state processor apply:", "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t4.5:", time4_5, "t5:", time5, "t6:", time6, "t7:", time7, "t8:", time8, "t9:", time9, "t10:", time10, "t11:", time11, "t12:", time12) + p.logger.Debug("times during state processor apply:", "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t4.5:", time4_5, "t5:", time5, "t6:", time6, "t7:", time7, "t8:", time8, "t9:", time9, "t10:", time10, "t11:", time11, "t12:", time12) return logs, nil } @@ -485,7 +487,7 @@ func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.Block, newInbound // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. -func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config, etxRLimit, etxPLimit *int) (*types.Receipt, error) { +func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config, etxRLimit, etxPLimit *int, logger log.Logger) (*types.Receipt, error) { nodeCtx := config.Location.Context() msg, err := tx.AsMessage(types.MakeSigner(config, header.Number(nodeCtx)), header.BaseFee()) if err != nil { @@ -496,11 +498,11 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) if tx.Type() == types.ExternalTxType { prevZeroBal := prepareApplyETX(statedb, tx) - receipt, err := applyTransaction(msg, config, bc, author, gp, statedb, header.Number(nodeCtx), header.Hash(), tx, usedGas, vmenv, etxRLimit, etxPLimit) + receipt, err := applyTransaction(msg, config, bc, author, gp, statedb, header.Number(nodeCtx), header.Hash(), tx, usedGas, vmenv, etxRLimit, etxPLimit, logger) statedb.SetBalance(common.ZeroInternal, prevZeroBal) // Reset the balance to what it previously was (currently a failed external transaction removes all the sent coins from the supply and any residual balance is gone as well) return receipt, err } - return applyTransaction(msg, config, bc, author, gp, statedb, header.Number(nodeCtx), header.Hash(), tx, usedGas, vmenv, etxRLimit, etxPLimit) + return applyTransaction(msg, config, bc, author, gp, statedb, header.Number(nodeCtx), header.Hash(), tx, usedGas, vmenv, etxRLimit, etxPLimit, logger) } // GetVMConfig returns the block chain VM config. 
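With this change every caller of the exported ApplyTransaction supplies the contextual logger that per-transaction diagnostics should be written to, instead of relying on the package-level log functions. A minimal sketch of a call site under the new signature (the surrounding variables and the logger name are illustrative assumptions, not taken from this patch):

	// Sketch only: the caller passes its own chain-scoped logger as the final argument,
	// so failures from this transaction land in that chain's log file.
	gasUsed := header.GasUsed()
	receipt, err := core.ApplyTransaction(chainConfig, chain, &coinbase, gasPool, statedb,
		header, tx, &gasUsed, vmConfig, &etxRLimit, &etxPLimit, chainLogger)
	if err != nil {
		chainLogger.Debug("Transaction failed in sketch call site", "hash", tx.Hash(), "err", err)
	}
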
@@ -685,7 +687,7 @@ func (p *StateProcessor) StateAtBlock(block *types.Block, reexec uint64, base *s current := newHeads[i] // Print progress logs if long enough time elapsed if time.Since(logged) > 8*time.Second && report { - log.Info("Regenerating historical state", "block", current.NumberU64(nodeCtx)+1, "target", origin, "remaining", origin-current.NumberU64(nodeCtx)-1, "elapsed", time.Since(start)) + p.logger.Info("Regenerating historical state", "block", current.NumberU64(nodeCtx)+1, "target", origin, "remaining", origin-current.NumberU64(nodeCtx)-1, "elapsed", time.Since(start)) logged = time.Now() } @@ -722,7 +724,7 @@ func (p *StateProcessor) StateAtBlock(block *types.Block, reexec uint64, base *s } if report { nodes, imgs := database.TrieDB().Size() - log.Info("Historical state regenerated", "block", current.NumberU64(nodeCtx), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) + p.logger.Info("Historical state regenerated", "block", current.NumberU64(nodeCtx), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) } return statedb, nil } @@ -777,7 +779,7 @@ func (p *StateProcessor) Stop() { if p.snaps != nil { var err error if snapBase, err = p.snaps.Journal(p.hc.CurrentBlock().Root()); err != nil { - log.Error("Failed to journal state snapshot", "err", err) + p.logger.Error("Failed to journal state snapshot", "err", err) } } // Ensure the state of a recent block is also stored to disk before exiting. @@ -792,23 +794,23 @@ func (p *StateProcessor) Stop() { if number := p.hc.CurrentBlock().NumberU64(nodeCtx); number > offset { recent := p.hc.GetBlockByNumber(number - offset) - log.Info("Writing cached state to disk", "block", recent.Number(nodeCtx), "hash", recent.Hash(), "root", recent.Root()) + p.logger.Info("Writing cached state to disk", "block", recent.Number(nodeCtx), "hash", recent.Hash(), "root", recent.Root()) if err := triedb.Commit(recent.Root(), true, nil); err != nil { - log.Error("Failed to commit recent state trie", "err", err) + p.logger.Error("Failed to commit recent state trie", "err", err) } } } if snapBase != (common.Hash{}) { - log.Info("Writing snapshot state to disk", "root", snapBase) + p.logger.Info("Writing snapshot state to disk", "root", snapBase) if err := triedb.Commit(snapBase, true, nil); err != nil { - log.Error("Failed to commit recent state trie", "err", err) + p.logger.Error("Failed to commit recent state trie", "err", err) } } for !p.triegc.Empty() { triedb.Dereference(p.triegc.PopItem().(common.Hash)) } if size, _ := triedb.Size(); size != 0 { - log.Error("Dangling trie nodes after full cleanup") + p.logger.Error("Dangling trie nodes after full cleanup") } } // Ensure all live cached entries be saved into disk, so that we can skip @@ -818,7 +820,7 @@ func (p *StateProcessor) Stop() { triedb.SaveCache(p.cacheConfig.TrieCleanJournal) } close(p.quit) - log.Info("State Processor stopped") + p.logger.Info("State Processor stopped") } func prepareApplyETX(statedb *state.StateDB, tx *types.Transaction) *big.Int { diff --git a/core/tx_journal.go b/core/tx_journal.go index 82abf25def..972725cb06 100644 --- a/core/tx_journal.go +++ b/core/tx_journal.go @@ -45,12 +45,14 @@ func (*devNull) Close() error { return nil } type txJournal struct { path string // Filesystem path to store the transactions at writer io.WriteCloser // Output stream to write new transactions into + logger log.Logger } // newTxJournal creates a new transaction journal to -func newTxJournal(path string) *txJournal { +func newTxJournal(path string, logger 
log.Logger) *txJournal { return &txJournal{ - path: path, + path: path, + logger: logger, } } @@ -82,7 +84,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error { loadBatch := func(txs types.Transactions) { for _, err := range add(txs) { if err != nil { - log.Debug("Failed to add journaled transaction", "err", err) + journal.logger.Debug("Failed to add journaled transaction", "err", err) dropped++ } } @@ -111,7 +113,7 @@ func (journal *txJournal) load(add func([]*types.Transaction) []error) error { batch = batch[:0] } } - log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped) + journal.logger.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped) return failure } @@ -163,7 +165,7 @@ func (journal *txJournal) rotate(all map[common.InternalAddress]types.Transactio return err } journal.writer = sink - log.Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all)) + journal.logger.Info("Regenerated local transaction journal", "transactions", journaled, "accounts", len(all)) return nil } diff --git a/core/tx_pool.go b/core/tx_pool.go index e9a06fe327..6f1d5fb583 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -157,38 +157,38 @@ var DefaultTxPoolConfig = TxPoolConfig{ // sanitize checks the provided user configurations and changes anything that's // unreasonable or unworkable. -func (config *TxPoolConfig) sanitize() TxPoolConfig { +func (config *TxPoolConfig) sanitize(logger log.Logger) TxPoolConfig { conf := *config if conf.Rejournal < time.Second { - log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) + logger.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) conf.Rejournal = time.Second } if conf.PriceLimit < 1 { - log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) + logger.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) conf.PriceLimit = DefaultTxPoolConfig.PriceLimit } if conf.PriceBump < 1 { - log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) + logger.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) conf.PriceBump = DefaultTxPoolConfig.PriceBump } if conf.AccountSlots < 1 { - log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) + logger.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) conf.AccountSlots = DefaultTxPoolConfig.AccountSlots } if conf.GlobalSlots < 1 { - log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) + logger.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots } if conf.AccountQueue < 1 { - log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) + logger.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) conf.AccountQueue = DefaultTxPoolConfig.AccountQueue } if conf.GlobalQueue < 1 { - 
log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) + logger.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue } if conf.Lifetime < 1 { - log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) + logger.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) conf.Lifetime = DefaultTxPoolConfig.Lifetime } return conf @@ -239,6 +239,8 @@ type TxPool struct { reorgDoneCh chan chan struct{} reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop wg sync.WaitGroup // tracks loop, scheduleReorgLoop + + logger log.Logger } type txpoolResetRequest struct { @@ -252,9 +254,9 @@ type newSender struct { // NewTxPool creates a new transaction pool to gather, sort and filter inbound // transactions from the network. -func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool { +func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain, logger log.Logger) *TxPool { // Sanitize the input to ensure no vulnerable gas prices are set - config = (&config).sanitize() + config = (&config).sanitize(logger) // Create the transaction pool with its initial settings pool := &TxPool{ @@ -278,10 +280,11 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block localTxsCount: 0, remoteTxsCount: 0, reOrgCounter: 0, + logger: logger, } pool.locals = newAccountSet(pool.signer) for _, addr := range config.Locals { - log.Debug("Setting new local account", "address", addr) + logger.Debug("Setting new local account", "address", addr) pool.locals.add(addr) } pool.priced = newTxPricedList(pool.all) @@ -293,13 +296,13 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block // If local transactions and journaling is enabled, load from disk if !config.NoLocals && config.Journal != "" { - pool.journal = newTxJournal(config.Journal) + pool.journal = newTxJournal(config.Journal, logger) if err := pool.journal.load(pool.AddLocals); err != nil { - log.Warn("Failed to load transaction journal", "err", err) + logger.Warn("Failed to load transaction journal", "err", err) } if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate transaction journal", "err", err) + logger.Warn("Failed to rotate transaction journal", "err", err) } } @@ -348,12 +351,12 @@ func (pool *TxPool) loop() { pool.mu.RLock() pending, queued := pool.stats() stales := pool.priced.stales - log.Info("Added Transactions in last Min", "Local Txs", pool.localTxsCount, "Remote Txs", pool.remoteTxsCount) + pool.logger.Info("Added Transactions in last Min", "Local Txs", pool.localTxsCount, "Remote Txs", pool.remoteTxsCount) pool.localTxsCount = 0 pool.remoteTxsCount = 0 pool.mu.RUnlock() - log.Info("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) + pool.logger.Info("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) // Handle inactive account transaction eviction case <-evict.C: pool.mu.Lock() @@ -377,7 +380,7 @@ func (pool *TxPool) loop() { if pool.journal != nil { pool.mu.Lock() if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate local tx journal", "err", err) + pool.logger.Warn("Failed to 
rotate local tx journal", "err", err) } pool.mu.Unlock() } @@ -397,7 +400,7 @@ func (pool *TxPool) Stop() { if pool.journal != nil { pool.journal.close() } - log.Info("Transaction pool stopped") + pool.logger.Info("Transaction pool stopped") } // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and @@ -432,7 +435,7 @@ func (pool *TxPool) SetGasPrice(price *big.Int) { pool.priced.Removed(len(drop)) } - log.Info("Transaction pool price threshold updated", "price", price) + pool.logger.Info("Transaction pool price threshold updated", "price", price) } // Nonce returns the next nonce of an account, with all transactions executable @@ -520,7 +523,7 @@ func (pool *TxPool) TxPoolPending(enforceTips bool, etxSet types.EtxSet) (map[co if enforceTips && !pool.locals.contains(addr) { for i, tx := range txs { if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { - log.Debug("TX has incorrect or low miner tip", "tx", tx.Hash().String(), "gasTipCap", tx.GasTipCap().String(), "poolGasPrice", pool.gasPrice.String(), "baseFee", pool.priced.urgent.baseFee.String()) + pool.logger.Debug("TX has incorrect or low miner tip", "tx", tx.Hash().String(), "gasTipCap", tx.GasTipCap().String(), "poolGasPrice", pool.gasPrice.String(), "baseFee", pool.priced.urgent.baseFee.String()) txs = txs[:i] break } @@ -535,12 +538,12 @@ func (pool *TxPool) TxPoolPending(enforceTips bool, etxSet types.EtxSet) (map[co addr := entry.ETX.ETXSender() tx := entry.ETX if tx.ETXSender().Location(pool.chainconfig.Location).Equal(pool.chainconfig.Location) { // Sanity check - log.Error("ETX sender is in our location!", "tx", tx.Hash().String(), "sender", tx.ETXSender().String()) + pool.logger.Error("ETX sender is in our location!", "tx", tx.Hash().String(), "sender", tx.ETXSender().String()) continue // skip this tx } // If the miner requests tip enforcement, cap the lists now if enforceTips && tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { - log.Debug("ETX has incorrect or low miner tip", "tx", tx.Hash().String(), "gasTipCap", tx.GasTipCap().String(), "poolGasPrice", pool.gasPrice.String(), "baseFee", pool.priced.urgent.baseFee.String()) + pool.logger.Debug("ETX has incorrect or low miner tip", "tx", tx.Hash().String(), "gasTipCap", tx.GasTipCap().String(), "poolGasPrice", pool.gasPrice.String(), "baseFee", pool.priced.urgent.baseFee.String()) continue // skip this tx } pending[addr.Bytes20()] = append(pending[addr.Bytes20()], &tx) // ETXs do not have to be sorted by address but this way all TXs are in the same list @@ -641,17 +644,17 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { return err } if tx.Gas() < intrGas { - log.Warn("tx has insufficient gas", "gas supplied", tx.Gas(), "gas needed", intrGas, "tx", tx) + pool.logger.Warn("tx has insufficient gas", "gas supplied", tx.Gas(), "gas needed", intrGas, "tx", tx) return ErrIntrinsicGas } if len(pool.sendersCh) == int(pool.config.SendersChBuffer) { - log.Error("sendersCh is full, skipping until there is room") + pool.logger.Error("sendersCh is full, skipping until there is room") } if addToCache { select { case pool.sendersCh <- newSender{tx.Hash(), internal}: // Non-blocking default: - log.Error("sendersCh is full, skipping until there is room") + pool.logger.Error("sendersCh is full, skipping until there is room") } } @@ -669,7 +672,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e // If the transaction is already known, discard it hash := tx.Hash() if 
pool.all.Get(hash) != nil { - log.Trace("Discarding already known transaction", "hash", hash) + pool.logger.Trace("Discarding already known transaction", "hash", hash) return false, ErrAlreadyKnown } // Make the local flag. If it's from local source or it's from the network but @@ -678,14 +681,14 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e // If the transaction fails basic validation, discard it if err := pool.validateTx(tx, isLocal); err != nil { - log.Trace("Discarding invalid transaction", "hash", hash, "err", err) + pool.logger.Trace("Discarding invalid transaction", "hash", hash, "err", err) return false, err } // If the transaction pool is full, discard underpriced transactions if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it if !isLocal && pool.priced.Underpriced(tx) { - log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) + pool.logger.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) return false, ErrUnderpriced } // New transaction is better than our worse ones, make room for it. @@ -695,12 +698,12 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e // Special case, we still can't make the room for the new remote one. if !isLocal && !success { - log.Trace("Discarding overflown transaction", "hash", hash) + pool.logger.Trace("Discarding overflown transaction", "hash", hash) return false, ErrTxPoolOverflow } // Kick out the underpriced remote transactions. for _, tx := range drop { - log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) + pool.logger.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) pool.removeTx(tx.Hash(), false) } } @@ -718,14 +721,14 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e } // New transaction is better, replace old one if old != nil { - pool.all.Remove(old.Hash()) + pool.all.Remove(old.Hash(), pool.logger) pool.priced.Removed(1) } pool.all.Add(tx, isLocal) pool.priced.Put(tx, isLocal) pool.journalTx(internal, tx) pool.queueTxEvent(tx) - log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) + pool.logger.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) // Successful promotion, bump the heartbeat pool.beats[internal] = time.Now() @@ -738,13 +741,13 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e } // Mark local addresses and journal local transactions if local && !pool.locals.contains(internal) { - log.Info("Setting new local account", "address", from) + pool.logger.Info("Setting new local account", "address", from) pool.locals.add(internal) pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. 
} pool.journalTx(internal, tx) pool.queueTxEvent(tx) - log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) + pool.logger.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) return replaced, nil } @@ -768,13 +771,13 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local boo } // Discard any previous transaction and mark this if old != nil { - pool.all.Remove(old.Hash()) + pool.all.Remove(old.Hash(), pool.logger) pool.priced.Removed(1) } // If the transaction isn't in lookup set but it's expected to be there, // show the error log. if pool.all.Get(hash) == nil && !addAll { - log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) + pool.logger.Error("Missing transaction in lookup set, please report the issue", "hash", hash) } if addAll { pool.all.Add(tx, local) @@ -795,7 +798,7 @@ func (pool *TxPool) journalTx(from common.InternalAddress, tx *types.Transaction return } if err := pool.journal.insert(tx); err != nil { - log.Warn("Failed to journal local transaction", "err", err) + pool.logger.Warn("Failed to journal local transaction", "err", err) } } @@ -813,13 +816,13 @@ func (pool *TxPool) promoteTx(addr common.InternalAddress, hash common.Hash, tx inserted, old := list.Add(tx, pool.config.PriceBump) if !inserted { // An older transaction was better, discard this - pool.all.Remove(hash) + pool.all.Remove(hash, pool.logger) pool.priced.Removed(1) return false } // Otherwise discard any previous transaction and mark this if old != nil { - pool.all.Remove(old.Hash()) + pool.all.Remove(old.Hash(), pool.logger) pool.priced.Removed(1) } // Set the potentially new pending nonce and notify any subsystems of the new tx @@ -828,7 +831,7 @@ func (pool *TxPool) promoteTx(addr common.InternalAddress, hash common.Hash, tx // Successful promotion, bump the heartbeat pool.beats[addr] = time.Now() if list.Len()%100 == 0 { - log.Info("Another 100 txs added to list", "addr", addr, "len", list.Len()) + pool.logger.Info("Another 100 txs added to list", "addr", addr, "len", list.Len()) } return true } @@ -955,7 +958,7 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, replaced, err := pool.add(tx, local) errs[i] = err if err == nil && !replaced { - dirty.addTx(tx) + dirty.addTx(tx, pool.logger) } } return errs, dirty @@ -1013,7 +1016,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { return } // Remove it from the list of known transactions - pool.all.Remove(hash) + pool.all.Remove(hash, pool.logger) if outofbound { pool.priced.Removed(1) } @@ -1132,7 +1135,7 @@ func (pool *TxPool) scheduleReorgLoop() { addr, _ := types.Sender(pool.signer, tx) internal, err := addr.InternalAddress() if err != nil { - log.Debug("Failed to queue transaction", "err", err) + pool.logger.Debug("Failed to queue transaction", "err", err) continue } if _, ok := queuedEvents[internal]; !ok { @@ -1223,7 +1226,7 @@ func (pool *TxPool) runReorg(done chan struct{}, cancel chan struct{}, reset *tx addr, _ := types.Sender(pool.signer, tx) internal, err := addr.InternalAddress() if err != nil { - log.Debug("Failed to add transaction event", "err", err) + pool.logger.Debug("Failed to add transaction event", "err", err) continue } if _, ok := events[internal]; !ok { @@ -1239,7 +1242,7 @@ func (pool *TxPool) runReorg(done chan struct{}, cancel chan struct{}, reset *tx pool.txFeed.Send(NewTxsEvent{txs}) } if pool.reOrgCounter ==
c_reorgCounterThreshold { - log.Debug("Time taken to runReorg in txpool", "time", common.PrettyDuration(time.Since(start))) + pool.logger.Debug("Time taken to runReorg in txpool", "time", common.PrettyDuration(time.Since(start))) pool.reOrgCounter = 0 } return @@ -1264,7 +1267,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { newNum := newHead.NumberU64(nodeCtx) if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { - log.Debug("Skipping deep transaction reorg", "depth", depth) + pool.logger.Debug("Skipping deep transaction reorg", "depth", depth) } else { // Reorg seems shallow enough to pull in all transactions into memory var discarded, included types.Transactions @@ -1279,42 +1282,42 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { // there's nothing to add if newNum >= oldNum { // If we reorged to a same or higher number, then it's not a case of setHead - log.Warn("Transaction pool reset with missing oldhead", + pool.logger.Warn("Transaction pool reset with missing oldhead", "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) return } // If the reorg ended up on a lower number, it's indicative of setHead being the cause - log.Debug("Skipping transaction reset caused by setHead", + pool.logger.Debug("Skipping transaction reset caused by setHead", "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) // We still need to update the current state s.th. the lost transactions can be readded by the user } else { if rem == nil || add == nil { - log.Error("Unrooted chain seen by tx pool") + pool.logger.Error("Unrooted chain seen by tx pool") return } for rem.NumberU64(nodeCtx) > add.NumberU64(nodeCtx) { discarded = append(discarded, rem.Transactions()...) if rem = pool.chain.GetBlock(rem.ParentHash(nodeCtx), rem.NumberU64(nodeCtx)-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number(nodeCtx), "hash", oldHead.Hash()) + pool.logger.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number(nodeCtx), "hash", oldHead.Hash()) return } } for add.NumberU64(nodeCtx) > rem.NumberU64(nodeCtx) { included = append(included, add.Transactions()...) if add = pool.chain.GetBlock(add.ParentHash(nodeCtx), add.NumberU64(nodeCtx)-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number(nodeCtx), "hash", newHead.Hash()) + pool.logger.Error("Unrooted new chain seen by tx pool", "block", newHead.Number(nodeCtx), "hash", newHead.Hash()) return } } for rem.Hash() != add.Hash() { discarded = append(discarded, rem.Transactions()...) if rem = pool.chain.GetBlock(rem.ParentHash(nodeCtx), rem.NumberU64(nodeCtx)-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number(nodeCtx), "hash", oldHead.Hash()) + pool.logger.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number(nodeCtx), "hash", oldHead.Hash()) return } included = append(included, add.Transactions()...) 
if add = pool.chain.GetBlock(add.ParentHash(nodeCtx), add.NumberU64(nodeCtx)-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number(nodeCtx), "hash", newHead.Hash()) + pool.logger.Error("Unrooted new chain seen by tx pool", "block", newHead.Number(nodeCtx), "hash", newHead.Hash()) return } } @@ -1328,7 +1331,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { } statedb, err := pool.chain.StateAt(newHead.Root()) if err != nil { - log.Error("Failed to reset txpool state", "err", err) + pool.logger.Error("Failed to reset txpool state", "err", err) return } pool.currentState = statedb @@ -1336,11 +1339,11 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { pool.currentMaxGas = newHead.GasLimit() // Inject any transactions discarded due to reorgs - log.Debug("Reinjecting stale transactions", "count", len(reinject)) + pool.logger.Debug("Reinjecting stale transactions", "count", len(reinject)) senderCacher.recover(pool.signer, reinject) pool.addTxsLocked(reinject, false) if pool.reOrgCounter == c_reorgCounterThreshold { - log.Debug("Time taken to resetTxPool", "time", common.PrettyDuration(time.Since(start))) + pool.logger.Debug("Time taken to resetTxPool", "time", common.PrettyDuration(time.Since(start))) } } @@ -1365,16 +1368,16 @@ func (pool *TxPool) promoteExecutables(accounts []common.InternalAddress) []*typ forwards := list.Forward(pool.currentState.GetNonce(addr)) for _, tx := range forwards { hash := tx.Hash() - pool.all.Remove(hash) + pool.all.Remove(hash, pool.logger) } - log.Trace("Removed old queued transactions", "count", len(forwards)) + pool.logger.Trace("Removed old queued transactions", "count", len(forwards)) // Drop all transactions that are too costly (low balance or out of gas) drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) for _, tx := range drops { hash := tx.Hash() - pool.all.Remove(hash) + pool.all.Remove(hash, pool.logger) } - log.Trace("Removed unpayable queued transactions", "count", len(drops)) + pool.logger.Trace("Removed unpayable queued transactions", "count", len(drops)) // Gather all executable transactions and promote them readies := list.Ready(pool.pendingNonces.get(addr)) @@ -1384,7 +1387,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.InternalAddress) []*typ promoted = append(promoted, tx) } } - log.Trace("Promoted queued transactions", "count", len(promoted)) + pool.logger.Trace("Promoted queued transactions", "count", len(promoted)) // Drop all transactions over the allowed limit var caps types.Transactions @@ -1392,8 +1395,8 @@ func (pool *TxPool) promoteExecutables(accounts []common.InternalAddress) []*typ caps = list.Cap(int(pool.config.AccountQueue)) for _, tx := range caps { hash := tx.Hash() - pool.all.Remove(hash) - log.Trace("Removed cap-exceeding queued transaction", "hash", hash) + pool.all.Remove(hash, pool.logger) + pool.logger.Trace("Removed cap-exceeding queued transaction", "hash", hash) } } // Mark all the items dropped as removed @@ -1405,7 +1408,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.InternalAddress) []*typ } } if pool.reOrgCounter == c_reorgCounterThreshold { - log.Debug("Time taken to promoteExecutables", "time", common.PrettyDuration(time.Since(start))) + pool.logger.Debug("Time taken to promoteExecutables", "time", common.PrettyDuration(time.Since(start))) } return promoted } @@ -1455,11 +1458,11 @@ func (pool *TxPool) truncatePending() { for _, tx := range caps { // Drop the transaction from the global 
pools too hash := tx.Hash() - pool.all.Remove(hash) + pool.all.Remove(hash, pool.logger) // Update the account nonce to the dropped transaction pool.pendingNonces.setIfLower(offenders[i], tx.Nonce()) - log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) + pool.logger.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } pool.priced.Removed(len(caps)) pending-- @@ -1478,11 +1481,11 @@ func (pool *TxPool) truncatePending() { for _, tx := range caps { // Drop the transaction from the global pools too hash := tx.Hash() - pool.all.Remove(hash) + pool.all.Remove(hash, pool.logger) // Update the account nonce to the dropped transaction pool.pendingNonces.setIfLower(addr, tx.Nonce()) - log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) + pool.logger.Trace("Removed fairness-exceeding pending transaction", "hash", hash) } pool.priced.Removed(len(caps)) pending-- @@ -1490,7 +1493,7 @@ func (pool *TxPool) truncatePending() { } } if pool.reOrgCounter == c_reorgCounterThreshold { - log.Debug("Time taken to truncatePending", "time", common.PrettyDuration(time.Since(start))) + pool.logger.Debug("Time taken to truncatePending", "time", common.PrettyDuration(time.Since(start))) } } @@ -1540,7 +1543,7 @@ func (pool *TxPool) truncateQueue() { } } if pool.reOrgCounter == c_reorgCounterThreshold { - log.Debug("Time taken to truncateQueue", "time", common.PrettyDuration(time.Since(start))) + pool.logger.Debug("Time taken to truncateQueue", "time", common.PrettyDuration(time.Since(start))) } } @@ -1564,20 +1567,20 @@ func (pool *TxPool) demoteUnexecutables() { olds := list.Forward(nonce) for _, tx := range olds { hash := tx.Hash() - pool.all.Remove(hash) - log.Debug("Removed old pending transaction", "hash", hash) + pool.all.Remove(hash, pool.logger) + pool.logger.Debug("Removed old pending transaction", "hash", hash) } // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) for _, tx := range drops { hash := tx.Hash() - log.Trace("Removed unpayable pending transaction", "hash", hash) - pool.all.Remove(hash) + pool.logger.Trace("Removed unpayable pending transaction", "hash", hash) + pool.all.Remove(hash, pool.logger) } for _, tx := range invalids { hash := tx.Hash() - log.Trace("Demoting pending transaction", "hash", hash) + pool.logger.Trace("Demoting pending transaction", "hash", hash) // Internal shuffle shouldn't touch the lookup set. 
pool.enqueueTx(hash, tx, false, false) @@ -1585,7 +1588,7 @@ func (pool *TxPool) demoteUnexecutables() { // If there's a gap in front, alert (should never happen) and postpone all transactions if list.Len() > 0 && list.txs.Get(nonce) == nil { gapped := list.Cap(0) - log.Error("Demoting invalidated transactions", "count", len(gapped)) + pool.logger.Error("Demoting invalidated transactions", "count", len(gapped)) for _, tx := range gapped { hash := tx.Hash() @@ -1599,7 +1602,7 @@ func (pool *TxPool) demoteUnexecutables() { } } if pool.reOrgCounter == c_reorgCounterThreshold { - log.Debug("Time taken to demoteExecutables", "time", common.PrettyDuration(time.Since(start))) + pool.logger.Debug("Time taken to demoteExecutables", "time", common.PrettyDuration(time.Since(start))) } } @@ -1638,7 +1641,7 @@ func (pool *TxPool) sendersGoroutine() { pool.senders.Delete(pool.senders.Oldest().Key) // FIFO } } else { - log.Debug("Tx already seen in sender cache (reorg?)", "tx", tx.hash.String(), "sender", tx.sender.String()) + pool.logger.Debug("Tx already seen in sender cache (reorg?)", "tx", tx.hash.String(), "sender", tx.sender.String()) } pool.SendersMutex.Unlock() } @@ -1708,11 +1711,11 @@ func (as *accountSet) add(addr common.InternalAddress) { } // addTx adds the sender of tx into the set. -func (as *accountSet) addTx(tx *types.Transaction) { +func (as *accountSet) addTx(tx *types.Transaction, logger log.Logger) { if addr, err := types.Sender(as.signer, tx); err == nil { internal, err := addr.InternalAddress() if err != nil { - log.Debug("Failed to add tx to account set", "err", err) + logger.Debug("Failed to add tx to account set", "err", err) return } as.add(internal) @@ -1864,7 +1867,7 @@ func (t *txLookup) Add(tx *types.Transaction, local bool) { } // Remove removes a transaction from the lookup. -func (t *txLookup) Remove(hash common.Hash) { +func (t *txLookup) Remove(hash common.Hash, logger log.Logger) { t.lock.Lock() defer t.lock.Unlock() @@ -1873,7 +1876,7 @@ func (t *txLookup) Remove(hash common.Hash) { tx, ok = t.remotes[hash] } if !ok { - log.Error("No transaction found to be deleted", "hash", hash) + logger.Error("No transaction found to be deleted", "hash", hash) return } t.slots -= numSlots(tx) diff --git a/core/worker.go b/core/worker.go index a038451e52..c19e0d8688 100644 --- a/core/worker.go +++ b/core/worker.go @@ -232,6 +232,8 @@ type worker struct { // Test hooks newTaskHook func(*task) // Method to call upon receiving a new sealing task. fullTaskHook func() // Method to call before pushing the full sealing task. 
+ + logger log.Logger } type RollingAverage struct { @@ -256,7 +258,7 @@ func (ra *RollingAverage) Average() time.Duration { return ra.sum / time.Duration(len(ra.durations)) } -func newWorker(config *Config, chainConfig *params.ChainConfig, db ethdb.Database, engine consensus.Engine, headerchain *HeaderChain, txPool *TxPool, isLocalBlock func(header *types.Header) bool, init bool, processingState bool) *worker { +func newWorker(config *Config, chainConfig *params.ChainConfig, db ethdb.Database, engine consensus.Engine, headerchain *HeaderChain, txPool *TxPool, isLocalBlock func(header *types.Header) bool, init bool, processingState bool, logger log.Logger) *worker { worker := &worker{ config: config, chainConfig: chainConfig, @@ -276,6 +278,7 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, db ethdb.Databas resubmitIntervalCh: make(chan time.Duration), resubmitAdjustCh: make(chan *intervalAdjust, resubmitAdjustChanSize), fillTransactionsRollingAverage: &RollingAverage{windowSize: 100}, + logger: logger, } // Set the GasFloor of the worker to the minGasLimit worker.config.GasFloor = params.MinGasLimit @@ -286,7 +289,7 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, db ethdb.Databas // Sanitize recommit interval if the user-specified one is too short. recommit := worker.config.Recommit if recommit < minRecommitInterval { - log.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval) + logger.Warn("Sanitizing miner recommit interval", "provided", recommit, "updated", minRecommitInterval) recommit = minRecommitInterval } @@ -442,7 +445,7 @@ func (w *worker) asyncStateLoop() { block := head.Block header, err := w.GeneratePendingHeader(block, true) if err != nil { - log.Error("Error generating pending header with state", "err", err) + w.logger.Error("Error generating pending header with state", "err", err) return } // Send the updated pendingHeader in the asyncPhFeed @@ -479,7 +482,7 @@ func (w *worker) GeneratePendingHeader(block *types.Block, fill bool) (*types.He // Set the coinbase if the worker is running or it's required var coinbase common.Address if w.coinbase.Equal(common.ZeroAddr) { - log.Error("Refusing to mine without etherbase") + w.logger.Error("Refusing to mine without etherbase") return nil, errors.New("etherbase not found") } coinbase = w.coinbase // Use the preset address as the fee recipient @@ -499,7 +502,7 @@ func (w *worker) GeneratePendingHeader(block *types.Block, fill bool) (*types.He start := time.Now() w.fillTransactions(interrupt, work, block) w.fillTransactionsRollingAverage.Add(time.Since(start)) - log.Info("Filled and sorted pending transactions", "count", len(work.txs), "elapsed", common.PrettyDuration(time.Since(start)), "average", common.PrettyDuration(w.fillTransactionsRollingAverage.Average())) + w.logger.Info("Filled and sorted pending transactions", "count", len(work.txs), "elapsed", common.PrettyDuration(time.Since(start)), "average", common.PrettyDuration(w.fillTransactionsRollingAverage.Average())) } } @@ -526,12 +529,12 @@ func (w *worker) GeneratePendingHeader(block *types.Block, fill bool) (*types.He func (w *worker) printPendingHeaderInfo(work *environment, block *types.Block, start time.Time) { work.uncleMu.RLock() if w.CurrentInfo(block.Header()) { - log.Info("Commit new sealing work", "number", block.Number(w.hc.NodeCtx()), "sealhash", block.Header().SealHash(), + w.logger.Info("Commit new sealing work", "number", block.Number(w.hc.NodeCtx()), "sealhash", 
block.Header().SealHash(), "uncles", len(work.uncles), "txs", work.tcount, "etxs", len(block.ExtTransactions()), "gas", block.GasUsed(), "fees", totalFees(block, work.receipts), "elapsed", common.PrettyDuration(time.Since(start))) } else { - log.Debug("Commit new sealing work", "number", block.Number(w.hc.NodeCtx()), "sealhash", block.Header().SealHash(), + w.logger.Debug("Commit new sealing work", "number", block.Number(w.hc.NodeCtx()), "sealhash", block.Header().SealHash(), "uncles", len(work.uncles), "txs", work.tcount, "etxs", len(block.ExtTransactions()), "gas", block.GasUsed(), "fees", totalFees(block, work.receipts), "elapsed", common.PrettyDuration(time.Since(start))) @@ -624,9 +627,9 @@ func (w *worker) commitTransaction(env *environment, tx *types.Transaction) ([]* snap := env.state.Snapshot() // retrieve the gas used int and pass in the reference to the ApplyTransaction gasUsed := env.header.GasUsed() - receipt, err := ApplyTransaction(w.chainConfig, w.hc, &env.coinbase, env.gasPool, env.state, env.header, tx, &gasUsed, *w.hc.bc.processor.GetVMConfig(), &env.etxRLimit, &env.etxPLimit) + receipt, err := ApplyTransaction(w.chainConfig, w.hc, &env.coinbase, env.gasPool, env.state, env.header, tx, &gasUsed, *w.hc.bc.processor.GetVMConfig(), &env.etxRLimit, &env.etxPLimit, w.logger) if err != nil { - log.Debug("Error playing transaction in worker", "err", err, "tx", tx.Hash().Hex(), "block", env.header.Number, "gasUsed", gasUsed) + w.logger.Debug("Error playing transaction in worker", "err", err, "tx", tx.Hash().Hex(), "block", env.header.Number, "gasUsed", gasUsed) env.state.RevertToSnapshot(snap) return nil, err } @@ -674,7 +677,7 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP } // If we don't have enough gas for any further transactions then we're done if env.gasPool.Gas() < params.TxGas { - log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) + w.logger.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) break } // Retrieve the next transaction and abort if all done @@ -694,22 +697,22 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP switch { case errors.Is(err, ErrGasLimitReached): // Pop the current out-of-gas transaction without shifting in the next from the account - log.Trace("Gas limit exceeded for current block", "sender", from) + w.logger.Trace("Gas limit exceeded for current block", "sender", from) txs.PopNoSort() case errors.Is(err, ErrEtxLimitReached): // Pop the current transaction without shifting in the next from the account - log.Trace("Etx limit exceeded for current block", "sender", from) + w.logger.Trace("Etx limit exceeded for current block", "sender", from) txs.PopNoSort() case errors.Is(err, ErrNonceTooLow): // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) + w.logger.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) txs.Shift(from.Bytes20(), false) case errors.Is(err, ErrNonceTooHigh): // Reorg notification data race between the transaction pool and miner, skip account = - log.Debug("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce()) + w.logger.Debug("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce()) txs.PopNoSort() case errors.Is(err, nil): @@ -720,18 +723,18 @@ func (w *worker) commitTransactions(env *environment, 
txs *types.TransactionsByP case errors.Is(err, ErrTxTypeNotSupported): // Pop the unsupported transaction without shifting in the next from the account - log.Error("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) + w.logger.Error("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) txs.PopNoSort() case strings.Contains(err.Error(), "emits too many cross"): // This is ErrEtxLimitReached with more info // Pop the unsupported transaction without shifting in the next from the account - log.Trace("Etx limit exceeded for current block", "sender", from, "err", err) + w.logger.Trace("Etx limit exceeded for current block", "sender", from, "err", err) txs.PopNoSort() default: // Strange error, discard the transaction and get the next in line (note, the // nonce-too-high clause will prevent us from executing in vain). - log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) + w.logger.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) txs.Shift(from.Bytes20(), false) } } @@ -810,7 +813,7 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en header.SetBaseFee(misc.CalcBaseFee(w.chainConfig, parent.Header())) if w.isRunning() { if w.coinbase.Equal(common.ZeroAddr) { - log.Error("Refusing to mine without etherbase") + w.logger.Error("Refusing to mine without etherbase") return nil, errors.New("refusing to mine without etherbase") } header.SetCoinbase(w.coinbase) @@ -818,12 +821,12 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en // Run the consensus preparation with the default or customized consensus engine. if err := w.engine.Prepare(w.hc, header, block.Header()); err != nil { - log.Error("Failed to prepare header for sealing", "err", err) + w.logger.Error("Failed to prepare header for sealing", "err", err) return nil, err } env, err := w.makeEnv(parent, header, w.coinbase) if err != nil { - log.Error("Failed to create sealing context", "err", err) + w.logger.Error("Failed to create sealing context", "err", err) return nil, err } // Accumulate the uncles for the sealing work. 
@@ -836,9 +839,9 @@ func (w *worker) prepareWork(genParams *generateParams, block *types.Block) (*en } env.uncleMu.RUnlock() if err := w.commitUncle(env, uncle.Header()); err != nil { - log.Trace("Possible uncle rejected", "hash", hash, "reason", err) + w.logger.Trace("Possible uncle rejected", "hash", hash, "reason", err) } else { - log.Debug("Committing new uncle to block", "hash", hash) + w.logger.Debug("Committing new uncle to block", "hash", hash) } } } @@ -963,13 +966,13 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti select { case w.taskCh <- &task{receipts: env.receipts, state: env.state, block: block, createdAt: time.Now()}: env.uncleMu.RLock() - log.Info("Commit new sealing work", "number", block.Number(nodeCtx), "sealhash", block.Header().SealHash(), + w.logger.Info("Commit new sealing work", "number", block.Number(nodeCtx), "sealhash", block.Header().SealHash(), "uncles", len(env.uncles), "txs", env.tcount, "etxs", len(block.ExtTransactions()), "gas", block.GasUsed(), "fees", totalFees(block, env.receipts), "elapsed", common.PrettyDuration(time.Since(start))) env.uncleMu.RUnlock() case <-w.exitCh: - log.Info("worker has exited") + w.logger.Info("worker has exited") } } @@ -1000,7 +1003,7 @@ func (w *worker) GetPendingBlockBody(header *types.Header) *types.Body { if ok { return body.(*types.Body) } - log.Warn("pending block body not found for header: ", key) + w.logger.Warn("pending block body not found for header: ", key) return nil } diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index 7cd14e33c2..6f6de32985 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -61,12 +61,12 @@ type Database struct { quitLock sync.Mutex // Mutex protecting the quit channel access quitChan chan chan error // Quit channel to stop the metrics collection before closing the database - log log.Logger // Contextual logger tracking the database path + logger log.Logger // Contextual logger tracking the database path } // New returns a wrapped LevelDB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. -func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) { +func New(file string, cache int, handles int, namespace string, readonly bool, logger log.Logger) (*Database, error) { return NewCustom(file, namespace, func(options *opt.Options) { // Ensure we have some minimal caching and file guarantees if cache < minCache { @@ -82,20 +82,20 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( if readonly { options.ReadOnly = true } - }) + }, logger) } // NewCustom returns a wrapped LevelDB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. // The customize function allows the caller to modify the leveldb options. 
-func NewCustom(file string, namespace string, customize func(options *opt.Options)) (*Database, error) { +func NewCustom(file string, namespace string, customize func(options *opt.Options), logger log.Logger) (*Database, error) { options := configureOptions(customize) usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2 logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()} if options.ReadOnly { logCtx = append(logCtx, "readonly", "true") } - log.Info("Allocated cache and file handles") + logger.Info("Allocated cache and file handles") // Open the db and recover any potential corruptions db, err := leveldb.OpenFile(file, options) @@ -110,6 +110,7 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option fn: file, db: db, quitChan: make(chan chan error), + logger: logger, } return ldb, nil @@ -138,7 +139,7 @@ func (db *Database) Close() error { errc := make(chan error) db.quitChan <- errc if err := <-errc; err != nil { - db.log.Error("Metrics collection failed", "err", err) + db.logger.Error("Metrics collection failed", "err", err) } db.quitChan = nil } @@ -252,7 +253,7 @@ func (db *Database) meter(refresh time.Duration) { // Retrieve the database stats stats, err := db.db.GetProperty("leveldb.stats") if err != nil { - db.log.Error("Failed to read database stats", "err", err) + db.logger.Error("Failed to read database stats", "err", err) merr = err continue } @@ -262,7 +263,7 @@ func (db *Database) meter(refresh time.Duration) { lines = lines[1:] } if len(lines) <= 3 { - db.log.Error("Compaction leveldbTable not found") + db.logger.Error("Compaction leveldbTable not found") merr = errors.New("compaction leveldbTable not found") continue } @@ -280,7 +281,7 @@ func (db *Database) meter(refresh time.Duration) { for idx, counter := range parts[2:] { value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64) if err != nil { - db.log.Error("Compaction entry parsing failed", "err", err) + db.logger.Error("Compaction entry parsing failed", "err", err) merr = err continue } @@ -290,7 +291,7 @@ func (db *Database) meter(refresh time.Duration) { // Retrieve the write delay statistic writedelay, err := db.db.GetProperty("leveldb.writedelay") if err != nil { - db.log.Error("Failed to read database write delay statistic", "err", err) + db.logger.Error("Failed to read database write delay statistic", "err", err) merr = err continue } @@ -301,13 +302,13 @@ func (db *Database) meter(refresh time.Duration) { paused bool ) if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil { - db.log.Error("Write delay statistic not found") + db.logger.Error("Write delay statistic not found") merr = err continue } duration, err = time.ParseDuration(delayDuration) if err != nil { - db.log.Error("Failed to parse delay duration", "err", err) + db.logger.Error("Failed to parse delay duration", "err", err) merr = err continue } @@ -315,7 +316,7 @@ func (db *Database) meter(refresh time.Duration) { // warnings will be withheld for one minute not to overwhelm the user. 
if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 && time.Now().After(lastWritePaused.Add(degradationWarnInterval)) { - db.log.Warn("Database compacting, degraded performance") + db.logger.Warn("Database compacting, degraded performance") lastWritePaused = time.Now() } delaystats[0], delaystats[1] = delayN, duration.Nanoseconds() @@ -323,24 +324,24 @@ func (db *Database) meter(refresh time.Duration) { // Retrieve the database iostats. ioStats, err := db.db.GetProperty("leveldb.iostats") if err != nil { - db.log.Error("Failed to read database iostats", "err", err) + db.logger.Error("Failed to read database iostats", "err", err) merr = err continue } var nRead, nWrite float64 parts := strings.Split(ioStats, " ") if len(parts) < 2 { - db.log.Error("Bad syntax of ioStats", "ioStats", ioStats) + db.logger.Error("Bad syntax of ioStats", "ioStats", ioStats) merr = fmt.Errorf("bad syntax of ioStats %s", ioStats) continue } if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil { - db.log.Error("Bad syntax of read entry", "entry", parts[0]) + db.logger.Error("Bad syntax of read entry", "entry", parts[0]) merr = err continue } if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil { - db.log.Error("Bad syntax of write entry", "entry", parts[1]) + db.logger.Error("Bad syntax of write entry", "entry", parts[1]) merr = err continue } @@ -348,7 +349,7 @@ func (db *Database) meter(refresh time.Duration) { compCount, err := db.db.GetProperty("leveldb.compcount") if err != nil { - db.log.Error("Failed to read database iostats", "err", err) + db.logger.Error("Failed to read database iostats", "err", err) merr = err continue } @@ -360,7 +361,7 @@ func (db *Database) meter(refresh time.Duration) { seekComp uint32 ) if n, err := fmt.Sscanf(compCount, "MemComp:%d Level0Comp:%d NonLevel0Comp:%d SeekComp:%d", &memComp, &level0Comp, &nonLevel0Comp, &seekComp); n != 4 || err != nil { - db.log.Error("Compaction count statistic not found") + db.logger.Error("Compaction count statistic not found") merr = err continue } diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go index fe685c43ad..2fdb7f182d 100644 --- a/ethdb/pebble/pebble.go +++ b/ethdb/pebble/pebble.go @@ -55,7 +55,7 @@ type Database struct { quitChan chan chan error // Quit channel to stop the metrics collection before closing the database closed bool // keep track of whether we're Closed - log log.Logger // Contextual logger tracking the database path + logger log.Logger // Contextual logger tracking the database path activeComp int // Current number of active compactions compStartTime time.Time // The start time of the earliest currently-active compaction @@ -99,7 +99,7 @@ func (d *Database) onWriteStallEnd() { // New returns a wrapped pebble DB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. 
-func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) { +func New(file string, cache int, handles int, namespace string, readonly bool, logger log.Logger) (*Database, error) { // Ensure we have some minimal caching and file guarantees if cache < minCache { cache = minCache @@ -107,7 +107,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( if handles < minHandles { handles = minHandles } - log.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles) + logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles) // The max memtable size is limited by the uint32 offsets stored in // internal/arenaskl.node, DeferredBatchOp, and flushableBatchEntry. @@ -124,6 +124,7 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( db := &Database{ fn: file, quitChan: make(chan chan error), + logger: logger, } opt := &pebble.Options{ // Pebble has a single combined cache area and the write @@ -194,7 +195,7 @@ func (d *Database) Close() error { errc := make(chan error) d.quitChan <- errc if err := <-errc; err != nil { - d.log.Error("Metrics collection failed", "err", err) + d.logger.Error("Metrics collection failed", "err", err) } d.quitChan = nil } diff --git a/log/interface.go b/log/interface.go index 11eea0df23..427bc36e71 100644 --- a/log/interface.go +++ b/log/interface.go @@ -1,21 +1,24 @@ package log import ( + "fmt" + "strings" + "github.com/sirupsen/logrus" ) type Logger interface { - Trace(keyvals ...interface{}) + Trace(msg string, args ...interface{}) Tracef(msg string, args ...interface{}) - Debug(keyvals ...interface{}) + Debug(msg string, args ...interface{}) Debugf(msg string, args ...interface{}) - Info(keyvals ...interface{}) + Info(msg string, args ...interface{}) Infof(msg string, args ...interface{}) - Warn(keyvals ...interface{}) + Warn(msg string, args ...interface{}) Warnf(msg string, args ...interface{}) - Error(keyvals ...interface{}) + Error(msg string, args ...interface{}) Errorf(msg string, args ...interface{}) - Fatal(keyvals ...interface{}) + Fatal(msg string, args ...interface{}) Fatalf(msg string, args ...interface{}) WithField(key string, val interface{}) Logger @@ -28,48 +31,48 @@ type LogWrapper struct { // Interface assertion var _ Logger = (*LogWrapper)(nil) -func (l *LogWrapper) Trace(keyvals ...interface{}) { - l.entry.Trace(keyvals...) +func (l *LogWrapper) Trace(msg string, args ...interface{}) { + l.entry.Trace(ConstructLogMessage(msg, args...)) } func (l *LogWrapper) Tracef(msg string, args ...interface{}) { l.entry.Tracef(msg, args...) } -func (l *LogWrapper) Debug(keyvals ...interface{}) { - l.entry.Debug(keyvals...) +func (l *LogWrapper) Debug(msg string, args ...interface{}) { + l.entry.Debug(ConstructLogMessage(msg, args...)) } func (l *LogWrapper) Debugf(msg string, args ...interface{}) { l.entry.Debugf(msg, args...) } -func (l *LogWrapper) Info(keyvals ...interface{}) { - l.entry.Info(keyvals...) +func (l *LogWrapper) Info(msg string, args ...interface{}) { + l.entry.Info(ConstructLogMessage(msg, args...)) } func (l *LogWrapper) Infof(msg string, args ...interface{}) { l.entry.Infof(msg, args...) } -func (l *LogWrapper) Warn(keyvals ...interface{}) { - l.entry.Warn(keyvals...) 
+func (l *LogWrapper) Warn(msg string, args ...interface{}) {
+	l.entry.Warn(ConstructLogMessage(msg, args...))
 }
 
 func (l *LogWrapper) Warnf(msg string, args ...interface{}) {
 	l.entry.Warnf(msg, args...)
 }
 
-func (l *LogWrapper) Error(keyvals ...interface{}) {
-	l.entry.Error(keyvals...)
+func (l *LogWrapper) Error(msg string, args ...interface{}) {
+	l.entry.Error(ConstructLogMessage(msg, args...))
 }
 
 func (l *LogWrapper) Errorf(msg string, args ...interface{}) {
 	l.entry.Errorf(msg, args...)
 }
 
-func (l *LogWrapper) Fatal(keyvals ...interface{}) {
-	l.entry.Fatal(keyvals...)
+func (l *LogWrapper) Fatal(msg string, args ...interface{}) {
+	l.entry.Fatal(ConstructLogMessage(msg, args...))
 }
 
 func (l *LogWrapper) Fatalf(msg string, args ...interface{}) {
@@ -79,3 +82,21 @@ func (l *LogWrapper) Fatalf(msg string, args ...interface{}) {
 func (l *LogWrapper) WithField(key string, val interface{}) Logger {
 	return &LogWrapper{entry: l.entry.WithField(key, val)}
 }
+
+func ConstructLogMessage(msg string, fields ...interface{}) string {
+	var pairs []string
+
+	if len(fields) != 1 {
+		// Sometimes we only want to log a single message string; key/value pairs are built only when the fields can be paired up.
+		if len(fields)%2 != 0 {
+			fields = append(fields, "MISSING VALUE")
+		}
+
+		for i := 0; i < len(fields); i += 2 {
+			key := fields[i]
+			value := fields[i+1]
+			pairs = append(pairs, fmt.Sprintf("%v=%v", key, value))
+		}
+	}
+	return fmt.Sprintf("%-40s %s", msg, strings.Join(pairs, " "))
+}
diff --git a/log/logger.go b/log/logger.go
index f3701b288d..5a0992d91e 100644
--- a/log/logger.go
+++ b/log/logger.go
@@ -36,9 +36,29 @@ func init() {
 	}
 	ConfigureLogger(
 		WithLevel(defaultLogLevel),
-		WithOutput(ToStdOut(), ToLogFile(defaultLogFilePath)),
+		WithOutput(ToLogFile(defaultLogFilePath)),
 	)
-	logger.Infof("Logger started. Writing logs to: %s", defaultLogFilePath)
+	logger.Infof("Global Logger started. Writing logs to: %s", defaultLogFilePath)
+}
+
+func New(logFilePath string, logLevel string) *LogWrapper {
+	newLogger := logrus.New()
+	newLogger.SetOutput(ToLogFile(logFilePath))
+	entry := logrus.NewEntry(newLogger)
+	newWrapper := &LogWrapper{
+		entry: entry,
+	}
+	ConfigureCustomLogger(newWrapper,
+		WithLevel(logLevel),
+	)
+	newWrapper.Info("Shard Logger started", "path", logFilePath, "level", logLevel)
+	return newWrapper
+}
+
+func ConfigureCustomLogger(logger *LogWrapper, opts ...Options) {
+	for _, opt := range opts {
+		opt(logger)
+	}
 }
 
 func ConfigureLogger(opts ...Options) {
@@ -47,54 +67,54 @@ func ConfigureLogger(opts ...Options) {
 	}
 }
 
-func Trace(keyvals ...interface{}) {
-	logger.Trace(keyvals...)
+func WithField(key string, val interface{}) Logger {
+	return logger.WithField(key, val)
+}
+
+func Trace(msg string, args ...interface{}) {
+	logger.Trace(ConstructLogMessage(msg, args...))
 }
 
 func Tracef(msg string, args ...interface{}) {
 	logger.Tracef(msg, args...)
 }
 
-func Debug(keyvals ...interface{}) {
-	logger.Debug(keyvals...)
+func Debug(msg string, args ...interface{}) {
+	logger.Debug(ConstructLogMessage(msg, args...))
 }
 
 func Debugf(msg string, args ...interface{}) {
 	logger.Debugf(msg, args...)
 }
 
-func Info(keyvals ...interface{}) {
-	logger.Info(keyvals...)
+func Info(msg string, args ...interface{}) {
+	logger.Info(ConstructLogMessage(msg, args...))
 }
 
 func Infof(msg string, args ...interface{}) {
 	logger.Infof(msg, args...)
 }
 
-func Warn(keyvals ...interface{}) {
-	logger.Warn(keyvals...)
+func Warn(msg string, args ...interface{}) {
+	logger.Warn(ConstructLogMessage(msg, args...))
 }
 
 func Warnf(msg string, args ...interface{}) {
 	logger.Warnf(msg, args...)
 }
 
-func Error(keyvals ...interface{}) {
-	logger.Error(keyvals...)
+func Error(msg string, args ...interface{}) {
+	logger.Error(ConstructLogMessage(msg, args...))
 }
 
 func Errorf(msg string, args ...interface{}) {
 	logger.Errorf(msg, args...)
 }
 
-func Fatal(keyvals ...interface{}) {
-	logger.Fatal(keyvals...)
+func Fatal(msg string, args ...interface{}) {
+	logger.Fatal(ConstructLogMessage(msg, args...))
 }
 
 func Fatalf(msg string, args ...interface{}) {
 	logger.Fatalf(msg, args...)
 }
-
-func WithField(key string, val interface{}) Logger {
-	return logger.WithField(key, val)
-}
diff --git a/log/options.go b/log/options.go
index 05f48b3a40..c410fd239a 100644
--- a/log/options.go
+++ b/log/options.go
@@ -6,7 +6,6 @@ import (
 	"os"
 	"runtime"
 	"strings"
-	"time"
 
 	"github.com/natefinch/lumberjack"
 	"github.com/sirupsen/logrus"
@@ -37,18 +36,10 @@ func WithLevel(level string) Options {
 			lw.entry.Logger.SetLevel(l)
 		}
 		formatter := &logrus.TextFormatter{
-			FullTimestamp: false,
-			DisableLevelTruncation: true,
-			ForceColors: true,
-			PadLevelText: false,
-			DisableColors: false,
-		}
-
-		if l == logrus.DebugLevel || l == logrus.TraceLevel {
-			formatter.TimestampFormat = time.RFC3339
-			formatter.FullTimestamp = true
-			formatter.CallerPrettyfier = callerPrettyfier
-			lw.entry.Logger.SetReportCaller(true)
+			ForceColors: true,
+			PadLevelText: true,
+			FullTimestamp: true,
+			TimestampFormat: "01-02|15:04:05.000",
 		}
 		lw.entry.Logger.SetFormatter(formatter)
 	}
@@ -71,10 +62,10 @@ func WithOutput(outputs ...io.Writer) Options {
 				hook := FileLogHook{
 					Writer: v,
 					Formatter: &logrus.TextFormatter{
-						TimestampFormat: time.RFC3339,
-						CallerPrettyfier: callerPrettyfier,
-						DisableTimestamp: false,
-						FullTimestamp: true,
+						ForceColors: true,
+						PadLevelText: true,
+						FullTimestamp: true,
+						TimestampFormat: "01-02|15:04:05.000",
 					},
 				}
 				lw.entry.Logger.AddHook(&hook)
diff --git a/node/node.go b/node/node.go
index 5c51543426..3e13dbe547 100644
--- a/node/node.go
+++ b/node/node.go
@@ -38,7 +38,7 @@ import (
 type Node struct {
 	eventmux *event.TypeMux
 	config *Config
-	log log.Logger
+	logger log.Logger
 	dirLock fileutil.Releaser // prevents concurrent use of instance directory
 	stop chan struct{} // Channel to wait for termination notifications
 	startStopLock sync.Mutex // Start/Stop are protected by an additional lock
@@ -61,7 +61,7 @@ const (
 )
 
 // New creates a new P2P node, ready for protocol registration.
-func New(conf *Config) (*Node, error) {
+func New(conf *Config, logger log.Logger) (*Node, error) {
 	// Copy config and resolve the datadir so future changes to the current
 	// working directory don't affect the node.
 	confCopy := *conf
@@ -89,6 +89,7 @@ func New(conf *Config) (*Node, error) {
 		eventmux: new(event.TypeMux),
 		stop: make(chan struct{}),
 		databases: make(map[*closeTrackingDB]struct{}),
+		logger: logger,
 	}
 
 	// Acquire the instance directory lock.
@@ -105,8 +106,8 @@ func New(conf *Config) (*Node, error) {
 	}
 
 	// Configure RPC servers.
-	node.http = newHTTPServer(node.log, conf.HTTPTimeouts)
-	node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
+	node.http = newHTTPServer(node.logger, conf.HTTPTimeouts)
+	node.ws = newHTTPServer(node.logger, rpc.DefaultHTTPTimeouts)
 
 	return node, nil
 }
@@ -269,7 +270,7 @@ func (n *Node) closeDataDir() {
 	// Release instance directory lock.
 	if n.dirLock != nil {
 		if err := n.dirLock.Release(); err != nil {
-			n.log.Error("Can't release datadir lock", "err", err)
+			n.logger.Error("Can't release datadir lock", "err", err)
 		}
 		n.dirLock = nil
 	}
@@ -469,7 +470,7 @@ func (n *Node) OpenDatabase(name string, cache, handles int, namespace string, r
 			Cache: cache,
 			Handles: handles,
 			ReadOnly: readonly,
-		}, n.config.NodeLocation.Context())
+		}, n.config.NodeLocation.Context(), n.logger)
 	}
 
 	if err == nil {
@@ -502,7 +503,7 @@ func (n *Node) OpenDatabaseWithFreezer(name string, cache, handles int, ancient,
 			Cache: cache,
 			Handles: handles,
 			ReadOnly: readonly,
-		}, n.config.NodeLocation.Context())
+		}, n.config.NodeLocation.Context(), n.logger)
 	}
 
 	if err == nil {
diff --git a/node/rpcstack.go b/node/rpcstack.go
index a7cc958194..d21ec90f67 100644
--- a/node/rpcstack.go
+++ b/node/rpcstack.go
@@ -55,7 +55,7 @@ type rpcHandler struct {
 }
 
 type httpServer struct {
-	log log.Logger
+	logger log.Logger
 	timeouts rpc.HTTPTimeouts
 	mux http.ServeMux // registered handlers go here
 
@@ -80,8 +80,8 @@ type httpServer struct {
 	handlerNames map[string]string
 }
 
-func newHTTPServer(log log.Logger, timeouts rpc.HTTPTimeouts) *httpServer {
-	h := &httpServer{log: log, timeouts: timeouts, handlerNames: make(map[string]string)}
+func newHTTPServer(logger log.Logger, timeouts rpc.HTTPTimeouts) *httpServer {
+	h := &httpServer{logger: logger, timeouts: timeouts, handlerNames: make(map[string]string)}
 
 	h.httpHandler.Store((*rpcHandler)(nil))
 	h.wsHandler.Store((*rpcHandler)(nil))
@@ -149,14 +149,14 @@ func (h *httpServer) start() error {
 		if h.wsConfig.prefix != "" {
 			url += h.wsConfig.prefix
 		}
-		log.Info("WebSocket enabled", "url", url)
+		h.logger.Info("WebSocket enabled", "url", url)
 	}
 	// if server is websocket only, return after logging
 	if !h.rpcAllowed() {
 		return nil
 	}
 	// Log http endpoint.
-	log.Info("HTTP server started",
+	h.logger.Info("HTTP server started",
 		"endpoint", listener.Addr(),
 		"prefix", h.httpConfig.prefix,
 		"cors", strings.Join(h.httpConfig.CorsAllowedOrigins, ","),
@@ -173,7 +173,7 @@ func (h *httpServer) start() error {
 	for _, path := range paths {
 		name := h.handlerNames[path]
 		if !logged[name] {
-			log.Info(name+" enabled", "url", "http://"+listener.Addr().String()+path)
+			h.logger.Info(name+" enabled", "url", "http://"+listener.Addr().String()+path)
 			logged[name] = true
 		}
 	}
@@ -262,7 +262,7 @@ func (h *httpServer) doStop() {
 	}
 	h.server.Shutdown(context.Background())
 	h.listener.Close()
-	h.log.Info("HTTP server stopped", "endpoint", h.listener.Addr())
+	h.logger.Info("HTTP server stopped", "endpoint", h.listener.Addr())
 
 	// Clear out everything to allow re-configuring it later.
 	h.host, h.port, h.endpoint = "", 0, ""
@@ -280,7 +280,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig) error {
 
 	// Create RPC server and handler.
 	srv := rpc.NewServer()
-	if err := RegisterApis(apis, config.Modules, srv, false); err != nil {
+	if err := RegisterApis(apis, config.Modules, srv, false, h.logger); err != nil {
 		return err
 	}
 	h.httpConfig = config
@@ -312,7 +312,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig) error {
 	// Create RPC server and handler.
 	srv := rpc.NewServer()
-	if err := RegisterApis(apis, config.Modules, srv, false); err != nil {
+	if err := RegisterApis(apis, config.Modules, srv, false, h.logger); err != nil {
 		return err
 	}
 	h.wsConfig = config
@@ -472,9 +472,9 @@ func newGzipHandler(next http.Handler) http.Handler {
 
 // RegisterApis checks the given modules' availability, generates an allowlist based on the allowed modules,
 // and then registers all of the APIs exposed by the services.
-func RegisterApis(apis []rpc.API, modules []string, srv *rpc.Server, exposeAll bool) error {
+func RegisterApis(apis []rpc.API, modules []string, srv *rpc.Server, exposeAll bool, logger log.Logger) error {
 	if bad, available := checkModuleAvailability(modules, apis); len(bad) > 0 {
-		log.Error("Unavailable modules in HTTP API list", "unavailable", bad, "available", available)
+		logger.Error("Unavailable modules in HTTP API list", "unavailable", bad, "available", available)
 	}
 	// Generate the allow list based on the allowed modules
 	allowList := make(map[string]bool)
diff --git a/p2p/node/events.go b/p2p/node/events.go
index 5fca0b4d40..aa78d78a19 100644
--- a/p2p/node/events.go
+++ b/p2p/node/events.go
@@ -39,7 +39,7 @@ func (p *P2PNode) eventLoop() {
 			} else {
 				for _, addr := range e.Current {
 					addr := addr.Address.Encapsulate(p2pAddr)
-					log.Infof("Event: 'Local address udpdated': %s", addr)
+					log.Infof("Event: 'Local address updated': %s", addr)
 				}
 				// log removed addresses
 				for _, addr := range e.Removed {
diff --git a/quai/backend.go b/quai/backend.go
index 6bf32c9f0d..7a878609a9 100644
--- a/quai/backend.go
+++ b/quai/backend.go
@@ -70,14 +70,16 @@ type Quai struct {
 	etherbase common.Address
 
 	lock sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)
+
+	logger log.Logger
 }
 
 // New creates a new Quai object (including the
 // initialisation of the common Quai object)
-func New(stack *node.Node, config *quaiconfig.Config, nodeCtx int) (*Quai, error) {
+func New(stack *node.Node, config *quaiconfig.Config, nodeCtx int, logger log.Logger) (*Quai, error) {
 	// Ensure configuration values are compatible and sane
 	if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(common.Big0) <= 0 {
-		log.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", quaiconfig.Defaults.Miner.GasPrice)
+		logger.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", quaiconfig.Defaults.Miner.GasPrice)
 		config.Miner.GasPrice = new(big.Int).Set(quaiconfig.Defaults.Miner.GasPrice)
 	}
 	if config.NoPruning && config.TrieDirtyCache > 0 {
@@ -89,22 +91,22 @@ func New(stack *node.Node, config *quaiconfig.Config, nodeCtx int) (*Quai, error
 		}
 		config.TrieDirtyCache = 0
 	}
-	log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)
+	logger.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)
 
 	// Assemble the Quai object
 	chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false)
 	if err != nil {
 		return nil, err
 	}
-	chainConfig, _, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.NodeLocation)
+	chainConfig, _, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.NodeLocation, logger)
 	if genesisErr != nil {
 		return nil, genesisErr
 	}
-	log.Warn("Memory location of chainConfig", "location", &chainConfig)
+	logger.Warn("Memory location of chainConfig", "location", &chainConfig)
 	if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil {
-		log.Error("Failed to recover state", "error", err)
+		logger.Error("Failed to recover state", "error", err)
 	}
 	quai := &Quai{
 		config: config,
@@ -114,6 +116,7 @@ func New(stack *node.Node, config *quaiconfig.Config, nodeCtx int) (*Quai, error
 		gasPrice: config.Miner.GasPrice,
 		etherbase: config.Miner.Etherbase,
 		bloomRequests: make(chan chan *bloombits.Retrieval),
+		logger: logger,
 	}
 
 	// Copy the chainConfig
@@ -127,36 +130,36 @@ func New(stack *node.Node, config *quaiconfig.Config, nodeCtx int) (*Quai, error
 	}
 	chainConfig = &newChainConfig
-	log.Info("Chain Config", config.NodeLocation)
+	logger.Info("Chain Config", config.NodeLocation)
 	chainConfig.Location = config.NodeLocation // TODO: See why this is necessary
-	log.Info("Node", "Ctx", nodeCtx, "NodeLocation", config.NodeLocation, "genesis location", config.Genesis.Config.Location, "chain config", chainConfig.Location)
+	logger.Info("Node", "Ctx", nodeCtx, "NodeLocation", config.NodeLocation, "genesis location", config.Genesis.Config.Location, "chain config", chainConfig.Location)
 	if config.ConsensusEngine == "blake3" {
 		blake3Config := config.Blake3Pow
 		blake3Config.NotifyFull = config.Miner.NotifyFull
 		blake3Config.NodeLocation = config.NodeLocation
-		quai.engine = quaiconfig.CreateBlake3ConsensusEngine(stack, config.NodeLocation, &blake3Config, config.Miner.Notify, config.Miner.Noverify, chainDb)
+		quai.engine = quaiconfig.CreateBlake3ConsensusEngine(stack, config.NodeLocation, &blake3Config, config.Miner.Notify, config.Miner.Noverify, chainDb, logger)
 	} else {
 		// Transfer mining-related config to the progpow config.
 		progpowConfig := config.Progpow
 		progpowConfig.NodeLocation = config.NodeLocation
 		progpowConfig.NotifyFull = config.Miner.NotifyFull
-		quai.engine = quaiconfig.CreateProgpowConsensusEngine(stack, config.NodeLocation, &progpowConfig, config.Miner.Notify, config.Miner.Noverify, chainDb)
+		quai.engine = quaiconfig.CreateProgpowConsensusEngine(stack, config.NodeLocation, &progpowConfig, config.Miner.Notify, config.Miner.Noverify, chainDb, logger)
 	}
-	log.Info("Initialised chain configuration", "config", chainConfig)
+	logger.Info("Initialised chain configuration", "config", chainConfig)
 
 	bcVersion := rawdb.ReadDatabaseVersion(chainDb)
 	var dbVer = ""
 	if bcVersion != nil {
 		dbVer = fmt.Sprintf("%d", *bcVersion)
 	}
-	log.Info("Initialising Quai protocol", "network", config.NetworkId, "dbversion", dbVer)
+	logger.Info("Initialising Quai protocol", "network", config.NetworkId, "dbversion", dbVer)
 
 	if !config.SkipBcVersionCheck {
 		if bcVersion != nil && *bcVersion > core.BlockChainVersion {
 			return nil, fmt.Errorf("database version is v%d, Quai %s only supports v%d", *bcVersion, params.Version.Full(), core.BlockChainVersion)
 		} else if bcVersion == nil || *bcVersion < core.BlockChainVersion {
 			if bcVersion != nil { // only print warning on upgrade, not on init
-				log.Warn("Upgrade blockchain database version", "from", dbVer, "to", core.BlockChainVersion)
+				logger.Warn("Upgrade blockchain database version", "from", dbVer, "to", core.BlockChainVersion)
 			}
 			rawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)
 		}
@@ -182,15 +185,15 @@ func New(stack *node.Node, config *quaiconfig.Config, nodeCtx int) (*Quai, error
 		config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal)
 	}
 
-	log.Info("Dom clietn", "url", quai.config.DomUrl)
-	quai.core, err = core.NewCore(chainDb, &config.Miner, quai.isLocalBlock, &config.TxPool, &config.TxLookupLimit, chainConfig, quai.config.SlicesRunning, quai.config.DomUrl, quai.config.SubUrls, quai.engine, cacheConfig, vmConfig, config.Genesis)
+	logger.Info("Dom client", "url", quai.config.DomUrl)
+	quai.core, err = core.NewCore(chainDb, &config.Miner, quai.isLocalBlock, &config.TxPool, &config.TxLookupLimit, chainConfig, quai.config.SlicesRunning, quai.config.DomUrl, quai.config.SubUrls, quai.engine, cacheConfig, vmConfig, config.Genesis, logger)
 	if err != nil {
 		return nil, err
 	}
 
 	// Only index bloom if processing state
 	if quai.core.ProcessingState() && nodeCtx == common.ZONE_CTX {
-		quai.bloomIndexer = core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms, chainConfig.Location.Context())
+		quai.bloomIndexer = core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms, chainConfig.Location.Context(), logger)
 		quai.bloomIndexer.Start(quai.Core().Slice().HeaderChain())
 	}
 
@@ -201,22 +204,22 @@ func New(stack *node.Node, config *quaiconfig.Config, nodeCtx int) (*Quai, error
 		if gpoParams.Default == nil {
 			gpoParams.Default = config.Miner.GasPrice
 		}
-		quai.APIBackend.gpo = gasprice.NewOracle(quai.APIBackend, gpoParams)
+		quai.APIBackend.gpo = gasprice.NewOracle(quai.APIBackend, gpoParams, logger)
 	}
 
 	// Register the backend on the node
 	stack.RegisterAPIs(quai.APIs())
 	stack.RegisterLifecycle(quai)
 
 	// Check for unclean shutdown
-	if uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb); err != nil {
-		log.Error("Could not update unclean-shutdown-marker list", "error", err)
+	if uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb, logger); err != nil {
+		logger.Error("Could not update unclean-shutdown-marker list", "error", err)
 	} else {
 		if discards > 0 {
-			log.Warn("Old unclean shutdowns found", "count", discards)
+			logger.Warn("Old unclean shutdowns found", "count", discards)
 		}
 		for _, tstamp := range uncleanShutdowns {
 			t := time.Unix(int64(tstamp), 0)
-			log.Warn("Unclean shutdown detected", "booted", t,
+			logger.Warn("Unclean shutdown detected", "booted", t,
 				"age", common.PrettyAge(t))
 		}
 	}
@@ -290,7 +293,7 @@ func (s *Quai) Etherbase() (eb common.Address, err error) {
 func (s *Quai) isLocalBlock(header *types.Header) bool {
 	author, err := s.engine.Author(header)
 	if err != nil {
-		log.Warn("Failed to retrieve block author", "number", header.NumberU64(s.core.NodeCtx()), "hash", header.Hash(), "err", err)
+		s.logger.Warn("Failed to retrieve block author", "number", header.NumberU64(s.core.NodeCtx()), "hash", header.Hash(), "err", err)
 		return false
 	}
 	// Check whether the given address is etherbase.
@@ -302,7 +305,7 @@ func (s *Quai) isLocalBlock(header *types.Header) bool {
 	}
 	internal, err := author.InternalAddress()
 	if err != nil {
-		log.Error("Failed to retrieve author internal address", "err", err)
+		s.logger.Error("Failed to retrieve author internal address", "err", err)
 	}
 	// Check whether the given address is specified by `txpool.local`
 	// CLI flag.
diff --git a/quai/gasprice/feehistory.go b/quai/gasprice/feehistory.go
index ff0b9669ae..5d97fab4b6 100644
--- a/quai/gasprice/feehistory.go
+++ b/quai/gasprice/feehistory.go
@@ -27,7 +27,6 @@ import (
 	"github.com/dominant-strategies/go-quai/common"
 	"github.com/dominant-strategies/go-quai/consensus/misc"
 	"github.com/dominant-strategies/go-quai/core/types"
-	"github.com/dominant-strategies/go-quai/log"
 	"github.com/dominant-strategies/go-quai/rpc"
 )
 
@@ -94,7 +93,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
 		return
 	}
 	if bf.block == nil || (bf.receipts == nil && len(bf.block.Transactions()) != 0) {
-		log.Error("Block or receipts are missing while reward percentiles are requested")
+		oracle.logger.Error("Block or receipts are missing while reward percentiles are requested")
 		return
 	}
 
@@ -207,7 +206,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast
 		return common.Big0, nil, nil, nil, nil // returning with no data and no error means there are no retrievable blocks
 	}
 	if blocks > maxFeeHistory {
-		log.Warn("Sanitizing fee history length", "requested", blocks, "truncated", maxFeeHistory)
+		oracle.logger.Warn("Sanitizing fee history length", "requested", blocks, "truncated", maxFeeHistory)
 		blocks = maxFeeHistory
 	}
 	for i, p := range rewardPercentiles {
diff --git a/quai/gasprice/gasprice.go b/quai/gasprice/gasprice.go
index d26b49dc41..ee9d2675cc 100644
--- a/quai/gasprice/gasprice.go
+++ b/quai/gasprice/gasprice.go
@@ -69,36 +69,38 @@ type Oracle struct {
 	checkBlocks, percentile int
 	maxHeaderHistory, maxBlockHistory int
+
+	logger log.Logger
 }
 
 // NewOracle returns a new gasprice oracle which can recommend suitable
 // gasprice for newly created transaction.
-func NewOracle(backend OracleBackend, params Config) *Oracle {
+func NewOracle(backend OracleBackend, params Config, logger log.Logger) *Oracle {
 	blocks := params.Blocks
 	if blocks < 1 {
 		blocks = 1
-		log.Warn("Sanitizing invalid gasprice oracle sample blocks", "provided", params.Blocks, "updated", blocks)
+		logger.Warn("Sanitizing invalid gasprice oracle sample blocks", "provided", params.Blocks, "updated", blocks)
 	}
 	percent := params.Percentile
 	if percent < 0 {
 		percent = 0
-		log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent)
+		logger.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent)
 	}
 	if percent > 100 {
 		percent = 100
-		log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent)
+		logger.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent)
 	}
 	maxPrice := params.MaxPrice
 	if maxPrice == nil || maxPrice.Int64() <= 0 {
 		maxPrice = DefaultMaxPrice
-		log.Warn("Sanitizing invalid gasprice oracle price cap", "provided", params.MaxPrice, "updated", maxPrice)
+		logger.Warn("Sanitizing invalid gasprice oracle price cap", "provided", params.MaxPrice, "updated", maxPrice)
 	}
 	ignorePrice := params.IgnorePrice
 	if ignorePrice == nil || ignorePrice.Int64() <= 0 {
 		ignorePrice = DefaultIgnorePrice
-		log.Warn("Sanitizing invalid gasprice oracle ignore price", "provided", params.IgnorePrice, "updated", ignorePrice)
+		logger.Warn("Sanitizing invalid gasprice oracle ignore price", "provided", params.IgnorePrice, "updated", ignorePrice)
 	} else if ignorePrice.Int64() > 0 {
-		log.Info("Gasprice oracle is ignoring threshold set", "threshold", ignorePrice)
+		logger.Info("Gasprice oracle is ignoring threshold set", "threshold", ignorePrice)
 	}
 	return &Oracle{
 		backend: backend,
@@ -109,6 +111,7 @@ func NewOracle(backend OracleBackend, params Config) *Oracle {
 		percentile: percent,
 		maxHeaderHistory: params.MaxHeaderHistory,
 		maxBlockHistory: params.MaxBlockHistory,
+		logger: logger,
 	}
 }
diff --git a/quai/quaiconfig/config.go b/quai/quaiconfig/config.go
index d207d8757d..a4bcc542d7 100644
--- a/quai/quaiconfig/config.go
+++ b/quai/quaiconfig/config.go
@@ -167,15 +167,15 @@ type Config struct {
 }
 
 // CreateProgpowConsensusEngine creates a progpow consensus engine for the given chain configuration.
-func CreateProgpowConsensusEngine(stack *node.Node, nodeLocation common.Location, config *progpow.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine {
+func CreateProgpowConsensusEngine(stack *node.Node, nodeLocation common.Location, config *progpow.Config, notify []string, noverify bool, db ethdb.Database, logger log.Logger) consensus.Engine {
 	// Otherwise assume proof-of-work
 	switch config.PowMode {
 	case progpow.ModeFake:
-		log.Warn("Progpow used in fake mode")
+		logger.Warn("Progpow used in fake mode")
 	case progpow.ModeTest:
-		log.Warn("Progpow used in test mode")
+		logger.Warn("Progpow used in test mode")
 	case progpow.ModeShared:
-		log.Warn("Progpow used in shared mode")
+		logger.Warn("Progpow used in shared mode")
 	}
 	engine := progpow.New(progpow.Config{
 		PowMode: config.PowMode,
@@ -184,21 +184,21 @@ func CreateProgpowConsensusEngine(stack *node.Node, nodeLocation common.Location
 		NodeLocation: nodeLocation,
 		GasCeil: config.GasCeil,
 		MinDifficulty: config.MinDifficulty,
-	}, notify, noverify)
+	}, notify, noverify, logger)
 	engine.SetThreads(-1) // Disable CPU mining
 	return engine
 }
 
 // CreateBlake3ConsensusEngine creates a progpow consensus engine for the given chain configuration.
-func CreateBlake3ConsensusEngine(stack *node.Node, nodeLocation common.Location, config *blake3pow.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine {
+func CreateBlake3ConsensusEngine(stack *node.Node, nodeLocation common.Location, config *blake3pow.Config, notify []string, noverify bool, db ethdb.Database, logger log.Logger) consensus.Engine {
 	// Otherwise assume proof-of-work
 	switch config.PowMode {
 	case blake3pow.ModeFake:
-		log.Warn("Progpow used in fake mode")
+		logger.Warn("Blake3pow used in fake mode")
 	case blake3pow.ModeTest:
-		log.Warn("Progpow used in test mode")
+		logger.Warn("Blake3pow used in test mode")
 	case blake3pow.ModeShared:
-		log.Warn("Progpow used in shared mode")
+		logger.Warn("Blake3pow used in shared mode")
 	}
 	engine := blake3pow.New(blake3pow.Config{
 		PowMode: config.PowMode,
@@ -207,7 +207,7 @@ func CreateBlake3ConsensusEngine(stack *node.Node, nodeLocation common.Location,
 		NodeLocation: nodeLocation,
 		GasCeil: config.GasCeil,
 		MinDifficulty: config.MinDifficulty,
-	}, notify, noverify)
+	}, notify, noverify, logger)
 	engine.SetThreads(-1) // Disable CPU mining
 	return engine
 }
diff --git a/quaiclient/quaiclient.go b/quaiclient/quaiclient.go
index f6cc8d8b08..007fe68704 100644
--- a/quaiclient/quaiclient.go
+++ b/quaiclient/quaiclient.go
@@ -37,11 +37,11 @@ type Client struct {
 }
 
 // Dial connects a client to the given URL.
-func Dial(rawurl string) (*Client, error) {
-	return DialContext(context.Background(), rawurl)
+func Dial(rawurl string, logger log.Logger) (*Client, error) {
+	return DialContext(context.Background(), rawurl, logger)
 }
 
-func DialContext(ctx context.Context, rawurl string) (*Client, error) {
+func DialContext(ctx context.Context, rawurl string, logger log.Logger) (*Client, error) {
 	connectStatus := false
 	attempts := 0
 
@@ -62,7 +62,7 @@ func DialContext(ctx context.Context, rawurl string) (*Client, error) {
 		}
 
 		// should only get here if the ffmpeg record stream process dies
-		log.Warn("Attempting to connect to go-quai node. Waiting and retrying...", "attempts", attempts, "delay", delaySecs, "url", rawurl)
+		logger.Warn("Attempting to connect to go-quai node. Waiting and retrying...", "attempts", attempts, "delay", delaySecs, "url", rawurl)
 		time.Sleep(time.Duration(delaySecs) * time.Second)
 	}
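
Usage note (illustrative only, not part of the diff above): a minimal sketch of how the per-process loggers introduced in log/logger.go and log/interface.go are expected to be used. Only log.New, the key/value logging methods, and WithField come from this patch; the file path, level, and messages below are example values.

	package main

	import "github.com/dominant-strategies/go-quai/log"

	func main() {
		// log.New builds a *LogWrapper that writes to its own file at its own
		// level instead of going through the shared global logger.
		logger := log.New("nodelogs/example.log", "info") // example path and level

		// Structured calls take a message plus key/value pairs, which
		// ConstructLogMessage flattens into "msg key=value ..." form.
		logger.Info("Starting Example", "location", "0-0")

		// WithField returns a Logger that carries a persistent field.
		sub := logger.WithField("component", "example")
		sub.Warn("Something worth flagging", "count", 3)
	}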