diff --git a/cmd/go-quai/chaincmd.go b/cmd/go-quai/chaincmd.go
index 9b48789d1b..1a4fd5c3bf 100644
--- a/cmd/go-quai/chaincmd.go
+++ b/cmd/go-quai/chaincmd.go
@@ -36,7 +36,6 @@ import (
"github.com/dominant-strategies/go-quai/crypto"
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/node"
"gopkg.in/urfave/cli.v1"
)
@@ -222,10 +221,6 @@ func importChain(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
}
- // Start metrics export if enabled
- utils.SetupMetrics(ctx)
- // Start system runtime metrics collection
- go metrics.CollectProcessMetrics(3 * time.Second)
stack, _ := makeConfigNode(ctx)
defer stack.Close()
diff --git a/cmd/go-quai/config.go b/cmd/go-quai/config.go
index 85d5a137fb..9cbf36ee29 100644
--- a/cmd/go-quai/config.go
+++ b/cmd/go-quai/config.go
@@ -32,7 +32,7 @@ import (
"github.com/dominant-strategies/go-quai/eth/ethconfig"
"github.com/dominant-strategies/go-quai/internal/quaiapi"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
+ "github.com/dominant-strategies/go-quai/metrics_config"
"github.com/dominant-strategies/go-quai/node"
"github.com/dominant-strategies/go-quai/p2p/nat"
"github.com/dominant-strategies/go-quai/params"
@@ -86,7 +86,7 @@ type quaiConfig struct {
Eth ethconfig.Config
Node node.Config
Ethstats quaistatsConfig
- Metrics metrics.Config
+ Metrics metrics_config.Config
}
func loadConfig(file string, cfg *quaiConfig) error {
@@ -123,7 +123,7 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, quaiConfig) {
cfg := quaiConfig{
Eth: ethconfig.Defaults,
Node: defaultNodeConfig(ctx),
- Metrics: metrics.DefaultConfig,
+ Metrics: metrics_config.DefaultConfig,
}
// Load config file.
diff --git a/cmd/go-quai/main.go b/cmd/go-quai/main.go
index 602d861967..3ad338c60a 100644
--- a/cmd/go-quai/main.go
+++ b/cmd/go-quai/main.go
@@ -24,6 +24,8 @@ import (
"strconv"
"time"
+ "github.com/dominant-strategies/go-quai/metrics_config"
+
"github.com/dominant-strategies/go-quai/cmd/utils"
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/eth"
@@ -32,7 +34,6 @@ import (
"github.com/dominant-strategies/go-quai/internal/flags"
"github.com/dominant-strategies/go-quai/internal/quaiapi"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/node"
"gopkg.in/urfave/cli.v1"
@@ -246,13 +247,13 @@ func prepare(ctx *cli.Context) {
ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(128))
}
- // Start metrics export if enabled
- utils.SetupMetrics(ctx)
-
// Start system runtime metrics collection
- if ctx.GlobalBool(utils.MetricsEnabledFlag.Name) {
- go metrics.CollectProcessMetrics(3 * time.Second)
+ if ctx.GlobalIsSet(utils.MetricsEnabledFlag.Name) {
+ log.Info("Starting metrics")
+ metrics_config.EnableMetrics()
+ go metrics_config.StartProcessMetrics()
}
}
// quai is the main entry point into the system if no special subcommand is run.
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 42d95e7515..fc3c8595d6 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -32,7 +32,6 @@ import (
"strings"
"text/tabwriter"
"text/template"
- "time"
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/common/fdlimit"
@@ -51,9 +50,8 @@ import (
"github.com/dominant-strategies/go-quai/internal/flags"
"github.com/dominant-strategies/go-quai/internal/quaiapi"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
- "github.com/dominant-strategies/go-quai/metrics/exp"
- "github.com/dominant-strategies/go-quai/metrics/influxdb"
+ "github.com/dominant-strategies/go-quai/metrics_config"
"github.com/dominant-strategies/go-quai/node"
"github.com/dominant-strategies/go-quai/p2p"
"github.com/dominant-strategies/go-quai/p2p/enode"
@@ -1604,34 +1602,6 @@ func RegisterQuaiStatsService(stack *node.Node, backend quaiapi.Backend, url str
}
}
-func SetupMetrics(ctx *cli.Context) {
- if metrics.Enabled {
- log.Info("Enabling metrics collection")
-
- var (
- enableExport = ctx.GlobalBool(MetricsEnableInfluxDBFlag.Name)
- endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
- database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
- username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
- password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
- )
-
- if enableExport {
- tagsMap := SplitTagsFlag(ctx.GlobalString(MetricsInfluxDBTagsFlag.Name))
-
- log.Info("Enabling metrics export to InfluxDB")
-
- go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "quai.", tagsMap)
- }
-
- if ctx.GlobalIsSet(MetricsHTTPFlag.Name) {
- address := fmt.Sprintf("%s:%d", ctx.GlobalString(MetricsHTTPFlag.Name), ctx.GlobalInt(MetricsPortFlag.Name))
- log.Info("Enabling stand-alone metrics HTTP endpoint", "address", address)
- exp.Setup(address)
- }
- }
-}
-
func SplitTagsFlag(tagsFlag string) map[string]string {
tags := strings.Split(tagsFlag, ",")
tagsMap := map[string]string{}
diff --git a/consensus/blake3pow/blake3pow.go b/consensus/blake3pow/blake3pow.go
index 64e1a1946a..e1ab21b3f7 100644
--- a/consensus/blake3pow/blake3pow.go
+++ b/consensus/blake3pow/blake3pow.go
@@ -10,7 +10,7 @@ import (
"github.com/dominant-strategies/go-quai/common/hexutil"
"github.com/dominant-strategies/go-quai/consensus"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/rpc"
)
@@ -62,7 +62,6 @@ type Blake3pow struct {
rand *rand.Rand // Properly seeded random source for nonces
threads int // Number of threads to mine on if mining
update chan struct{} // Notification channel to update mining parameters
- hashrate metrics.Meter // Meter tracking the average hashrate
remote *remoteSealer
// The fields below are hooks for testing
@@ -84,7 +83,6 @@ func New(config Config, notify []string, noverify bool) *Blake3pow {
blake3pow := &Blake3pow{
config: config,
update: make(chan struct{}),
- hashrate: metrics.NewMeterForced(),
}
if config.PowMode == ModeShared {
blake3pow.shared = sharedBlake3pow
@@ -205,7 +203,6 @@ func (blake3pow *Blake3pow) SetThreads(threads int) {
func (blake3pow *Blake3pow) Hashrate() float64 {
// Short circuit if the blake3pow is not running in normal/test mode.
if blake3pow.config.PowMode != ModeNormal && blake3pow.config.PowMode != ModeTest {
- return blake3pow.hashrate.Rate1()
+ return -1
}
var res = make(chan uint64, 1)
@@ -213,11 +210,10 @@ func (blake3pow *Blake3pow) Hashrate() float64 {
case blake3pow.remote.fetchRateCh <- res:
case <-blake3pow.remote.exitCh:
- // Return local hashrate only if blake3pow is stopped.
- return blake3pow.hashrate.Rate1()
+ return -1
}
- // Gather total submitted hash rate of remote sealers.
- return blake3pow.hashrate.Rate1() + float64(<-res)
+ return -1
}
// SubmitHashrate can be used for remote miners to submit their hash rate.
diff --git a/consensus/blake3pow/sealer.go b/consensus/blake3pow/sealer.go
index aecec6e06a..1fe485248d 100644
--- a/consensus/blake3pow/sealer.go
+++ b/consensus/blake3pow/sealer.go
@@ -132,14 +132,12 @@ search:
case <-abort:
// Mining terminated, update stats and abort
logger.Trace("Blake3pow nonce search aborted", "attempts", nonce-seed)
- blake3pow.hashrate.Mark(attempts)
break search
default:
// We don't have to update hash rate on every nonce, so update after 2^X nonces
attempts++
if (attempts % (1 << 15)) == 0 {
- blake3pow.hashrate.Mark(attempts)
attempts = 0
}
// Compute the PoW value of this nonce
diff --git a/consensus/progpow/progpow.go b/consensus/progpow/progpow.go
index d4b82514dc..4dbc2b2563 100644
--- a/consensus/progpow/progpow.go
+++ b/consensus/progpow/progpow.go
@@ -18,7 +18,6 @@ import (
"github.com/dominant-strategies/go-quai/common/hexutil"
"github.com/dominant-strategies/go-quai/consensus"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/rpc"
mmap "github.com/edsrzf/mmap-go"
"github.com/hashicorp/golang-lru/simplelru"
@@ -174,11 +173,10 @@ type Progpow struct {
caches *lru // In memory caches to avoid regenerating too often
// Mining related fields
- rand *rand.Rand // Properly seeded random source for nonces
- threads int // Number of threads to mine on if mining
- update chan struct{} // Notification channel to update mining parameters
- hashrate metrics.Meter // Meter tracking the average hashrate
- remote *remoteSealer
+ rand *rand.Rand // Properly seeded random source for nonces
+ threads int // Number of threads to mine on if mining
+ update chan struct{} // Notification channel to update mining parameters
+ remote *remoteSealer
// The fields below are hooks for testing
shared *Progpow // Shared PoW verifier to avoid cache regeneration
@@ -204,10 +202,9 @@ func New(config Config, notify []string, noverify bool) *Progpow {
config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
}
progpow := &Progpow{
- config: config,
- caches: newlru("cache", config.CachesInMem, newCache),
- update: make(chan struct{}),
- hashrate: metrics.NewMeterForced(),
+ config: config,
+ caches: newlru("cache", config.CachesInMem, newCache),
+ update: make(chan struct{}),
}
if config.PowMode == ModeShared {
progpow.shared = sharedProgpow
@@ -482,7 +479,7 @@ func (progpow *Progpow) SetThreads(threads int) {
func (progpow *Progpow) Hashrate() float64 {
// Short circuit if the progpow is not running in normal/test mode.
if progpow.config.PowMode != ModeNormal && progpow.config.PowMode != ModeTest {
- return progpow.hashrate.Rate1()
+ return -1
}
var res = make(chan uint64, 1)
@@ -490,11 +487,11 @@ func (progpow *Progpow) Hashrate() float64 {
case progpow.remote.fetchRateCh <- res:
case <-progpow.remote.exitCh:
- // Return local hashrate only if progpow is stopped.
- return progpow.hashrate.Rate1()
+ return -1
}
- // Gather total submitted hash rate of remote sealers.
- return progpow.hashrate.Rate1() + float64(<-res)
+ return -1
}
// SubmitHashrate can be used for remote miners to submit their hash rate.
diff --git a/consensus/progpow/sealer.go b/consensus/progpow/sealer.go
index 1f09504a8e..88a4d49ef6 100644
--- a/consensus/progpow/sealer.go
+++ b/consensus/progpow/sealer.go
@@ -127,14 +127,12 @@ search:
select {
case <-abort:
// Mining terminated, update stats and abort
- progpow.hashrate.Mark(attempts)
break search
default:
// We don't have to update hash rate on every nonce, so update after 2^X nonces
attempts++
if (attempts % (1 << 15)) == 0 {
- progpow.hashrate.Mark(attempts)
attempts = 0
}
powLight := func(size uint64, cache []uint32, hash []byte, nonce uint64, blockNumber uint64) ([]byte, []byte) {
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 19f71172e3..d125cd7592 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -35,8 +35,6 @@ func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
log.Fatal("Failed to store trie preimage", "err", err)
}
}
- preimageCounter.Inc(int64(len(preimages)))
- preimageHitCounter.Inc(int64(len(preimages)))
}
// ReadCode retrieves the contract code of the provided code hash.
diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 8ad7222767..eeafd8c626 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -29,7 +29,7 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/params"
"github.com/prometheus/tsdb/fileutil"
)
@@ -93,9 +93,6 @@ type freezer struct {
func newFreezer(datadir string, namespace string, readonly bool) (*freezer, error) {
// Create the initial freezer object
- var (
- readMeter = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
- writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
- sizeGauge = metrics.NewRegisteredGauge(namespace+"ancient/size", nil)
- )
// Ensure the datadir is not a symbolic link if it exists.
if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
@@ -120,7 +117,7 @@ func newFreezer(datadir string, namespace string, readonly bool) (*freezer, erro
quit: make(chan struct{}),
}
for name, disableSnappy := range FreezerNoSnappy {
- table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, disableSnappy)
+ table, err := newTable(datadir, name, disableSnappy)
if err != nil {
for _, table := range freezer.tables {
table.Close()
diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go
index cc63f59826..8e75fce15e 100644
--- a/core/rawdb/freezer_table.go
+++ b/core/rawdb/freezer_table.go
@@ -28,7 +28,6 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/golang/snappy"
)
@@ -94,10 +93,7 @@ type freezerTable struct {
// to count how many historic items have gone missing.
itemOffset uint32 // Offset (number of discarded items)
- headBytes uint32 // Number of bytes written to the head file
- readMeter metrics.Meter // Meter for measuring the effective amount of data read
- writeMeter metrics.Meter // Meter for measuring the effective amount of data written
- sizeGauge metrics.Gauge // Gauge for tracking the combined size of all freezer tables
+ headBytes uint32 // Number of bytes written to the head file
logger log.Logger // Logger with database path and table name embedded
lock sync.RWMutex // Mutex protecting the data file descriptors
@@ -105,12 +101,12 @@ type freezerTable struct {
// NewFreezerTable opens the given path as a freezer table.
func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
- return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, disableSnappy)
+ return newTable(path, name, disableSnappy)
}
// newTable opens a freezer table with default settings - 2G files
-func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) {
- return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy)
+func newTable(path string, name string, disableSnappy bool) (*freezerTable, error) {
+ return newCustomTable(path, name, 2*1000*1000*1000, disableSnappy)
}
// openFreezerFileForAppend opens a freezer table file and seeks to the end
@@ -154,7 +150,7 @@ func truncateFreezerFile(file *os.File, size int64) error {
// newCustomTable opens a freezer table, creating the data and index files if they are
// non existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
-func newCustomTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
+func newCustomTable(path string, name string, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
// Ensure the containing directory exists and open the indexEntry file
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
@@ -175,9 +171,6 @@ func newCustomTable(path string, name string, readMeter metrics.Meter, writeMete
tab := &freezerTable{
index: offsets,
files: make(map[uint32]*os.File),
- readMeter: readMeter,
- writeMeter: writeMeter,
- sizeGauge: sizeGauge,
name: name,
path: path,
logger: log.Log,
@@ -188,13 +181,6 @@ func newCustomTable(path string, name string, readMeter metrics.Meter, writeMete
tab.Close()
return nil, err
}
- // Initialize the starting size counter
- size, err := tab.sizeNolock()
- if err != nil {
- tab.Close()
- return nil, err
- }
- tab.sizeGauge.Inc(int64(size))
return tab, nil
}
@@ -339,11 +325,6 @@ func (t *freezerTable) truncate(items uint64) error {
if existing <= items {
return nil
}
- // We need to truncate, save the old size for metrics tracking
- oldSize, err := t.sizeNolock()
- if err != nil {
- return err
- }
// Something's out of sync, truncate the table's offset index
log := t.logger.Debug
if existing > items+1 {
@@ -383,13 +364,6 @@ func (t *freezerTable) truncate(items uint64) error {
atomic.StoreUint64(&t.items, items)
atomic.StoreUint32(&t.headBytes, expected.offset)
- // Retrieve the new size and update the total size counter
- newSize, err := t.sizeNolock()
- if err != nil {
- return err
- }
- t.sizeGauge.Dec(int64(oldSize - newSize))
-
return nil
}
@@ -539,9 +513,6 @@ func (t *freezerTable) append(item uint64, encodedBlob []byte, wlock bool) (bool
// Write indexEntry
t.index.Write(idx.marshallBinary())
- t.writeMeter.Mark(int64(bLen + indexEntrySize))
- t.sizeGauge.Inc(int64(bLen + indexEntrySize))
-
atomic.AddUint64(&t.items, 1)
return false, nil
}
@@ -622,7 +593,6 @@ func (t *freezerTable) retrieve(item uint64) ([]byte, error) {
if _, err := dataFile.ReadAt(blob, int64(startOffset)); err != nil {
return nil, err
}
- t.readMeter.Mark(int64(len(blob) + 2*indexEntrySize))
return blob, nil
}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 0f89d69964..c5324f6fc4 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -22,7 +22,6 @@ import (
"encoding/binary"
"github.com/dominant-strategies/go-quai/common"
- "github.com/dominant-strategies/go-quai/metrics"
)
// The fields below define the low level database schema prefixing.
@@ -116,9 +115,6 @@ var (
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
-
- preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
- preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
)
const (
diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go
index 8be7221c53..f0cb10f142 100644
--- a/core/state/snapshot/difflayer.go
+++ b/core/state/snapshot/difflayer.go
@@ -24,7 +24,6 @@ import (
"sort"
"sync"
"sync/atomic"
- "time"
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/rlp"
@@ -193,7 +192,6 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
}
// Determine memory size and track the dirty writes
dl.memory += uint64(common.HashLength + len(blob))
- snapshotDirtyAccountWriteMeter.Mark(int64(len(blob)))
}
for accountHash, slots := range storage {
if slots == nil {
@@ -202,7 +200,6 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s
// Determine memory size and track the dirty writes
for _, data := range slots {
dl.memory += uint64(common.HashLength + len(data))
- snapshotDirtyStorageWriteMeter.Mark(int64(len(data)))
}
}
dl.memory += uint64(len(destructs) * common.HashLength)
@@ -215,10 +212,6 @@ func (dl *diffLayer) rebloom(origin *diskLayer) {
dl.lock.Lock()
defer dl.lock.Unlock()
- defer func(start time.Time) {
- snapshotBloomIndexTimer.Update(time.Since(start))
- }(time.Now())
-
// Inject the new origin that triggered the rebloom
dl.origin = origin
@@ -242,13 +235,6 @@ func (dl *diffLayer) rebloom(origin *diskLayer) {
dl.diffed.Add(storageBloomHasher{accountHash, storageHash})
}
}
- // Calculate the current false positive rate and update the error rate meter.
- // This is a bit cheating because subsequent layers will overwrite it, but it
- // should be fine, we're only interested in ballpark figures.
- k := float64(dl.diffed.K())
- n := float64(dl.diffed.N())
- m := float64(dl.diffed.M())
- snapshotBloomErrorGauge.Update(math.Pow(1.0-math.Exp((-k)*(n+0.5)/(m-1)), k))
}
// Root returns the root hash for which this snapshot was made.
@@ -305,7 +291,6 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
// If the bloom filter misses, don't even bother with traversing the memory
// diff layers, reach straight into the bottom persistent disk layer
if origin != nil {
- snapshotBloomAccountMissMeter.Mark(1)
return origin.AccountRLP(hash)
}
// The bloom filter hit, start poking in the internal maps
@@ -326,18 +311,10 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
}
// If the account is known locally, return it
if data, ok := dl.accountData[hash]; ok {
- snapshotDirtyAccountHitMeter.Mark(1)
- snapshotDirtyAccountHitDepthHist.Update(int64(depth))
- snapshotDirtyAccountReadMeter.Mark(int64(len(data)))
- snapshotBloomAccountTrueHitMeter.Mark(1)
return data, nil
}
// If the account is known locally, but deleted, return it
if _, ok := dl.destructSet[hash]; ok {
- snapshotDirtyAccountHitMeter.Mark(1)
- snapshotDirtyAccountHitDepthHist.Update(int64(depth))
- snapshotDirtyAccountInexMeter.Mark(1)
- snapshotBloomAccountTrueHitMeter.Mark(1)
return nil, nil
}
// Account unknown to this diff, resolve from parent
@@ -345,7 +322,6 @@ func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
return diff.accountRLP(hash, depth+1)
}
// Failed to resolve through diff layers, mark a bloom error and use the disk
- snapshotBloomAccountFalseHitMeter.Mark(1)
return dl.parent.AccountRLP(hash)
}
@@ -371,7 +347,6 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
// If the bloom filter misses, don't even bother with traversing the memory
// diff layers, reach straight into the bottom persistent disk layer
if origin != nil {
- snapshotBloomStorageMissMeter.Mark(1)
return origin.Storage(accountHash, storageHash)
}
// The bloom filter hit, start poking in the internal maps
@@ -393,23 +368,11 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
// If the account is known locally, try to resolve the slot locally
if storage, ok := dl.storageData[accountHash]; ok {
if data, ok := storage[storageHash]; ok {
- snapshotDirtyStorageHitMeter.Mark(1)
- snapshotDirtyStorageHitDepthHist.Update(int64(depth))
- if n := len(data); n > 0 {
- snapshotDirtyStorageReadMeter.Mark(int64(n))
- } else {
- snapshotDirtyStorageInexMeter.Mark(1)
- }
- snapshotBloomStorageTrueHitMeter.Mark(1)
return data, nil
}
}
// If the account is known locally, but deleted, return an empty slot
if _, ok := dl.destructSet[accountHash]; ok {
- snapshotDirtyStorageHitMeter.Mark(1)
- snapshotDirtyStorageHitDepthHist.Update(int64(depth))
- snapshotDirtyStorageInexMeter.Mark(1)
- snapshotBloomStorageTrueHitMeter.Mark(1)
return nil, nil
}
// Storage slot unknown to this diff, resolve from parent
@@ -417,7 +380,6 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([
return diff.storage(accountHash, storageHash, depth+1)
}
// Failed to resolve through diff layers, mark a bloom error and use the disk
- snapshotBloomStorageFalseHitMeter.Mark(1)
return dl.parent.Storage(accountHash, storageHash)
}
diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go
index 8af58528fd..b16cd31eed 100644
--- a/core/state/snapshot/disklayer.go
+++ b/core/state/snapshot/disklayer.go
@@ -96,25 +96,14 @@ func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
if dl.genMarker != nil && bytes.Compare(hash[:], dl.genMarker) > 0 {
return nil, ErrNotCoveredYet
}
- // If we're in the disk layer, all diff layers missed
- snapshotDirtyAccountMissMeter.Mark(1)
-
// Try to retrieve the account from the memory cache
if blob, found := dl.cache.HasGet(nil, hash[:]); found {
- snapshotCleanAccountHitMeter.Mark(1)
- snapshotCleanAccountReadMeter.Mark(int64(len(blob)))
return blob, nil
}
// Cache doesn't contain account, pull from disk and cache for later
blob := rawdb.ReadAccountSnapshot(dl.diskdb, hash)
dl.cache.Set(hash[:], blob)
- snapshotCleanAccountMissMeter.Mark(1)
- if n := len(blob); n > 0 {
- snapshotCleanAccountWriteMeter.Mark(int64(n))
- } else {
- snapshotCleanAccountInexMeter.Mark(1)
- }
return blob, nil
}
@@ -136,25 +125,15 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, erro
if dl.genMarker != nil && bytes.Compare(key, dl.genMarker) > 0 {
return nil, ErrNotCoveredYet
}
- // If we're in the disk layer, all diff layers missed
- snapshotDirtyStorageMissMeter.Mark(1)
// Try to retrieve the storage slot from the memory cache
if blob, found := dl.cache.HasGet(nil, key); found {
- snapshotCleanStorageHitMeter.Mark(1)
- snapshotCleanStorageReadMeter.Mark(int64(len(blob)))
return blob, nil
}
// Cache doesn't contain storage slot, pull from disk and cache for later
blob := rawdb.ReadStorageSnapshot(dl.diskdb, accountHash, storageHash)
dl.cache.Set(key, blob)
- snapshotCleanStorageMissMeter.Mark(1)
- if n := len(blob); n > 0 {
- snapshotCleanStorageWriteMeter.Mark(int64(n))
- } else {
- snapshotCleanStorageInexMeter.Mark(1)
- }
return blob, nil
}
diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go
index 550d492116..af0200031a 100644
--- a/core/state/snapshot/generate.go
+++ b/core/state/snapshot/generate.go
@@ -33,7 +33,7 @@ import (
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/ethdb/memorydb"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/rlp"
"github.com/dominant-strategies/go-quai/trie"
)
@@ -64,33 +64,6 @@ var (
- // Metrics in generation
- var (
- snapGeneratedAccountMeter = metrics.NewRegisteredMeter("state/snapshot/generation/account/generated", nil)
- snapRecoveredAccountMeter = metrics.NewRegisteredMeter("state/snapshot/generation/account/recovered", nil)
- snapWipedAccountMeter = metrics.NewRegisteredMeter("state/snapshot/generation/account/wiped", nil)
- snapMissallAccountMeter = metrics.NewRegisteredMeter("state/snapshot/generation/account/missall", nil)
- snapGeneratedStorageMeter = metrics.NewRegisteredMeter("state/snapshot/generation/storage/generated", nil)
- snapRecoveredStorageMeter = metrics.NewRegisteredMeter("state/snapshot/generation/storage/recovered", nil)
- snapWipedStorageMeter = metrics.NewRegisteredMeter("state/snapshot/generation/storage/wiped", nil)
- snapMissallStorageMeter = metrics.NewRegisteredMeter("state/snapshot/generation/storage/missall", nil)
- snapSuccessfulRangeProofMeter = metrics.NewRegisteredMeter("state/snapshot/generation/proof/success", nil)
- snapFailedRangeProofMeter = metrics.NewRegisteredMeter("state/snapshot/generation/proof/failure", nil)
-
- // snapAccountProveCounter measures time spent on the account proving
- snapAccountProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/prove", nil)
- // snapAccountTrieReadCounter measures time spent on the account trie iteration
- snapAccountTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/trieread", nil)
- // snapAccountSnapReadCounter measues time spent on the snapshot account iteration
- snapAccountSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/snapread", nil)
- // snapAccountWriteCounter measures time spent on writing/updating/deleting accounts
- snapAccountWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/write", nil)
- // snapStorageProveCounter measures time spent on storage proving
- snapStorageProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/prove", nil)
- // snapStorageTrieReadCounter measures time spent on the storage trie iteration
- snapStorageTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/trieread", nil)
- // snapStorageSnapReadCounter measures time spent on the snapshot storage iteration
- snapStorageSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/snapread", nil)
- // snapStorageWriteCounter measures time spent on writing/updating/deleting storages
- snapStorageWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/write", nil)
- )
// generatorStats is a collection of statistics gathered by the snapshot generator
@@ -258,7 +231,6 @@ func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix
iter := dl.diskdb.NewIterator(prefix, origin)
defer iter.Release()
- var start = time.Now()
for iter.Next() {
key := iter.Key()
if len(key) != len(prefix)+common.HashLength {
@@ -290,20 +262,6 @@ func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix
}
}
}
- // Update metrics for database iteration and merkle proving
- if kind == "storage" {
- snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
- } else {
- snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
- }
- defer func(start time.Time) {
- if kind == "storage" {
- snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
- } else {
- snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
- }
- }(time.Now())
-
// The snap state is exhausted, pass the entire key/val set for verification
if origin == nil && !diskMore {
stackTr := trie.NewStackTrie(nil)
@@ -398,7 +356,6 @@ func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string,
// The range prover says the range is correct, skip trie iteration
if result.valid() {
- snapSuccessfulRangeProofMeter.Mark(1)
logger.Trace("Proved state range", "last", hexutil.Encode(last))
// The verification is passed, process each state with the given
@@ -411,20 +368,12 @@ func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string,
return !result.diskMore && !result.trieMore, last, nil
}
logger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
- snapFailedRangeProofMeter.Mark(1)
- // Special case, the entire trie is missing. In the original trie scheme,
- // all the duplicated subtries will be filter out(only one copy of data
- // will be stored). While in the snapshot model, all the storage tries
- // belong to different contracts will be kept even they are duplicated.
- // Track it to a certain extent remove the noise data used for statistics.
- if origin == nil && last == nil {
- meter := snapMissallAccountMeter
- if kind == "storage" {
- meter = snapMissallStorageMeter
- }
- meter.Mark(1)
- }
// We use the snap data to build up a cache which can be used by the
// main account trie as a primary lookup when resolving hashes
@@ -462,7 +411,6 @@ func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string,
untouched = 0 // states already correct
// timers
- start = time.Now()
internal time.Duration
)
nodeIt.AddResolver(snapNodeCache)
@@ -520,9 +468,7 @@ func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string,
- // Update metrics for counting trie iteration
- if kind == "storage" {
- snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
- } else {
- snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
- }
logger.Debug("Regenerated state range", "root", root, "last", hexutil.Encode(last),
"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)
@@ -588,20 +534,17 @@ func (dl *diskLayer) generate(stats *generatorStats) {
onAccount := func(key []byte, val []byte, write bool, delete bool) error {
var (
- start = time.Now()
accountHash = common.BytesToHash(key)
)
if delete {
rawdb.DeleteAccountSnapshot(batch, accountHash)
- snapWipedAccountMeter.Mark(1)
// Ensure that any previous snapshot storage values are cleared
prefix := append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...)
keyLen := len(rawdb.SnapshotStoragePrefix) + 2*common.HashLength
- if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
+ if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, false); err != nil {
return err
}
- snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
return nil
}
// Retrieve the current account and flatten it into the internal format
@@ -624,12 +567,10 @@ func (dl *diskLayer) generate(stats *generatorStats) {
if acc.Root == emptyRoot {
dataLen -= 32
}
- snapRecoveredAccountMeter.Mark(1)
} else {
data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
dataLen = len(data)
rawdb.WriteAccountSnapshot(batch, accountHash, data)
- snapGeneratedAccountMeter.Mark(1)
}
stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
stats.accounts++
@@ -648,32 +589,23 @@ func (dl *diskLayer) generate(stats *generatorStats) {
// - Perhaps we can avoid if where codeHash is emptyCode
prefix := append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...)
keyLen := len(rawdb.SnapshotStoragePrefix) + 2*common.HashLength
- if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
+ if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, false); err != nil {
return err
}
- snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
} else {
- snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
var storeMarker []byte
if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
storeMarker = dl.genMarker[common.HashLength:]
}
onStorage := func(key []byte, val []byte, write bool, delete bool) error {
- defer func(start time.Time) {
- snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
- }(time.Now())
-
if delete {
rawdb.DeleteStorageSnapshot(batch, accountHash, common.BytesToHash(key))
- snapWipedStorageMeter.Mark(1)
return nil
}
if write {
rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(key), val)
- snapGeneratedStorageMeter.Mark(1)
- } else {
- snapRecoveredStorageMeter.Mark(1)
}
stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
stats.slots++
diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go
index b8afbd3ffa..4d9b823946 100644
--- a/core/state/snapshot/snapshot.go
+++ b/core/state/snapshot/snapshot.go
@@ -28,55 +28,12 @@ import (
"github.com/dominant-strategies/go-quai/core/rawdb"
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/rlp"
"github.com/dominant-strategies/go-quai/trie"
)
var (
- snapshotCleanAccountHitMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
- snapshotCleanAccountMissMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
- snapshotCleanAccountInexMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
- snapshotCleanAccountReadMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
- snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)
-
- snapshotCleanStorageHitMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
- snapshotCleanStorageMissMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
- snapshotCleanStorageInexMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
- snapshotCleanStorageReadMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
- snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)
-
- snapshotDirtyAccountHitMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
- snapshotDirtyAccountMissMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
- snapshotDirtyAccountInexMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
- snapshotDirtyAccountReadMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
- snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)
-
- snapshotDirtyStorageHitMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
- snapshotDirtyStorageMissMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
- snapshotDirtyStorageInexMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
- snapshotDirtyStorageReadMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
- snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)
-
- snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
- snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
-
- snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
- snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
- snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
- snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)
-
- snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
- snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)
-
- snapshotBloomAccountTrueHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
- snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
- snapshotBloomAccountMissMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)
-
- snapshotBloomStorageTrueHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
- snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
- snapshotBloomStorageMissMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)
-
// ErrSnapshotStale is returned from data accessors if the underlying snapshot
// layer had been invalidated due to the chain progressing forward far enough
// to not maintain the layer's original state.
@@ -533,7 +490,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
if key := it.Key(); len(key) == 65 { // TODO: We should move this into the iterator
batch.Delete(key)
base.cache.Del(key[1:])
- snapshotFlushStorageItemMeter.Mark(1)
// Ensure we don't delete too much data blindly (contract can be
// huge). It's ok to flush, the root will go missing in case of a
@@ -557,10 +513,6 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
// Push the account to disk
rawdb.WriteAccountSnapshot(batch, hash, data)
base.cache.Set(hash[:], data)
- snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
-
- snapshotFlushAccountItemMeter.Mark(1)
- snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
// Ensure we don't write too much data blindly. It's ok to flush, the
// root will go missing in case of a crash and we'll detect and regen
@@ -589,13 +541,10 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
if len(data) > 0 {
rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
base.cache.Set(append(accountHash[:], storageHash[:]...), data)
- snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
} else {
rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
}
- snapshotFlushStorageItemMeter.Mark(1)
- snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
}
}
// Update the snapshot block marker and write any remainder data
diff --git a/core/state/snapshot/wipe.go b/core/state/snapshot/wipe.go
index de613138df..d7e05f6549 100644
--- a/core/state/snapshot/wipe.go
+++ b/core/state/snapshot/wipe.go
@@ -24,7 +24,6 @@ import (
"github.com/dominant-strategies/go-quai/core/rawdb"
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
)
// wipeSnapshot starts a goroutine to iterate over the entire key-value database
@@ -54,10 +53,10 @@ func wipeSnapshot(db ethdb.KeyValueStore, full bool) chan struct{} {
// removed in sync to avoid data races. After all is done, the snapshot range of
// the database is compacted to free up unused data blocks.
func wipeContent(db ethdb.KeyValueStore) error {
- if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, nil, nil, len(rawdb.SnapshotAccountPrefix)+common.HashLength, snapWipedAccountMeter, true); err != nil {
+ if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, nil, nil, len(rawdb.SnapshotAccountPrefix)+common.HashLength, true); err != nil {
return err
}
- if err := wipeKeyRange(db, "storage", rawdb.SnapshotStoragePrefix, nil, nil, len(rawdb.SnapshotStoragePrefix)+2*common.HashLength, snapWipedStorageMeter, true); err != nil {
+ if err := wipeKeyRange(db, "storage", rawdb.SnapshotStoragePrefix, nil, nil, len(rawdb.SnapshotStoragePrefix)+2*common.HashLength, true); err != nil {
return err
}
// Compact the snapshot section of the database to get rid of unused space
@@ -87,7 +86,7 @@ func wipeContent(db ethdb.KeyValueStore) error {
// specifying a particular key range for deletion.
//
// Origin is included for wiping and limit is excluded if they are specified.
-func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, origin []byte, limit []byte, keylen int, meter metrics.Meter, report bool) error {
+func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, origin []byte, limit []byte, keylen int, report bool) error {
// Batch deletions together to avoid holding an iterator for too long
var (
batch = db.NewBatch()
@@ -137,9 +136,6 @@ func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, origin []b
if err := batch.Write(); err != nil {
return err
}
- if meter != nil {
- meter.Mark(int64(items))
- }
if report {
log.Info("Deleted state snapshot leftovers", "kind", kind, "wiped", items, "elapsed", common.PrettyDuration(time.Since(start)))
}
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 0e90601f90..385014acd7 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -25,7 +25,8 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/crypto"
- "github.com/dominant-strategies/go-quai/metrics"
+
+ "github.com/dominant-strategies/go-quai/metrics_config"
"github.com/dominant-strategies/go-quai/rlp"
)
@@ -206,25 +207,20 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// If no live objects are available, attempt to use snapshots
var (
- enc []byte
- err error
- meter *time.Duration
+ enc []byte
+ err error
+ readStart = time.Now()
)
- readStart := time.Now()
- if metrics.EnabledExpensive {
+
+ if metrics_config.MetricsEnabled() {
// If the snap is 'under construction', the first lookup may fail. If that
// happens, we don't want to double-count the time elapsed. Thus this
// dance with the metering.
defer func() {
- if meter != nil {
- *meter += time.Since(readStart)
- }
+ stateMetrics.WithLabelValues("StorageReads").Add(float64(time.Since(readStart)))
}()
}
if s.db.snap != nil {
- if metrics.EnabledExpensive {
- meter = &s.db.SnapshotStorageReads
- }
// If the object was destructed in *this* block (and potentially resurrected),
// the storage has been cleared out, and we should *not* consult the previous
// snapshot about any storage values. The only possible alternatives are:
@@ -238,15 +234,12 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// If snapshot unavailable or reading from it failed, load from the database
if s.db.snap == nil || err != nil {
- if meter != nil {
+ if metrics_config.MetricsEnabled() {
// If we already spent time checking the snapshot, account for it
// and reset the readStart
- *meter += time.Since(readStart)
+ stateMetrics.WithLabelValues("StorageReads").Add(float64(time.Since(readStart)))
readStart = time.Now()
}
- if metrics.EnabledExpensive {
- meter = &s.db.StorageReads
- }
if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
s.setError(err)
return common.Hash{}
@@ -334,8 +327,8 @@ func (s *stateObject) updateTrie(db Database) Trie {
return s.trie
}
// Track the amount of time wasted on updating the storage trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
+ if metrics_config.MetricsEnabled() {
+ defer func(start time.Time) { stateMetrics.WithLabelValues("StorageUpdates").Add(float64(time.Since(start))) }(time.Now())
}
// The snapshot storage map for the object
var storage map[common.Hash][]byte
@@ -388,8 +381,8 @@ func (s *stateObject) updateRoot(db Database) {
return
}
// Track the amount of time wasted on hashing the storage trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
+ if metrics_config.MetricsEnabled() {
+ defer func(start time.Time) { stateMetrics.WithLabelValues("StorageHashes").Add(float64(time.Since(start))) }(time.Now())
}
s.data.Root = s.trie.Hash()
}
@@ -405,8 +398,8 @@ func (s *stateObject) CommitTrie(db Database) error {
return s.dbErr
}
// Track the amount of time wasted on committing the storage trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
+ if metrics_config.MetricsEnabled() {
+ defer func(start time.Time) { stateMetrics.WithLabelValues("StorageCommits").Add(float64(time.Since(start))) }(time.Now())
}
root, err := s.trie.Commit(nil)
if err == nil {
diff --git a/core/state/statedb.go b/core/state/statedb.go
index f1c955e82a..a798df7b42 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -24,13 +24,16 @@ import (
"sort"
"time"
+ "github.com/dominant-strategies/go-quai/metrics_config"
+
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/core/rawdb"
"github.com/dominant-strategies/go-quai/core/state/snapshot"
"github.com/dominant-strategies/go-quai/core/types"
"github.com/dominant-strategies/go-quai/crypto"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
+ "github.com/prometheus/client_golang/prometheus"
"github.com/dominant-strategies/go-quai/rlp"
"github.com/dominant-strategies/go-quai/trie"
)
@@ -56,6 +59,29 @@ func (n *proofList) Delete(key []byte) error {
panic("not supported")
}
+var (
+ stateMetrics *prometheus.GaugeVec
+)
+
+func init() {
+ registerMetrics()
+}
+
+func registerMetrics() {
+ stateMetrics = metrics_config.NewGaugeVec("StateTimes", "Time spent doing state operations")
+ stateMetrics.WithLabelValues("AccountReads")
+ stateMetrics.WithLabelValues("AccountHashes")
+ stateMetrics.WithLabelValues("AccountUpdates")
+ stateMetrics.WithLabelValues("AccountCommits")
+ stateMetrics.WithLabelValues("StorageReads")
+ stateMetrics.WithLabelValues("StorageHashes")
+ stateMetrics.WithLabelValues("StorageUpdates")
+ stateMetrics.WithLabelValues("StorageCommits")
+ stateMetrics.WithLabelValues("SnapshotAccountReads")
+ stateMetrics.WithLabelValues("SnapshotStorageReads")
+ stateMetrics.WithLabelValues("SnapshotCommits")
+}
+
// StateDB structs within the Quai protocol are used to store anything
// within the merkle trie. StateDBs take care of caching and storing
// nested states. It's the general query interface to retrieve:
@@ -104,19 +130,6 @@ type StateDB struct {
journal *journal
validRevisions []revision
nextRevisionId int
-
- // Measurements gathered during execution for debugging purposes
- AccountReads time.Duration
- AccountHashes time.Duration
- AccountUpdates time.Duration
- AccountCommits time.Duration
- StorageReads time.Duration
- StorageHashes time.Duration
- StorageUpdates time.Duration
- StorageCommits time.Duration
- SnapshotAccountReads time.Duration
- SnapshotStorageReads time.Duration
- SnapshotCommits time.Duration
}
// New creates a new state from a given trie.
@@ -146,6 +159,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte)
}
}
+
return sdb, nil
}
@@ -450,8 +464,8 @@ func (s *StateDB) Suicide(addr common.InternalAddress) bool {
// updateStateObject writes the given object to the trie.
func (s *StateDB) updateStateObject(obj *stateObject) {
// Track the amount of time wasted on updating the account from the trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
+ if metrics_config.MetricsEnabled() {
+ defer func(start time.Time) { stateMetrics.WithLabelValues("AccountUpdates").Add(float64(time.Since(start))) }(time.Now())
}
// Encode the account and update the account trie
addr := obj.Address()
@@ -476,8 +490,8 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
// deleteStateObject removes the given object from the state trie.
func (s *StateDB) deleteStateObject(obj *stateObject) {
// Track the amount of time wasted on deleting the account from the trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
+ if metrics_config.MetricsEnabled() {
+ defer func(start time.Time) { stateMetrics.WithLabelValues("AccountUpdates").Add(float64(time.Since(start))) }(time.Now())
}
// Delete the account from the trie
addr := obj.Address()
@@ -511,8 +525,10 @@ func (s *StateDB) getDeletedStateObject(addr common.InternalAddress) *stateObjec
err error
)
if s.snap != nil {
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
+ if metrics_config.MetricsEnabled() {
+ defer func(start time.Time) {
+ stateMetrics.WithLabelValues("SnapshotAccountReads").Add(float64(time.Since(start)))
+ }(time.Now())
}
var acc *snapshot.Account
if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
@@ -535,8 +551,8 @@ func (s *StateDB) getDeletedStateObject(addr common.InternalAddress) *stateObjec
}
// If snapshot unavailable or reading from it failed, load from the database
if s.snap == nil || err != nil {
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
+ if metrics_config.MetricsEnabled() {
+ defer func(start time.Time) { stateMetrics.WithLabelValues("AccountReads").Add(float64(time.Since(start))) }(time.Now())
}
enc, err := s.trie.TryGet(addr.Bytes())
if err != nil {
@@ -871,8 +887,8 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.stateObjectsPending = make(map[common.InternalAddress]struct{})
}
// Track the amount of time wasted on hashing the account trie
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
+ if metrics_config.MetricsEnabled() {
+ defer func(start time.Time) { stateMetrics.WithLabelValues("AccountHashes").Add(float64(time.Since(start))) }(time.Now())
}
return s.trie.Hash()
}
@@ -926,7 +942,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
// Write the account trie changes, measuring the amount of wasted time
var start time.Time
- if metrics.EnabledExpensive {
+ if metrics_config.MetricsEnabled() {
start = time.Now()
}
// The onleaf func is called _serially_, so we can reuse the same account
@@ -941,13 +957,13 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
}
return nil
})
- if metrics.EnabledExpensive {
- s.AccountCommits += time.Since(start)
+ if metrics_config.MetricsEnabled() {
+ stateMetrics.WithLabelValues("AccountCommits").Add(float64(time.Since(start)))
}
// If snapshotting is enabled, update the snapshot tree with this new version
if s.snap != nil {
- if metrics.EnabledExpensive {
- defer func(start time.Time) { s.SnapshotCommits += time.Since(start) }(time.Now())
+ if metrics_config.MetricsEnabled() {
+ defer func(start time.Time) { stateMetrics.WithLabelValues("SnapshotCommits").Add(float64(time.Since(start))) }(time.Now())
}
// Only update if there's a state transition (skip empty Clique blocks)
if parent := s.snap.Root(); parent != root {
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index aca3ef6239..4b72149a1b 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -21,7 +21,7 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
+ "github.com/dominant-strategies/go-quai/metrics_config"
)
var (
@@ -39,35 +39,14 @@ type triePrefetcher struct {
root common.Hash // Root hash of the account trie for metrics
fetches map[common.Hash]Trie // Partially or fully fetched tries
fetchers map[common.Hash]*subfetcher // Subfetchers for each trie
-
- deliveryMissMeter metrics.Meter
- accountLoadMeter metrics.Meter
- accountDupMeter metrics.Meter
- accountSkipMeter metrics.Meter
- accountWasteMeter metrics.Meter
- storageLoadMeter metrics.Meter
- storageDupMeter metrics.Meter
- storageSkipMeter metrics.Meter
- storageWasteMeter metrics.Meter
}
// newTriePrefetcher
func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher {
- prefix := triePrefetchMetricsPrefix + namespace
p := &triePrefetcher{
db: db,
root: root,
fetchers: make(map[common.Hash]*subfetcher), // Active prefetchers use the fetchers map
-
- deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil),
- accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil),
- accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil),
- accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil),
- accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil),
- storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil),
- storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil),
- storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil),
- storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil),
}
return p
}
@@ -78,25 +57,15 @@ func (p *triePrefetcher) close() {
for _, fetcher := range p.fetchers {
fetcher.abort() // safe to do multiple times
- if metrics.Enabled {
+ if metrics_config.MetricsEnabled() {
if fetcher.root == p.root {
- p.accountLoadMeter.Mark(int64(len(fetcher.seen)))
- p.accountDupMeter.Mark(int64(fetcher.dups))
- p.accountSkipMeter.Mark(int64(len(fetcher.tasks)))
-
for _, key := range fetcher.used {
delete(fetcher.seen, string(key))
}
- p.accountWasteMeter.Mark(int64(len(fetcher.seen)))
} else {
- p.storageLoadMeter.Mark(int64(len(fetcher.seen)))
- p.storageDupMeter.Mark(int64(fetcher.dups))
- p.storageSkipMeter.Mark(int64(len(fetcher.tasks)))
-
for _, key := range fetcher.used {
delete(fetcher.seen, string(key))
}
- p.storageWasteMeter.Mark(int64(len(fetcher.seen)))
}
}
}
@@ -113,16 +82,6 @@ func (p *triePrefetcher) copy() *triePrefetcher {
db: p.db,
root: p.root,
fetches: make(map[common.Hash]Trie), // Active prefetchers use the fetches map
-
- deliveryMissMeter: p.deliveryMissMeter,
- accountLoadMeter: p.accountLoadMeter,
- accountDupMeter: p.accountDupMeter,
- accountSkipMeter: p.accountSkipMeter,
- accountWasteMeter: p.accountWasteMeter,
- storageLoadMeter: p.storageLoadMeter,
- storageDupMeter: p.storageDupMeter,
- storageSkipMeter: p.storageSkipMeter,
- storageWasteMeter: p.storageWasteMeter,
}
// If the prefetcher is already a copy, duplicate the data
if p.fetches != nil {
@@ -160,7 +119,6 @@ func (p *triePrefetcher) trie(root common.Hash) Trie {
if p.fetches != nil {
trie := p.fetches[root]
if trie == nil {
- p.deliveryMissMeter.Mark(1)
return nil
}
return p.db.CopyTrie(trie)
@@ -168,7 +126,6 @@ func (p *triePrefetcher) trie(root common.Hash) Trie {
// Otherwise the prefetcher is active, bail if no trie was prefetched for this root
fetcher := p.fetchers[root]
if fetcher == nil {
- p.deliveryMissMeter.Mark(1)
return nil
}
// Interrupt the prefetcher if it's by any chance still running and return
@@ -177,7 +134,6 @@ func (p *triePrefetcher) trie(root common.Hash) Trie {
trie := fetcher.peek()
if trie == nil {
- p.deliveryMissMeter.Mark(1)
return nil
}
return trie
diff --git a/core/state_processor.go b/core/state_processor.go
index e14f74d087..56cc569ea5 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -35,26 +35,13 @@ import (
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/event"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/params"
"github.com/dominant-strategies/go-quai/trie"
lru "github.com/hashicorp/golang-lru"
)
- var (
- accountReadTimer = metrics.NewRegisteredTimer("chain/account/reads", nil)
- accountHashTimer = metrics.NewRegisteredTimer("chain/account/hashes", nil)
- accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
- accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
-
- storageReadTimer = metrics.NewRegisteredTimer("chain/storage/reads", nil)
- storageHashTimer = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
- storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
- storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
-
- snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
- snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
- snapshotCommitTimer = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)
- )
const (
diff --git a/core/tx_list.go b/core/tx_list.go
index ceff32254c..a3497832b8 100644
--- a/core/tx_list.go
+++ b/core/tx_list.go
@@ -21,7 +21,6 @@ import (
"math"
"math/big"
"sort"
- "time"
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/core/types"
@@ -594,7 +593,6 @@ func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool)
// Reheap forcibly rebuilds the heap based on the current remote transaction set.
func (l *txPricedList) Reheap() {
- start := time.Now()
l.stales = 0
l.urgent.list = make([]*types.Transaction, 0, l.all.RemoteCount())
l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
@@ -614,7 +612,6 @@ func (l *txPricedList) Reheap() {
l.floating.list[i] = heap.Pop(&l.urgent).(*types.Transaction)
}
heap.Init(&l.floating)
- reheapTimer.Update(time.Since(start))
}
// SetBaseFee updates the base fee and triggers a re-heap. Note that Removed is not
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 93d4e02dc5..eaeeeac262 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -32,7 +32,8 @@ import (
"github.com/dominant-strategies/go-quai/core/types"
"github.com/dominant-strategies/go-quai/event"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
+ "github.com/dominant-strategies/go-quai/metrics_config"
"github.com/dominant-strategies/go-quai/params"
orderedmap "github.com/wk8/go-ordered-map/v2"
)
@@ -98,32 +99,33 @@ var (
)
var (
- // Metrics for the pending pool
- pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
- pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
- pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
- pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds
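+ // All txpool meters and gauges share a single vector metric; each series
+ // is selected via a "pool:event" label (assuming metrics_config.NewGaugeVec
+ // takes a name and a help string and returns a one-label vector).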
+ txpoolMetrics = metrics_config.NewGaugeVec("TxpoolGauges", "Txpool gauges")
+ // Pending pool metrics
+ pendingDiscardMeter = txpoolMetrics.WithLabelValues("pending:discard")
+ pendingReplaceMeter = txpoolMetrics.WithLabelValues("pending:replace")
+ pendingRateLimitMeter = txpoolMetrics.WithLabelValues("pending:rateLimit") // Dropped due to rate limiting
+ pendingNofundsMeter = txpoolMetrics.WithLabelValues("pending:noFunds") // Dropped due to out-of-funds
// Metrics for the queued pool
- queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
- queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
- queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
- queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds
- queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime
+ queuedDiscardMeter = txpoolMetrics.WithLabelValues("queued:discard")
+ queuedReplaceMeter = txpoolMetrics.WithLabelValues("queued:replace")
+ queuedRateLimitMeter = txpoolMetrics.WithLabelValues("queued:rateLimit") // Dropped due to rate limiting
+ queuedNofundsMeter = txpoolMetrics.WithLabelValues("queued:noFunds") // Dropped due to out-of-funds
+ queuedEvictionMeter = txpoolMetrics.WithLabelValues("queued:eviction") // Dropped due to lifetime
// General tx metrics
- knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil)
- validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil)
- invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil)
- underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
- overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil)
-
- pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
- queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
- localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
- slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
-
- reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
+ knownTxMeter = txpoolMetrics.WithLabelValues("known") // Known transaction
+ validTxMeter = txpoolMetrics.WithLabelValues("valid") // Valid transaction
+ invalidTxMeter = txpoolMetrics.WithLabelValues("invalid") // Invalid transaction
+ underpricedTxMeter = txpoolMetrics.WithLabelValues("underpriced") // Underpriced transaction
+ overflowedTxMeter = txpoolMetrics.WithLabelValues("overflowed") // Overflowed transaction
+
+ pendingGauge = txpoolMetrics.WithLabelValues("pending")
+ queuedGauge = txpoolMetrics.WithLabelValues("queued")
+ localGauge = txpoolMetrics.WithLabelValues("local")
+ slotsGauge = txpoolMetrics.WithLabelValues("slots")
+
+ reheapTimer = metrics_config.NewTimer("Reheap", "Reheap timer")
)
// TxStatus is the current status of a transaction as seen by the pool.
@@ -283,6 +285,31 @@ type newSender struct {
// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool {
+
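+ // Reset every txpool series to zero so a fresh pool starts from known values.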
+ // Pending pool metrics
+ pendingDiscardMeter.Set(0)
+ pendingReplaceMeter.Set(0)
+ pendingRateLimitMeter.Set(0)
+ pendingNofundsMeter.Set(0)
+
+ // Metrics for the queued pool
+ queuedDiscardMeter.Set(0)
+ queuedReplaceMeter.Set(0)
+ queuedRateLimitMeter.Set(0)
+ queuedNofundsMeter.Set(0)
+ queuedEvictionMeter.Set(0)
+
+ // General tx metrics
+ knownTxMeter.Set(0)
+ validTxMeter.Set(0)
+ invalidTxMeter.Set(0)
+ underpricedTxMeter.Set(0)
+ overflowedTxMeter.Set(0)
+
+ pendingGauge.Set(0)
+ queuedGauge.Set(0)
+ localGauge.Set(0)
+
// Sanitize the input to ensure no vulnerable gas prices are set
config = (&config).sanitize()
@@ -398,7 +425,7 @@ func (pool *TxPool) loop() {
for _, tx := range list {
pool.removeTx(tx.Hash(), true)
}
- queuedEvictionMeter.Mark(int64(len(list)))
+ queuedEvictionMeter.Add(float64(len(list)))
}
}
pool.mu.Unlock()
@@ -701,7 +728,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
hash := tx.Hash()
if pool.all.Get(hash) != nil {
log.Trace("Discarding already known transaction", "hash", hash)
- knownTxMeter.Mark(1)
+ knownTxMeter.Add(1)
return false, ErrAlreadyKnown
}
// Make the local flag. If it's from local source or it's from the network but
@@ -711,7 +738,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
// If the transaction fails basic validation, discard it
if err := pool.validateTx(tx, isLocal); err != nil {
log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
- invalidTxMeter.Mark(1)
+ invalidTxMeter.Add(1)
return false, err
}
// If the transaction pool is full, discard underpriced transactions
@@ -719,7 +746,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
// If the new transaction is underpriced, don't accept it
if !isLocal && pool.priced.Underpriced(tx) {
log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
- underpricedTxMeter.Mark(1)
+ underpricedTxMeter.Add(1)
return false, ErrUnderpriced
}
// New transaction is better than our worse ones, make room for it.
@@ -730,13 +757,13 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
// Special case, we still can't make the room for the new remote one.
if !isLocal && !success {
log.Trace("Discarding overflown transaction", "hash", hash)
- overflowedTxMeter.Mark(1)
+ overflowedTxMeter.Add(1)
return false, ErrTxPoolOverflow
}
// Kick out the underpriced remote transactions.
for _, tx := range drop {
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
- underpricedTxMeter.Mark(1)
+ underpricedTxMeter.Add(1)
pool.removeTx(tx.Hash(), false)
}
}
@@ -750,14 +777,14 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
// Nonce already pending, check if required price bump is met
inserted, old := list.Add(tx, pool.config.PriceBump)
if !inserted {
- pendingDiscardMeter.Mark(1)
+ pendingDiscardMeter.Add(1)
return false, ErrReplaceUnderpriced
}
// New transaction is better, replace old one
if old != nil {
pool.all.Remove(old.Hash())
pool.priced.Removed(1)
- pendingReplaceMeter.Mark(1)
+ pendingReplaceMeter.Add(1)
}
pool.all.Add(tx, isLocal)
pool.priced.Put(tx, isLocal)
@@ -781,7 +808,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
}
if isLocal {
- localGauge.Inc(1)
+ localGauge.Add(1)
}
pool.journalTx(internal, tx)
pool.queueTxEvent(tx)
@@ -805,17 +832,17 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local boo
inserted, old := pool.queue[internal].Add(tx, pool.config.PriceBump)
if !inserted {
// An older transaction was better, discard this
- queuedDiscardMeter.Mark(1)
+ queuedDiscardMeter.Add(1)
return false, ErrReplaceUnderpriced
}
// Discard any previous transaction and mark this
if old != nil {
pool.all.Remove(old.Hash())
pool.priced.Removed(1)
- queuedReplaceMeter.Mark(1)
+ queuedReplaceMeter.Add(1)
} else {
// Nothing was replaced, bump the queued counter
- queuedGauge.Inc(1)
+ queuedGauge.Add(1)
}
// If the transaction isn't in lookup set but it's expected to be there,
// show the error log.
@@ -861,17 +888,17 @@ func (pool *TxPool) promoteTx(addr common.InternalAddress, hash common.Hash, tx
// An older transaction was better, discard this
pool.all.Remove(hash)
pool.priced.Removed(1)
- pendingDiscardMeter.Mark(1)
+ pendingDiscardMeter.Add(1)
return false
}
// Otherwise discard any previous transaction and mark this
if old != nil {
pool.all.Remove(old.Hash())
pool.priced.Removed(1)
- pendingReplaceMeter.Mark(1)
+ pendingReplaceMeter.Add(1)
} else {
// Nothing was replaced, bump the pending counter
- pendingGauge.Inc(1)
+ pendingGauge.Add(1)
}
// Set the potentially new pending nonce and notify any subsystems of the new tx
pool.pendingNonces.set(addr, tx.Nonce()+1)
@@ -942,7 +969,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
// If the transaction is known, pre-set the error slot
if pool.all.Get(tx.Hash()) != nil {
errs[i] = ErrAlreadyKnown
- knownTxMeter.Mark(1)
+ knownTxMeter.Add(1)
continue
}
// Exclude transactions with invalid signatures as soon as
@@ -953,7 +980,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
_, err = sender.InternalAddress()
if err != nil {
errs[i] = err
- invalidTxMeter.Mark(1)
+ invalidTxMeter.Add(1)
continue
}
} else if _, found := pool.GetSender(tx.Hash()); found {
@@ -962,13 +989,19 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
from, err := types.Sender(pool.signer, tx)
if err != nil {
errs[i] = ErrInvalidSender
- invalidTxMeter.Mark(1)
+ invalidTxMeter.Add(1)
continue
}
_, err = from.InternalAddress()
if err != nil {
errs[i] = ErrInvalidSender
- invalidTxMeter.Mark(1)
+ invalidTxMeter.Add(1)
continue
}
}
@@ -1013,7 +1046,7 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error,
dirty.addTx(tx)
}
}
- validTxMeter.Mark(int64(len(dirty.accounts)))
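+ // Note: counts accounts with newly added transactions, not individual txs.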
+ validTxMeter.Add(float64(len(dirty.accounts)))
return errs, dirty
}
@@ -1074,7 +1107,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
pool.priced.Removed(1)
}
if pool.locals.contains(internal) {
- localGauge.Dec(1)
+ localGauge.Dec()
}
// Remove the transaction from the pending lists and reset the account nonce
if pending := pool.pending[internal]; pending != nil {
@@ -1091,7 +1124,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
// Update the account nonce if needed
pool.pendingNonces.setIfLower(internal, tx.Nonce())
// Reduce the pending counter
- pendingGauge.Dec(int64(1 + len(invalids)))
+ pendingGauge.Sub(float64(len(invalids) + 1))
return
}
}
@@ -1099,7 +1132,7 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
if future := pool.queue[internal]; future != nil {
if removed, _ := future.Remove(tx); removed {
// Reduce the queued counter
- queuedGauge.Dec(1)
+ queuedGauge.Dec()
}
if future.Empty() {
delete(pool.queue, internal)
@@ -1435,7 +1468,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.InternalAddress) []*typ
pool.all.Remove(hash)
}
log.Trace("Removed unpayable queued transactions", "count", len(drops))
- queuedNofundsMeter.Mark(int64(len(drops)))
+ queuedNofundsMeter.Add(float64(len(drops)))
// Gather all executable transactions and promote them
readies := list.Ready(pool.pendingNonces.get(addr))
@@ -1446,7 +1479,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.InternalAddress) []*typ
}
}
log.Trace("Promoted queued transactions", "count", len(promoted))
- queuedGauge.Dec(int64(len(readies)))
+ queuedGauge.Sub(float64(len(readies)))
// Drop all transactions over the allowed limit
var caps types.Transactions
@@ -1457,13 +1490,13 @@ func (pool *TxPool) promoteExecutables(accounts []common.InternalAddress) []*typ
pool.all.Remove(hash)
log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
}
- queuedRateLimitMeter.Mark(int64(len(caps)))
+ queuedRateLimitMeter.Add(float64(len(caps)))
}
// Mark all the items dropped as removed
pool.priced.Removed(len(forwards) + len(drops) + len(caps))
- queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+ queuedGauge.Sub(float64(len(forwards) + len(drops) + len(caps)))
if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+ localGauge.Sub(float64(len(forwards) + len(drops) + len(caps)))
}
// Delete the entire queue entry if it became empty.
if list.Empty() {
@@ -1530,9 +1563,9 @@ func (pool *TxPool) truncatePending() {
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
}
pool.priced.Removed(len(caps))
- pendingGauge.Dec(int64(len(caps)))
+ pendingGauge.Sub(float64(len(caps)))
if pool.locals.contains(offenders[i]) {
- localGauge.Dec(int64(len(caps)))
+ localGauge.Sub(float64(len(caps)))
}
pending--
}
@@ -1557,15 +1590,15 @@ func (pool *TxPool) truncatePending() {
log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
}
pool.priced.Removed(len(caps))
- pendingGauge.Dec(int64(len(caps)))
+ pendingGauge.Sub(float64(len(caps)))
if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(caps)))
+ localGauge.Sub(float64(len(caps)))
}
pending--
}
}
}
- pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
+ pendingRateLimitMeter.Add(float64(pendingBeforeCap - pending))
if pool.reOrgCounter == c_reorgCounterThreshold {
log.Debug("Time taken to truncatePending", "time", common.PrettyDuration(time.Since(start)))
}
@@ -1607,7 +1640,7 @@ func (pool *TxPool) truncateQueue() {
pool.removeTx(tx.Hash(), true)
}
drop -= size
- queuedRateLimitMeter.Mark(int64(size))
+ queuedRateLimitMeter.Add(float64(size))
continue
}
// Otherwise drop only last few transactions
@@ -1615,7 +1648,7 @@ func (pool *TxPool) truncateQueue() {
for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
pool.removeTx(txs[i].Hash(), true)
drop--
- queuedRateLimitMeter.Mark(1)
+ queuedRateLimitMeter.Add(1)
}
}
if pool.reOrgCounter == c_reorgCounterThreshold {
@@ -1653,7 +1686,7 @@ func (pool *TxPool) demoteUnexecutables() {
log.Trace("Removed unpayable pending transaction", "hash", hash)
pool.all.Remove(hash)
}
- pendingNofundsMeter.Mark(int64(len(drops)))
+ pendingNofundsMeter.Add(float64(len(drops)))
for _, tx := range invalids {
hash := tx.Hash()
@@ -1662,9 +1695,10 @@ func (pool *TxPool) demoteUnexecutables() {
// Internal shuffle shouldn't touch the lookup set.
pool.enqueueTx(hash, tx, false, false)
}
- pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+ removedTxs := float64(len(olds) + len(drops) + len(invalids))
+ pendingGauge.Sub(removedTxs)
if pool.locals.contains(addr) {
- localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+ localGauge.Sub(removedTxs)
}
// If there's a gap in front, alert (should never happen) and postpone all transactions
if list.Len() > 0 && list.txs.Get(nonce) == nil {
@@ -1676,7 +1710,7 @@ func (pool *TxPool) demoteUnexecutables() {
// Internal shuffle shouldn't touch the lookup set.
pool.enqueueTx(hash, tx, false, false)
}
- pendingGauge.Dec(int64(len(gapped)))
+ pendingGauge.Sub(float64(len(gapped)))
}
// Delete the entire pending entry if it became empty.
if list.Empty() {
@@ -1940,7 +1974,7 @@ func (t *txLookup) Add(tx *types.Transaction, local bool) {
defer t.lock.Unlock()
t.slots += numSlots(tx)
- slotsGauge.Update(int64(t.slots))
+ slotsGauge.Set(float64(t.slots))
if local {
t.locals[tx.Hash()] = tx
@@ -1963,7 +1997,7 @@ func (t *txLookup) Remove(hash common.Hash) {
return
}
t.slots -= numSlots(tx)
- slotsGauge.Update(int64(t.slots))
+ slotsGauge.Set(float64(t.slots))
delete(t.locals, hash)
delete(t.remotes, hash)
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 7a3d2f5e97..25e74aee3c 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -29,7 +29,6 @@ import (
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/event"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"math/big"
"sync"
"sync/atomic"
@@ -544,7 +543,6 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
// Create a timeout timer, and the associated header fetcher
skeleton := true
skeletonHeaders := make([]*types.Header, 0)
- request := time.Now() // time of the last skeleton fetch request
timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
<-timeout.C // timeout channel should be initially empty
defer timeout.Stop()
@@ -567,7 +565,6 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
var ttl time.Duration
getHeaders := func(from uint64, to uint64) {
- request = time.Now()
if skeleton {
timeout.Reset(1 * time.Minute)
@@ -630,7 +627,6 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
break
}
- headerReqTimer.UpdateSince(request)
timeout.Stop()
headers := packet.(*headerPack).headers
@@ -726,7 +722,6 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
}
// Header retrieval timed out, consider the peer bad and drop
p.log.Debug("Header request timed out", "elapsed", ttl)
- headerTimeoutMeter.Mark(1)
d.dropPeer(p.id)
// Finish the sync gracefully instead of dumping the gathered data though
@@ -973,7 +968,6 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack)
}
if throttle {
throttled = true
- throttleCounter.Inc(1)
}
if request == nil {
continue
@@ -1170,21 +1164,19 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error {
// DeliverHeaders injects a new batch of block headers received from a remote
// node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {
- return d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
+ return d.deliver(d.headerCh, &headerPack{id, headers})
}
// DeliverBodies injects a new batch of block bodies received from a remote node.
func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header, extTransactions [][]*types.Transaction, manifests []types.BlockManifest) error {
- return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles, extTransactions, manifests}, bodyInMeter, bodyDropMeter)
+ return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles, extTransactions, manifests})
}
// deliver injects a new batch of data received from a remote node.
-func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
+func (d *Downloader) deliver(destCh chan dataPack, packet dataPack) (err error) {
- // Update the delivery metrics for both good and failed deliveries
- inMeter.Mark(int64(packet.Items()))
- defer func() {
- if err != nil {
- dropMeter.Mark(int64(packet.Items()))
- }
- }()
// Deliver or abort if the sync is canceled while queuing
diff --git a/eth/downloader/metrics.go b/eth/downloader/metrics.go
deleted file mode 100644
index d13b6fac46..0000000000
--- a/eth/downloader/metrics.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Contains the metrics collected by the downloader.
-
-package downloader
-
-import (
- "github.com/dominant-strategies/go-quai/metrics"
-)
-
-var (
- headerInMeter = metrics.NewRegisteredMeter("eth/downloader/headers/in", nil)
- headerReqTimer = metrics.NewRegisteredTimer("eth/downloader/headers/req", nil)
- headerDropMeter = metrics.NewRegisteredMeter("eth/downloader/headers/drop", nil)
- headerTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/headers/timeout", nil)
-
- bodyInMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/in", nil)
- bodyReqTimer = metrics.NewRegisteredTimer("eth/downloader/bodies/req", nil)
- bodyDropMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/drop", nil)
- bodyTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/timeout", nil)
-
- throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil)
-)
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index f3ff39f04e..74ce476952 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -30,7 +30,7 @@ import (
"github.com/dominant-strategies/go-quai/common/prque"
"github.com/dominant-strategies/go-quai/core/types"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/trie"
)
@@ -569,7 +569,7 @@ func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {
q.lock.Lock()
defer q.lock.Unlock()
- return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)
+ return q.expire(timeout, q.headerPendPool, q.headerTaskQueue)
}
// ExpireBodies checks for in flight block body requests that exceeded a timeout
@@ -578,7 +578,7 @@ func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
q.lock.Lock()
defer q.lock.Unlock()
- return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)
+ return q.expire(timeout, q.blockPendPool, q.blockTaskQueue)
}
// expire is the generic check that moves expired tasks from a pending pool back
@@ -587,14 +587,11 @@ func (q *queue) ExpireBodies(timeout time.Duration) map[string]int {
// Note, this method expects the queue lock to be already held. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
-func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {
+func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque) map[string]int {
// Iterate over the expired requests and return each to the queue
expiries := make(map[string]int)
for id, request := range pendPool {
if time.Since(request.Time) > timeout {
- // Update the metrics with the timeout
- timeoutMeter.Mark(1)
-
// Return any non satisfied requests to the pool
if request.From > 0 {
taskQueue.Push(request.From, -int64(request.From))
@@ -635,7 +632,6 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh
if request == nil {
return 0, errNoFetchesPending
}
- headerReqTimer.UpdateSince(request.Time)
delete(q.headerPendPool, id)
// Ensure headers can be mapped onto the skeleton chain
@@ -760,7 +756,7 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi
result.SetBodyDone()
}
return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,
- bodyReqTimer, len(txLists), validate, reconstruct)
+ len(txLists), validate, reconstruct)
}
// deliver injects a data retrieval response into the results queue.
@@ -769,7 +765,7 @@ func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLi
// reason this lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
- taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,
+ taskQueue *prque.Prque, pendPool map[string]*fetchRequest,
results int, validate func(index int, header *types.Header) error,
reconstruct func(index int, result *fetchResult)) (int, error) {
@@ -778,7 +774,6 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
if request == nil {
return 0, errNoFetchesPending
}
- reqTimer.UpdateSince(request.Time)
delete(pendPool, id)
// If no data items were retrieved, mark them as unavailable for the origin peer
diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go
index adddd89496..9145cc7e66 100644
--- a/eth/fetcher/block_fetcher.go
+++ b/eth/fetcher/block_fetcher.go
@@ -28,7 +28,7 @@ import (
"github.com/dominant-strategies/go-quai/consensus"
"github.com/dominant-strategies/go-quai/core/types"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/trie"
)
@@ -47,25 +47,6 @@ const (
maxAllowableEntropyDist = 3500 // Maximum multiple of zone intrinsic S distance allowed from the current Entropy
)
-var (
- blockAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
- blockAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
- blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
- blockAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)
-
- blockBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
- blockBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
- blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
-
- headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
- bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)
-
- headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
- headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
- bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
- bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
-)
-
var errTerminated = errors.New("terminated")
// blockRetrievalFn is a callback type for retrieving a block from the local chain.
@@ -359,19 +340,16 @@ func (f *BlockFetcher) loop() {
case notification := <-f.notify:
// A block was announced, make sure the peer isn't DOSing us
- blockAnnounceInMeter.Mark(1)
count := f.announces[notification.origin] + 1
if count > hashLimit {
log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
- blockAnnounceDOSMeter.Mark(1)
break
}
// If we have a valid block number, check that it's potentially useful
if notification.number > 0 {
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
- blockAnnounceDropMeter.Mark(1)
break
}
}
@@ -393,7 +371,6 @@ func (f *BlockFetcher) loop() {
case op := <-f.inject:
// A direct block insertion was requested, try and fill any pending gaps
- blockBroadcastInMeter.Mark(1)
f.enqueue(op.origin, nil, op.block)
@@ -433,7 +410,6 @@ func (f *BlockFetcher) loop() {
f.fetchingHook(hashes)
}
for _, hash := range hashes {
- headerFetchMeter.Mark(1)
fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
}
}()
@@ -464,7 +440,6 @@ func (f *BlockFetcher) loop() {
if f.completingHook != nil {
f.completingHook(hashes)
}
- bodyFetchMeter.Mark(int64(len(hashes)))
go f.completing[hashes[0]].fetchBodies(hashes)
}
// Schedule the next fetch if blocks are still pending
@@ -480,7 +455,6 @@ func (f *BlockFetcher) loop() {
case <-f.quit:
return
}
- headerFilterInMeter.Mark(int64(len(task.headers)))
// Split the batch of headers into unknown ones (to return to the caller),
// known incomplete ones (requiring body retrievals) and completed blocks.
@@ -524,7 +498,6 @@ func (f *BlockFetcher) loop() {
unknown = append(unknown, header)
}
}
- headerFilterOutMeter.Mark(int64(len(unknown)))
select {
case filter <- &headerFilterTask{headers: unknown, time: task.time}:
case <-f.quit:
@@ -560,7 +533,6 @@ func (f *BlockFetcher) loop() {
case <-f.quit:
return
}
- bodyFilterInMeter.Mark(int64(len(task.transactions)))
blocks := []*types.Block{}
// abort early if there's nothing explicitly requested
if len(f.completing) > 0 {
@@ -622,7 +594,6 @@ func (f *BlockFetcher) loop() {
}
}
}
- bodyFilterOutMeter.Mark(int64(len(task.transactions)))
select {
case filter <- task:
case <-f.quit:
@@ -735,7 +706,6 @@ func (f *BlockFetcher) ImportBlocks(peer string, block *types.Block, relay bool)
// block through and broadcast the block.
if err == nil || err.Error() == consensus.ErrUnknownAncestor.Error() {
// All ok, quickly propagate to our peers
- blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
// Only relay the Mined Blocks that meet the depth criteria
if relay {
@@ -752,7 +722,6 @@ func (f *BlockFetcher) ImportBlocks(peer string, block *types.Block, relay bool)
// TODO: verify the Headers work to be in a certain threshold window
f.writeBlock(block)
// If import succeeded, broadcast the block
- blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
// Only relay the Mined Blocks that meet the depth criteria
if relay {
diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go
index 9ec083cc1d..23864f28d1 100644
--- a/eth/fetcher/tx_fetcher.go
+++ b/eth/fetcher/tx_fetcher.go
@@ -29,7 +29,7 @@ import (
"github.com/dominant-strategies/go-quai/core"
"github.com/dominant-strategies/go-quai/core/types"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
+ "github.com/dominant-strategies/go-quai/metrics_config"
)
const (
@@ -68,32 +68,33 @@ var (
)
var (
- txAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
- txAnnounceKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
- txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
- txAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)
-
- txBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)
- txBroadcastKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil)
- txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil)
- txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil)
-
- txRequestOutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil)
- txRequestFailMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil)
- txRequestDoneMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil)
- txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil)
-
- txReplyInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil)
- txReplyKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil)
- txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil)
- txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil)
-
- txFetcherWaitingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil)
- txFetcherWaitingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil)
- txFetcherQueueingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil)
- txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil)
- txFetcherFetchingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil)
- txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil)
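+ // All fetcher meters and gauges share one vector metric; labels encode
+ // "stage:event" pairs such as "announces:in" or "waiting:peers".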
+ transactionMetrics = metrics_config.NewGaugeVec("BroadcastTxs", "Metrics related to transactions sent and received")
+ txAnnounceInMeter = transactionMetrics.WithLabelValues("announces:in")
+ txAnnounceKnownMeter = transactionMetrics.WithLabelValues("announces:known")
+ txAnnounceUnderpricedMeter = transactionMetrics.WithLabelValues("announces:underpriced")
+ txAnnounceDOSMeter = transactionMetrics.WithLabelValues("announces:dos")
+
+ txBroadcastInMeter = transactionMetrics.WithLabelValues("broadcasts:in")
+ txBroadcastKnownMeter = transactionMetrics.WithLabelValues("broadcasts:known")
+ txBroadcastUnderpricedMeter = transactionMetrics.WithLabelValues("broadcasts:underpriced")
+ txBroadcastOtherRejectMeter = transactionMetrics.WithLabelValues("broadcasts:otherreject")
+
+ txRequestOutMeter = transactionMetrics.WithLabelValues("request:out")
+ txRequestFailMeter = transactionMetrics.WithLabelValues("request:fail")
+ txRequestDoneMeter = transactionMetrics.WithLabelValues("request:done")
+ txRequestTimeoutMeter = transactionMetrics.WithLabelValues("request:timeout")
+
+ txReplyInMeter = transactionMetrics.WithLabelValues("replies:in")
+ txReplyKnownMeter = transactionMetrics.WithLabelValues("replies:known")
+ txReplyUnderpricedMeter = transactionMetrics.WithLabelValues("replies:underpriced")
+ txReplyOtherRejectMeter = transactionMetrics.WithLabelValues("replies:otherreject")
+
+ txFetcherWaitingPeers = transactionMetrics.WithLabelValues("waiting:peers")
+ txFetcherWaitingHashes = transactionMetrics.WithLabelValues("waiting:hashes")
+ txFetcherQueueingPeers = transactionMetrics.WithLabelValues("queueing:peers")
+ txFetcherQueueingHashes = transactionMetrics.WithLabelValues("queueing:hashes")
+ txFetcherFetchingPeers = transactionMetrics.WithLabelValues("fetching:peers")
+ txFetcherFetchingHashes = transactionMetrics.WithLabelValues("fetching:hashes")
)
// txAnnounce is the notification of the availability of a batch
@@ -214,7 +215,8 @@ func NewTxFetcherForTests(
// transactions in the network.
func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
// Keep track of all the announced transactions
- txAnnounceInMeter.Mark(int64(len(hashes)))
+ txAnnounceInMeter.Add(float64(len(hashes)))
+ log.Warn("About to announce transactions", "peer", peer, "txs", len(hashes))
// Skip any transaction announcements that we already know of, or that we've
// previously marked as cheap and discarded. This check is of course racey,
@@ -237,8 +239,8 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
unknowns = append(unknowns, hash)
}
}
- txAnnounceKnownMeter.Mark(duplicate)
- txAnnounceUnderpricedMeter.Mark(underpriced)
+ txAnnounceKnownMeter.Add(float64(duplicate))
+ txAnnounceUnderpricedMeter.Add(float64(underpriced))
// If anything's left to announce, push it into the internal loop
if len(unknowns) == 0 {
@@ -262,10 +264,11 @@ func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
// re-schedule missing transactions as soon as possible.
func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
// Keep track of all the propagated transactions
+ log.Info("About to enqueue transactions", "peer", peer, "txs", len(txs), "direct", direct)
if direct {
- txReplyInMeter.Mark(int64(len(txs)))
+ txReplyInMeter.Add(float64(len(txs)))
} else {
- txBroadcastInMeter.Mark(int64(len(txs)))
+ txBroadcastInMeter.Add(float64(len(txs)))
}
// Push all the transactions into the pool, tracking underpriced ones to avoid
// re-requesting them and dropping the peer in case of malicious transfers.
@@ -307,13 +310,13 @@ func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool)
added = append(added, txs[i].Hash())
}
if direct {
- txReplyKnownMeter.Mark(duplicate)
- txReplyUnderpricedMeter.Mark(underpriced)
- txReplyOtherRejectMeter.Mark(otherreject)
+ txReplyKnownMeter.Add(float64(duplicate))
+ txReplyUnderpricedMeter.Add(float64(underpriced))
+ txReplyOtherRejectMeter.Add(float64(otherreject))
} else {
- txBroadcastKnownMeter.Mark(duplicate)
- txBroadcastUnderpricedMeter.Mark(underpriced)
- txBroadcastOtherRejectMeter.Mark(otherreject)
+ txBroadcastKnownMeter.Add(float64(duplicate))
+ txBroadcastUnderpricedMeter.Add(float64(underpriced))
+ txBroadcastOtherRejectMeter.Add(float64(otherreject))
}
select {
case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
@@ -367,12 +370,12 @@ func (f *TxFetcher) loop() {
// all fulfilled, so the remainder are rescheduled without the cap
// check. Should be fine as the limit is in the thousands and the
// request size in the hundreds.
- txAnnounceDOSMeter.Mark(int64(len(ann.hashes)))
+ txAnnounceDOSMeter.Add(float64(len(ann.hashes)))
break
}
want := used + len(ann.hashes)
if want > maxTxAnnounces {
- txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
+ txAnnounceDOSMeter.Add(float64(want - maxTxAnnounces))
ann.hashes = ann.hashes[:want-maxTxAnnounces]
}
// All is well, schedule the remainder of the transactions
@@ -483,7 +486,7 @@ func (f *TxFetcher) loop() {
// possibly further increase the load on it.
for peer, req := range f.requests {
if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {
- txRequestTimeoutMeter.Mark(int64(len(req.hashes)))
+ txRequestTimeoutMeter.Add(float64(len(req.hashes)))
// Reschedule all the not-yet-delivered fetches to alternate peers
for _, hash := range req.hashes {
@@ -563,7 +566,7 @@ func (f *TxFetcher) loop() {
// from the original query
if delivery.direct {
// Mark the requesting successful (independent of individual status)
- txRequestDoneMeter.Mark(int64(len(delivery.hashes)))
+ txRequestDoneMeter.Add(float64(len(delivery.hashes)))
// Make sure something was pending, nuke it
req := f.requests[delivery.origin]
@@ -671,13 +674,12 @@ func (f *TxFetcher) loop() {
case <-f.quit:
return
}
- // No idea what happened, but bump some sanity metrics
- txFetcherWaitingPeers.Update(int64(len(f.waitslots)))
- txFetcherWaitingHashes.Update(int64(len(f.waitlist)))
- txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))
- txFetcherQueueingHashes.Update(int64(len(f.announced)))
- txFetcherFetchingPeers.Update(int64(len(f.requests)))
- txFetcherFetchingHashes.Update(int64(len(f.fetching)))
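+ // Refresh the scheduler gauges after every pass through the loop.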
+ txFetcherWaitingPeers.Set(float64(len(f.waitslots)))
+ txFetcherWaitingHashes.Set(float64(len(f.waitlist)))
+ txFetcherQueueingPeers.Set(float64(len(f.announces) - len(f.requests)))
+ txFetcherQueueingHashes.Set(float64(len(f.announced)))
+ txFetcherFetchingPeers.Set(float64(len(f.requests)))
+ txFetcherFetchingHashes.Set(float64(len(f.fetching)))
// Loop did something, ping the step notifier if needed (tests)
if f.step != nil {
@@ -796,13 +798,13 @@ func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{},
// If any hashes were allocated, request them from the peer
if len(hashes) > 0 {
f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
- txRequestOutMeter.Mark(int64(len(hashes)))
+ txRequestOutMeter.Add(float64(len(hashes)))
go func(peer string, hashes []common.Hash) {
// Try to fetch the transactions, but in case of a request
// failure (e.g. peer disconnected), reschedule the hashes.
if err := f.fetchTxs(peer, hashes); err != nil {
- txRequestFailMeter.Mark(int64(len(hashes)))
+ txRequestFailMeter.Add(float64(len(hashes)))
f.Drop(peer)
}
}(peer, hashes)
diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go
index 6027b7a14c..62178e3680 100644
--- a/eth/protocols/eth/handler.go
+++ b/eth/protocols/eth/handler.go
@@ -24,7 +24,7 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/core"
"github.com/dominant-strategies/go-quai/core/types"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/p2p"
"github.com/dominant-strategies/go-quai/p2p/enode"
"github.com/dominant-strategies/go-quai/p2p/enr"
@@ -197,18 +197,6 @@ func handleMessage(backend Backend, peer *Peer) error {
} else {
return fmt.Errorf("protocol version not supported")
}
- // Track the amount of time it takes to serve the request and run the handler
- if metrics.Enabled {
- h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, c_ProtocolName, peer.Version(), msg.Code)
- defer func(start time.Time) {
- sampler := func() metrics.Sample {
- return metrics.ResettingSample(
- metrics.NewExpDecaySample(1028, 0.015),
- )
- }
- metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())
- }(time.Now())
- }
if handler := handlers[msg.Code]; handler != nil {
return handler(backend, msg, peer)
}
diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go
index 399ad4cbf3..8cec6404de 100644
--- a/ethdb/leveldb/leveldb.go
+++ b/ethdb/leveldb/leveldb.go
@@ -30,7 +30,6 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
@@ -63,20 +62,6 @@ type Database struct {
fn string // filename for reporting
db *leveldb.DB // LevelDB instance
- compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction
- compReadMeter metrics.Meter // Meter for measuring the data read during compaction
- compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
- writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
- writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction
- diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database
- diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read
- diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
- memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction
- level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0
- nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level
- seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt
- manualMemAllocGauge metrics.Gauge // Gauge to track the amount of memory that has been manually allocated (not a part of runtime/GC)
-
quitLock sync.Mutex // Mutex protecting the quit channel access
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
@@ -131,20 +116,6 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option
log: log.Log,
quitChan: make(chan chan error),
}
- ldb.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
- ldb.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
- ldb.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
- ldb.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
- ldb.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
- ldb.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
- ldb.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
- ldb.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
- ldb.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
- ldb.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
- ldb.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
- ldb.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)
- ldb.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil)
-
// Start up the metrics gathering and return
go ldb.meter(metricsGatheringInterval)
return ldb, nil
@@ -323,19 +294,6 @@ func (db *Database) meter(refresh time.Duration) {
compactions[i%2][idx] += value
}
}
- // Update all the requested meters
- if db.diskSizeGauge != nil {
- db.diskSizeGauge.Update(int64(compactions[i%2][0] * 1024 * 1024))
- }
- if db.compTimeMeter != nil {
- db.compTimeMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1000 * 1000 * 1000))
- }
- if db.compReadMeter != nil {
- db.compReadMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
- }
- if db.compWriteMeter != nil {
- db.compWriteMeter.Mark(int64((compactions[i%2][3] - compactions[(i-1)%2][3]) * 1024 * 1024))
- }
// Retrieve the write delay statistic
writedelay, err := db.db.GetProperty("leveldb.writedelay")
if err != nil {
@@ -360,12 +318,6 @@ func (db *Database) meter(refresh time.Duration) {
merr = err
continue
}
- if db.writeDelayNMeter != nil {
- db.writeDelayNMeter.Mark(delayN - delaystats[0])
- }
- if db.writeDelayMeter != nil {
- db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
- }
// If a warning that db is performing compaction has been displayed, any subsequent
// warnings will be withheld for one minute not to overwhelm the user.
if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
@@ -399,12 +351,6 @@ func (db *Database) meter(refresh time.Duration) {
merr = err
continue
}
- if db.diskReadMeter != nil {
- db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
- }
- if db.diskWriteMeter != nil {
- db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
- }
iostats[0], iostats[1] = nRead, nWrite
compCount, err := db.db.GetProperty("leveldb.compcount")
@@ -425,10 +371,6 @@ func (db *Database) meter(refresh time.Duration) {
merr = err
continue
}
- db.memCompGauge.Update(int64(memComp))
- db.level0CompGauge.Update(int64(level0Comp))
- db.nonlevel0CompGauge.Update(int64(nonLevel0Comp))
- db.seekCompGauge.Update(int64(seekComp))
// Sleep a bit, then repeat the stats collection
select {
diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 1cb90c6a27..243061919d 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -32,7 +32,6 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
)
const (
@@ -56,20 +55,6 @@ type Database struct {
fn string // filename for reporting
db *pebble.DB // Underlying pebble storage engine
- compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction
- compReadMeter metrics.Meter // Meter for measuring the data read during compaction
- compWriteMeter metrics.Meter // Meter for measuring the data written during compaction
- writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
- writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction
- diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database
- diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read
- diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written
- memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction
- level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0
- nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level
- seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt
- manualMemAllocGauge metrics.Gauge // Gauge for tracking amount of non-managed memory currently allocated
-
quitLock sync.RWMutex // Mutex protecting the quit channel and the closed flag
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database
closed bool // keep track of whether we're Closed
@@ -197,20 +182,6 @@ func New(file string, cache int, handles int, namespace string, readonly bool) (
}
db.db = innerDB
- db.compTimeMeter = metrics.NewRegisteredMeter(namespace+"compact/time", nil)
- db.compReadMeter = metrics.NewRegisteredMeter(namespace+"compact/input", nil)
- db.compWriteMeter = metrics.NewRegisteredMeter(namespace+"compact/output", nil)
- db.diskSizeGauge = metrics.NewRegisteredGauge(namespace+"disk/size", nil)
- db.diskReadMeter = metrics.NewRegisteredMeter(namespace+"disk/read", nil)
- db.diskWriteMeter = metrics.NewRegisteredMeter(namespace+"disk/write", nil)
- db.writeDelayMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/duration", nil)
- db.writeDelayNMeter = metrics.NewRegisteredMeter(namespace+"compact/writedelay/counter", nil)
- db.memCompGauge = metrics.NewRegisteredGauge(namespace+"compact/memory", nil)
- db.level0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/level0", nil)
- db.nonlevel0CompGauge = metrics.NewRegisteredGauge(namespace+"compact/nonlevel0", nil)
- db.seekCompGauge = metrics.NewRegisteredGauge(namespace+"compact/seek", nil)
- db.manualMemAllocGauge = metrics.NewRegisteredGauge(namespace+"memory/manualalloc", nil)
-
// Start up the metrics gathering and return
go db.meter(metricsGatheringInterval)
return db, nil
@@ -381,12 +352,10 @@ func (d *Database) meter(refresh time.Duration) {
compRead int64
nWrite int64
- metrics = d.db.Metrics()
- compTime = d.compTime.Load()
- writeDelayCount = d.writeDelayCount.Load()
- writeDelayTime = d.writeDelayTime.Load()
- nonLevel0CompCount = int64(d.nonLevel0Comp.Load())
- level0CompCount = int64(d.level0Comp.Load())
+ metrics = d.db.Metrics()
+ compTime = d.compTime.Load()
+ writeDelayCount = d.writeDelayCount.Load()
+ writeDelayTime = d.writeDelayTime.Load()
)
writeDelayTimes[i%2] = writeDelayTime
writeDelayCounts[i%2] = writeDelayCount
@@ -405,38 +374,6 @@ func (d *Database) meter(refresh time.Duration) {
compReads[i%2] = compRead
nWrites[i%2] = nWrite
- if d.writeDelayNMeter != nil {
- d.writeDelayNMeter.Mark(writeDelayCounts[i%2] - writeDelayCounts[(i-1)%2])
- }
- if d.writeDelayMeter != nil {
- d.writeDelayMeter.Mark(writeDelayTimes[i%2] - writeDelayTimes[(i-1)%2])
- }
- if d.compTimeMeter != nil {
- d.compTimeMeter.Mark(compTimes[i%2] - compTimes[(i-1)%2])
- }
- if d.compReadMeter != nil {
- d.compReadMeter.Mark(compReads[i%2] - compReads[(i-1)%2])
- }
- if d.compWriteMeter != nil {
- d.compWriteMeter.Mark(compWrites[i%2] - compWrites[(i-1)%2])
- }
- if d.diskSizeGauge != nil {
- d.diskSizeGauge.Update(int64(metrics.DiskSpaceUsage()))
- }
- if d.diskReadMeter != nil {
- d.diskReadMeter.Mark(0) // pebble doesn't track non-compaction reads
- }
- if d.diskWriteMeter != nil {
- d.diskWriteMeter.Mark(nWrites[i%2] - nWrites[(i-1)%2])
- }
- // See https://github.com/cockroachdb/pebble/pull/1628#pullrequestreview-1026664054
- manuallyAllocated := metrics.BlockCache.Size + int64(metrics.MemTable.Size) + int64(metrics.MemTable.ZombieSize)
- d.manualMemAllocGauge.Update(manuallyAllocated)
- d.memCompGauge.Update(metrics.Flush.Count)
- d.nonlevel0CompGauge.Update(nonLevel0CompCount)
- d.level0CompGauge.Update(level0CompCount)
- d.seekCompGauge.Update(metrics.Compact.ReadCount)
-
// Sleep a bit, then repeat the stats collection
select {
case errc = <-d.quitChan:
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index 919e797e25..3f739c1f86 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -23,8 +23,6 @@ import (
"runtime"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
- "github.com/dominant-strategies/go-quai/metrics/exp"
"github.com/fjl/memsize/memsizeui"
"gopkg.in/urfave/cli.v1"
)
@@ -204,9 +202,6 @@ func Setup(ctx *cli.Context) error {
func StartPProf(address string, withMetrics bool) {
- // Hook go-metrics into expvar on any /debug/metrics request, load all vars
- // from the registry into expvar, and execute regular expvar handler.
- if withMetrics {
- exp.Exp(metrics.DefaultRegistry)
- }
http.Handle("/memsize/", http.StripPrefix("/memsize", &Memsize))
log.Info("Starting pprof server", "addr", fmt.Sprintf("http://%s/debug/pprof", address))
go func() {
diff --git a/metrics/FORK.md b/metrics/FORK.md
deleted file mode 100644
index b19985bf56..0000000000
--- a/metrics/FORK.md
+++ /dev/null
@@ -1 +0,0 @@
-This repo has been forked from https://github.com/rcrowley/go-metrics at commit e181e09
diff --git a/metrics/LICENSE b/metrics/LICENSE
deleted file mode 100644
index 363fa9ee77..0000000000
--- a/metrics/LICENSE
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright 2012 Richard Crowley. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
-THE POSSIBILITY OF SUCH DAMAGE.
-
-The views and conclusions contained in the software and documentation
-are those of the authors and should not be interpreted as representing
-official policies, either expressed or implied, of Richard Crowley.
diff --git a/metrics/README.md b/metrics/README.md
deleted file mode 100644
index e2d7945008..0000000000
--- a/metrics/README.md
+++ /dev/null
@@ -1,166 +0,0 @@
-go-metrics
-==========
-
-![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
-
-Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
-
-Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
-
-Usage
------
-
-Create and update metrics:
-
-```go
-c := metrics.NewCounter()
-metrics.Register("foo", c)
-c.Inc(47)
-
-g := metrics.NewGauge()
-metrics.Register("bar", g)
-g.Update(47)
-
-r := metrics.NewRegistry()
-fg := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
-
-s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
-h := metrics.NewHistogram(s)
-metrics.Register("baz", h)
-h.Update(47)
-
-m := metrics.NewMeter()
-metrics.Register("quux", m)
-m.Mark(47)
-
-t := metrics.NewTimer()
-metrics.Register("bang", t)
-t.Time(func() {})
-t.Update(47)
-```
-
-Register() is not threadsafe. For threadsafe metric registration use
-GetOrRegister:
-
-```go
-t := metrics.GetOrRegisterTimer("account.create.latency", nil)
-t.Time(func() {})
-t.Update(47)
-```
-
-**NOTE:** Be sure to unregister short-lived meters and timers, otherwise they will
-leak memory:
-
-```go
-// Will call Stop() on the Meter to allow for garbage collection
-metrics.Unregister("quux")
-// Or similarly for a Timer that embeds a Meter
-metrics.Unregister("bang")
-```
-
-Periodically log every metric in human-readable form to standard error:
-
-```go
-go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
-```
-
-Periodically log every metric in slightly-more-parseable form to syslog:
-
-```go
-w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
-go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
-```
-
-Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
-
-```go
-
-import "github.com/cyberdelia/go-metrics-graphite"
-
-addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
-go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
-```
-
-Periodically emit every metric into InfluxDB:
-
-**NOTE:** This has been pulled out of the library due to constant fluctuations
-in the InfluxDB API. In fact, all client libraries are on their way out. See
-issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
-[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
-
-```go
-import "github.com/vrischmann/go-metrics-influxdb"
-
-go influxdb.InfluxDB(metrics.DefaultRegistry,
- 10e9,
- "127.0.0.1:8086",
- "database-name",
- "username",
- "password"
-)
-```
-
-Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
-
-**Note**: the client included with this repository under the `librato` package
-has been deprecated and moved to the repository linked above.
-
-```go
-import "github.com/mihasya/go-metrics-librato"
-
-go librato.Librato(metrics.DefaultRegistry,
- 10e9, // interval
- "example@example.com", // account owner email address
- "token", // Librato API token
- "hostname", // source
- []float64{0.95}, // percentiles to send
- time.Millisecond, // time unit
-)
-```
-
-Periodically emit every metric to StatHat:
-
-```go
-import "github.com/rcrowley/go-metrics/stathat"
-
-go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
-```
-
-Maintain all metrics along with expvars at `/debug/metrics`:
-
-This uses the same mechanism as [the official expvar](https://golang.org/pkg/expvar/)
-but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars
-as well as all your go-metrics.
-
-
-```go
-import "github.com/rcrowley/go-metrics/exp"
-
-exp.Exp(metrics.DefaultRegistry)
-```
-
-Installation
-------------
-
-```sh
-go get github.com/rcrowley/go-metrics
-```
-
-StatHat support additionally requires their Go client:
-
-```sh
-go get github.com/stathat/go
-```
-
-Publishing Metrics
-------------------
-
-Clients are available for the following destinations:
-
-* Librato - https://github.com/mihasya/go-metrics-librato
-* Graphite - https://github.com/cyberdelia/go-metrics-graphite
-* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb
-* Ganglia - https://github.com/appscode/metlia
-* Prometheus - https://github.com/deathowl/go-metrics-prometheus
-* DataDog - https://github.com/syntaqx/go-metrics-datadog
-* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx
diff --git a/metrics/config.go b/metrics/config.go
deleted file mode 100644
index d9f80a1feb..0000000000
--- a/metrics/config.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of go-ethereum.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package metrics
-
-// Config contains the configuration for the metric collection.
-type Config struct {
- Enabled bool `toml:",omitempty"`
- EnabledExpensive bool `toml:",omitempty"`
- HTTP string `toml:",omitempty"`
- Port int `toml:",omitempty"`
- EnableInfluxDB bool `toml:",omitempty"`
- InfluxDBEndpoint string `toml:",omitempty"`
- InfluxDBDatabase string `toml:",omitempty"`
- InfluxDBUsername string `toml:",omitempty"`
- InfluxDBPassword string `toml:",omitempty"`
- InfluxDBTags string `toml:",omitempty"`
-}
-
-// DefaultConfig is the default config for metrics used in go-quai.
-var DefaultConfig = Config{
- Enabled: false,
- EnabledExpensive: false,
- HTTP: "127.0.0.1",
- Port: 6060,
- EnableInfluxDB: false,
- InfluxDBEndpoint: "http://localhost:8086",
- InfluxDBDatabase: "quai",
- InfluxDBUsername: "test",
- InfluxDBPassword: "test",
- InfluxDBTags: "host=localhost",
-}
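The deleted Config/DefaultConfig pair moves to the new metrics_config package: cmd/go-quai/config.go now references metrics_config.Config and metrics_config.DefaultConfig. That file is not part of this diff, so the sketch below is only an assumption about its shape, mirroring the fields removed here:

```go
// Hypothetical sketch of metrics_config; only Config and DefaultConfig are
// confirmed by cmd/go-quai/config.go, the fields are assumed to carry over.
package metrics_config

type Config struct {
	Enabled bool   `toml:",omitempty"`
	HTTP    string `toml:",omitempty"`
	Port    int    `toml:",omitempty"`
}

var DefaultConfig = Config{
	Enabled: false,
	HTTP:    "127.0.0.1",
	Port:    6060,
}
```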
diff --git a/metrics/counter.go b/metrics/counter.go
deleted file mode 100644
index 2f78c90d5c..0000000000
--- a/metrics/counter.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package metrics
-
-import (
- "sync/atomic"
-)
-
-// Counters hold an int64 value that can be incremented and decremented.
-type Counter interface {
- Clear()
- Count() int64
- Dec(int64)
- Inc(int64)
- Snapshot() Counter
-}
-
-// GetOrRegisterCounter returns an existing Counter or constructs and registers
-// a new StandardCounter.
-func GetOrRegisterCounter(name string, r Registry) Counter {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewCounter).(Counter)
-}
-
-// GetOrRegisterCounterForced returns an existing Counter or constructs and registers
-// a new Counter regardless of whether the global switch is enabled.
-// Be sure to unregister the counter from the registry once it is of no use to
-// allow for garbage collection.
-func GetOrRegisterCounterForced(name string, r Registry) Counter {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewCounterForced).(Counter)
-}
-
-// NewCounter constructs a new StandardCounter.
-func NewCounter() Counter {
- if !Enabled {
- return NilCounter{}
- }
- return &StandardCounter{0}
-}
-
-// NewCounterForced constructs a new StandardCounter and returns it regardless of
-// whether the global switch is enabled.
-func NewCounterForced() Counter {
- return &StandardCounter{0}
-}
-
-// NewRegisteredCounter constructs and registers a new StandardCounter.
-func NewRegisteredCounter(name string, r Registry) Counter {
- c := NewCounter()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewRegisteredCounterForced constructs and registers a new StandardCounter
-// regardless of whether the global switch is enabled.
-// Be sure to unregister the counter from the registry once it is of no use to
-// allow for garbage collection.
-func NewRegisteredCounterForced(name string, r Registry) Counter {
- c := NewCounterForced()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// CounterSnapshot is a read-only copy of another Counter.
-type CounterSnapshot int64
-
-// Clear panics.
-func (CounterSnapshot) Clear() {
- panic("Clear called on a CounterSnapshot")
-}
-
-// Count returns the count at the time the snapshot was taken.
-func (c CounterSnapshot) Count() int64 { return int64(c) }
-
-// Dec panics.
-func (CounterSnapshot) Dec(int64) {
- panic("Dec called on a CounterSnapshot")
-}
-
-// Inc panics.
-func (CounterSnapshot) Inc(int64) {
- panic("Inc called on a CounterSnapshot")
-}
-
-// Snapshot returns the snapshot.
-func (c CounterSnapshot) Snapshot() Counter { return c }
-
-// NilCounter is a no-op Counter.
-type NilCounter struct{}
-
-// Clear is a no-op.
-func (NilCounter) Clear() {}
-
-// Count is a no-op.
-func (NilCounter) Count() int64 { return 0 }
-
-// Dec is a no-op.
-func (NilCounter) Dec(i int64) {}
-
-// Inc is a no-op.
-func (NilCounter) Inc(i int64) {}
-
-// Snapshot is a no-op.
-func (NilCounter) Snapshot() Counter { return NilCounter{} }
-
-// StandardCounter is the standard implementation of a Counter and uses the
-// sync/atomic package to manage a single int64 value.
-type StandardCounter struct {
- count int64
-}
-
-// Clear sets the counter to zero.
-func (c *StandardCounter) Clear() {
- atomic.StoreInt64(&c.count, 0)
-}
-
-// Count returns the current count.
-func (c *StandardCounter) Count() int64 {
- return atomic.LoadInt64(&c.count)
-}
-
-// Dec decrements the counter by the given amount.
-func (c *StandardCounter) Dec(i int64) {
- atomic.AddInt64(&c.count, -i)
-}
-
-// Inc increments the counter by the given amount.
-func (c *StandardCounter) Inc(i int64) {
- atomic.AddInt64(&c.count, i)
-}
-
-// Snapshot returns a read-only copy of the counter.
-func (c *StandardCounter) Snapshot() Counter {
- return CounterSnapshot(c.Count())
-}
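StandardCounter above is nothing more than an int64 driven through sync/atomic. A self-contained sketch of the same pattern, safe under concurrent use:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// counter mirrors StandardCounter: one int64, touched only atomically.
type counter struct{ n int64 }

func (c *counter) Inc(i int64)  { atomic.AddInt64(&c.n, i) }
func (c *counter) Count() int64 { return atomic.LoadInt64(&c.n) }

func main() {
	var c counter
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.Inc(1) }()
	}
	wg.Wait()
	fmt.Println(c.Count()) // 10
}
```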
diff --git a/metrics/counter_test.go b/metrics/counter_test.go
deleted file mode 100644
index af26ef1548..0000000000
--- a/metrics/counter_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package metrics
-
-import "testing"
-
-func BenchmarkCounter(b *testing.B) {
- c := NewCounter()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- c.Inc(1)
- }
-}
-
-func TestCounterClear(t *testing.T) {
- c := NewCounter()
- c.Inc(1)
- c.Clear()
- if count := c.Count(); count != 0 {
- t.Errorf("c.Count(): 0 != %v\n", count)
- }
-}
-
-func TestCounterDec1(t *testing.T) {
- c := NewCounter()
- c.Dec(1)
- if count := c.Count(); count != -1 {
- t.Errorf("c.Count(): -1 != %v\n", count)
- }
-}
-
-func TestCounterDec2(t *testing.T) {
- c := NewCounter()
- c.Dec(2)
- if count := c.Count(); count != -2 {
- t.Errorf("c.Count(): -2 != %v\n", count)
- }
-}
-
-func TestCounterInc1(t *testing.T) {
- c := NewCounter()
- c.Inc(1)
- if count := c.Count(); count != 1 {
- t.Errorf("c.Count(): 1 != %v\n", count)
- }
-}
-
-func TestCounterInc2(t *testing.T) {
- c := NewCounter()
- c.Inc(2)
- if count := c.Count(); count != 2 {
- t.Errorf("c.Count(): 2 != %v\n", count)
- }
-}
-
-func TestCounterSnapshot(t *testing.T) {
- c := NewCounter()
- c.Inc(1)
- snapshot := c.Snapshot()
- c.Inc(1)
- if count := snapshot.Count(); count != 1 {
- t.Errorf("c.Count(): 1 != %v\n", count)
- }
-}
-
-func TestCounterZero(t *testing.T) {
- c := NewCounter()
- if count := c.Count(); count != 0 {
- t.Errorf("c.Count(): 0 != %v\n", count)
- }
-}
-
-func TestGetOrRegisterCounter(t *testing.T) {
- r := NewRegistry()
- NewRegisteredCounter("foo", r).Inc(47)
- if c := GetOrRegisterCounter("foo", r); c.Count() != 47 {
- t.Fatal(c)
- }
-}
diff --git a/metrics/cpu.go b/metrics/cpu.go
deleted file mode 100644
index 72ece16e07..0000000000
--- a/metrics/cpu.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package metrics
-
-// CPUStats is the system and process CPU stats.
-type CPUStats struct {
- GlobalTime int64 // Time spent by the CPU working on all processes
- GlobalWait int64 // Time spent waiting on disk for all processes
- LocalTime int64 // Time spent by the CPU working on this process
-}
diff --git a/metrics/cpu_disabled.go b/metrics/cpu_disabled.go
deleted file mode 100644
index 6c3428993f..0000000000
--- a/metrics/cpu_disabled.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// +build ios
-
-package metrics
-
-// ReadCPUStats retrieves the current CPU stats. Internally this relies on
-// gopsutil, which is not supported on the platforms in this file.
-func ReadCPUStats(stats *CPUStats) {}
diff --git a/metrics/cpu_enabled.go b/metrics/cpu_enabled.go
deleted file mode 100644
index 3a272c54cf..0000000000
--- a/metrics/cpu_enabled.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-//go:build !ios
-// +build !ios
-
-package metrics
-
-import (
- "github.com/dominant-strategies/go-quai/log"
- "github.com/shirou/gopsutil/cpu"
-)
-
-// ReadCPUStats retrieves the current CPU stats.
-func ReadCPUStats(stats *CPUStats) {
- // passing false to request all cpu times
- timeStats, err := cpu.Times(false)
- if err != nil {
- log.Error("Could not read cpu stats", "err", err)
- return
- }
- if len(timeStats) == 0 {
- log.Error("Empty cpu stats")
- return
- }
- // requesting all cpu times will always return an array with only one time stats entry
- timeStat := timeStats[0]
- stats.GlobalTime = int64((timeStat.User + timeStat.Nice + timeStat.System) * cpu.ClocksPerSec)
- stats.GlobalWait = int64((timeStat.Iowait) * cpu.ClocksPerSec)
- stats.LocalTime = getProcessCPUTime()
-}
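ReadCPUStats above delegates to gopsutil. Calling the same API directly looks like the sketch below; Times(false) aggregates all cores into a single entry, and the fields are seconds of CPU time:

```go
package main

import (
	"fmt"
	"log"

	"github.com/shirou/gopsutil/cpu"
)

func main() {
	// false aggregates all cores into one TimesStat entry, as ReadCPUStats expects.
	stats, err := cpu.Times(false)
	if err != nil || len(stats) == 0 {
		log.Fatal("could not read cpu times")
	}
	t := stats[0]
	fmt.Printf("user=%.2fs system=%.2fs iowait=%.2fs\n", t.User, t.System, t.Iowait)
}
```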
diff --git a/metrics/cpu_syscall.go b/metrics/cpu_syscall.go
deleted file mode 100644
index 6c69c5fd41..0000000000
--- a/metrics/cpu_syscall.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-//go:build !windows
-// +build !windows
-
-package metrics
-
-import (
- syscall "golang.org/x/sys/unix"
-
- "github.com/dominant-strategies/go-quai/log"
-)
-
-// getProcessCPUTime retrieves the process' CPU time since program startup.
-func getProcessCPUTime() int64 {
- var usage syscall.Rusage
- if err := syscall.Getrusage(syscall.RUSAGE_SELF, &usage); err != nil {
- log.Warn("Failed to retrieve CPU time", "err", err)
- return 0
- }
- return int64(usage.Utime.Sec+usage.Stime.Sec)*100 + int64(usage.Utime.Usec+usage.Stime.Usec)/10000 //nolint:unconvert
-}
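The return expression above folds seconds and microseconds into one centisecond count: seconds scale up by 100, microseconds scale down by 10000. A quick check of the arithmetic:

```go
package main

import "fmt"

func main() {
	sec, usec := int64(3), int64(250000) // 3.25s of combined user+system CPU time
	centis := sec*100 + usec/10000       // same conversion as getProcessCPUTime
	fmt.Println(centis)                  // 325
}
```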
diff --git a/metrics/cpu_windows.go b/metrics/cpu_windows.go
deleted file mode 100644
index fb29a52a82..0000000000
--- a/metrics/cpu_windows.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package metrics
-
-// getProcessCPUTime returns 0 on Windows as there is no system call to resolve
-// the actual process' CPU time.
-func getProcessCPUTime() int64 {
- return 0
-}
diff --git a/metrics/debug.go b/metrics/debug.go
deleted file mode 100644
index de4a2739fe..0000000000
--- a/metrics/debug.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package metrics
-
-import (
- "runtime/debug"
- "time"
-)
-
-var (
- debugMetrics struct {
- GCStats struct {
- LastGC Gauge
- NumGC Gauge
- Pause Histogram
- //PauseQuantiles Histogram
- PauseTotal Gauge
- }
- ReadGCStats Timer
- }
- gcStats debug.GCStats
-)
-
-// Capture new values for the Go garbage collector statistics exported in
-// debug.GCStats. This is designed to be called as a goroutine.
-func CaptureDebugGCStats(r Registry, d time.Duration) {
- for range time.Tick(d) {
- CaptureDebugGCStatsOnce(r)
- }
-}
-
-// Capture new values for the Go garbage collector statistics exported in
-// debug.GCStats. This is designed to be called in a background goroutine.
-// Giving a registry which has not been given to RegisterDebugGCStats will
-// panic.
-//
-// Be careful (but much less so) with this because debug.ReadGCStats calls
-// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
-// operation, isn't something you want to be doing all the time.
-func CaptureDebugGCStatsOnce(r Registry) {
- lastGC := gcStats.LastGC
- t := time.Now()
- debug.ReadGCStats(&gcStats)
- debugMetrics.ReadGCStats.UpdateSince(t)
-
- debugMetrics.GCStats.LastGC.Update(gcStats.LastGC.UnixNano())
- debugMetrics.GCStats.NumGC.Update(gcStats.NumGC)
- if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
- debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
- }
- //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
- debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
-}
-
-// Register metrics for the Go garbage collector statistics exported in
-// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
-// i.e. debug.GCStats.PauseTotal.
-func RegisterDebugGCStats(r Registry) {
- debugMetrics.GCStats.LastGC = NewGauge()
- debugMetrics.GCStats.NumGC = NewGauge()
- debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
- //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
- debugMetrics.GCStats.PauseTotal = NewGauge()
- debugMetrics.ReadGCStats = NewTimer()
-
- r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
- r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
- r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
- //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
- r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
- r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
-}
-
-// Allocate an initial slice for gcStats.Pause to avoid allocations during
-// normal operation.
-func init() {
- gcStats.Pause = make([]time.Duration, 11)
-}
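A minimal sketch of how these helpers were meant to be wired while the package still existed: register the gauges once, then let the capture loop sample in the background. As the doc comments above warn, capturing against a registry that never saw RegisterDebugGCStats panics.

```go
package main

import (
	"time"

	"github.com/dominant-strategies/go-quai/metrics" // as it existed before this deletion
)

func main() {
	r := metrics.NewRegistry()
	metrics.RegisterDebugGCStats(r) // must precede any capture call
	go metrics.CaptureDebugGCStats(r, 5*time.Second)
	time.Sleep(12 * time.Second) // let a couple of samples land
}
```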
diff --git a/metrics/debug_test.go b/metrics/debug_test.go
deleted file mode 100644
index 07eb867841..0000000000
--- a/metrics/debug_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package metrics
-
-import (
- "runtime"
- "runtime/debug"
- "testing"
- "time"
-)
-
-func BenchmarkDebugGCStats(b *testing.B) {
- r := NewRegistry()
- RegisterDebugGCStats(r)
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- CaptureDebugGCStatsOnce(r)
- }
-}
-
-func TestDebugGCStatsBlocking(t *testing.T) {
- if g := runtime.GOMAXPROCS(0); g < 2 {
- t.Skipf("skipping TestDebugGCMemStatsBlocking with GOMAXPROCS=%d\n", g)
- return
- }
- ch := make(chan int)
- go testDebugGCStatsBlocking(ch)
- var gcStats debug.GCStats
- t0 := time.Now()
- debug.ReadGCStats(&gcStats)
- t1 := time.Now()
- t.Log("i++ during debug.ReadGCStats:", <-ch)
- go testDebugGCStatsBlocking(ch)
- d := t1.Sub(t0)
- t.Log(d)
- time.Sleep(d)
- t.Log("i++ during time.Sleep:", <-ch)
-}
-
-func testDebugGCStatsBlocking(ch chan int) {
- i := 0
- for {
- select {
- case ch <- i:
- return
- default:
- i++
- }
- }
-}
diff --git a/metrics/disk.go b/metrics/disk.go
deleted file mode 100644
index 25142d2ad1..0000000000
--- a/metrics/disk.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package metrics
-
-// DiskStats is the per-process disk IO stats.
-type DiskStats struct {
- ReadCount int64 // Number of read operations executed
- ReadBytes int64 // Total number of bytes read
- WriteCount int64 // Number of write operations executed
- WriteBytes int64 // Total number of bytes written
-}
diff --git a/metrics/disk_linux.go b/metrics/disk_linux.go
deleted file mode 100644
index 8d610cd674..0000000000
--- a/metrics/disk_linux.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Contains the Linux implementation of process disk IO counter retrieval.
-
-package metrics
-
-import (
- "bufio"
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
-)
-
-// ReadDiskStats retrieves the disk IO stats belonging to the current process.
-func ReadDiskStats(stats *DiskStats) error {
- // Open the process disk IO counter file
- inf, err := os.Open(fmt.Sprintf("/proc/%d/io", os.Getpid()))
- if err != nil {
- return err
- }
- defer inf.Close()
- in := bufio.NewReader(inf)
-
- // Iterate over the IO counter, and extract what we need
- for {
- // Read the next line and split to key and value
- line, err := in.ReadString('\n')
- if err != nil {
- if err == io.EOF {
- return nil
- }
- return err
- }
- parts := strings.Split(line, ":")
- if len(parts) != 2 {
- continue
- }
- key := strings.TrimSpace(parts[0])
- value, err := strconv.ParseInt(strings.TrimSpace(parts[1]), 10, 64)
- if err != nil {
- return err
- }
-
- // Update the counter based on the key
- switch key {
- case "syscr":
- stats.ReadCount = value
- case "syscw":
- stats.WriteCount = value
- case "rchar":
- stats.ReadBytes = value
- case "wchar":
- stats.WriteBytes = value
- }
- }
-}
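ReadDiskStats parses /proc/<pid>/io, a colon-separated key/value file maintained by the kernel. A sketch that dumps the raw file the parser consumes (Linux only; the keys of interest are rchar, wchar, syscr and syscw):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Dump the counter file ReadDiskStats walks line by line.
	buf, err := os.ReadFile(fmt.Sprintf("/proc/%d/io", os.Getpid()))
	if err != nil {
		panic(err) // the file only exists on Linux
	}
	fmt.Print(string(buf))
}
```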
diff --git a/metrics/disk_nop.go b/metrics/disk_nop.go
deleted file mode 100644
index 58fa4e02f8..0000000000
--- a/metrics/disk_nop.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-//go:build !linux
-// +build !linux
-
-package metrics
-
-import "errors"
-
-// ReadDiskStats retrieves the disk IO stats belonging to the current process.
-func ReadDiskStats(stats *DiskStats) error {
- return errors.New("Not implemented")
-}
diff --git a/metrics/doc.go b/metrics/doc.go
deleted file mode 100644
index 13f429c168..0000000000
--- a/metrics/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package metrics
-
-const epsilon = 0.0000000000000001
-const epsilonPercentile = .00000000001
diff --git a/metrics/ewma.go b/metrics/ewma.go
deleted file mode 100644
index 039286493e..0000000000
--- a/metrics/ewma.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package metrics
-
-import (
- "math"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// EWMAs continuously calculate an exponentially-weighted moving average
-// based on an outside source of clock ticks.
-type EWMA interface {
- Rate() float64
- Snapshot() EWMA
- Tick()
- Update(int64)
-}
-
-// NewEWMA constructs a new EWMA with the given alpha.
-func NewEWMA(alpha float64) EWMA {
- return &StandardEWMA{alpha: alpha}
-}
-
-// NewEWMA1 constructs a new EWMA for a one-minute moving average.
-func NewEWMA1() EWMA {
- return NewEWMA(1 - math.Exp(-5.0/60.0/1))
-}
-
-// NewEWMA5 constructs a new EWMA for a five-minute moving average.
-func NewEWMA5() EWMA {
- return NewEWMA(1 - math.Exp(-5.0/60.0/5))
-}
-
-// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
-func NewEWMA15() EWMA {
- return NewEWMA(1 - math.Exp(-5.0/60.0/15))
-}
-
-// EWMASnapshot is a read-only copy of another EWMA.
-type EWMASnapshot float64
-
-// Rate returns the rate of events per second at the time the snapshot was
-// taken.
-func (a EWMASnapshot) Rate() float64 { return float64(a) }
-
-// Snapshot returns the snapshot.
-func (a EWMASnapshot) Snapshot() EWMA { return a }
-
-// Tick panics.
-func (EWMASnapshot) Tick() {
- panic("Tick called on an EWMASnapshot")
-}
-
-// Update panics.
-func (EWMASnapshot) Update(int64) {
- panic("Update called on an EWMASnapshot")
-}
-
-// NilEWMA is a no-op EWMA.
-type NilEWMA struct{}
-
-// Rate is a no-op.
-func (NilEWMA) Rate() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
-
-// Tick is a no-op.
-func (NilEWMA) Tick() {}
-
-// Update is a no-op.
-func (NilEWMA) Update(n int64) {}
-
-// StandardEWMA is the standard implementation of an EWMA and tracks the number
-// of uncounted events and processes them on each tick. It uses the
-// sync/atomic package to manage uncounted events.
-type StandardEWMA struct {
- uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
- alpha float64
- rate float64
- init bool
- mutex sync.Mutex
-}
-
-// Rate returns the moving average rate of events per second.
-func (a *StandardEWMA) Rate() float64 {
- a.mutex.Lock()
- defer a.mutex.Unlock()
- return a.rate * float64(time.Second)
-}
-
-// Snapshot returns a read-only copy of the EWMA.
-func (a *StandardEWMA) Snapshot() EWMA {
- return EWMASnapshot(a.Rate())
-}
-
-// Tick ticks the clock to update the moving average. It assumes it is called
-// every five seconds.
-func (a *StandardEWMA) Tick() {
- count := atomic.LoadInt64(&a.uncounted)
- atomic.AddInt64(&a.uncounted, -count)
- instantRate := float64(count) / float64(5*time.Second)
- a.mutex.Lock()
- defer a.mutex.Unlock()
- if a.init {
- a.rate += a.alpha * (instantRate - a.rate)
- } else {
- a.init = true
- a.rate = instantRate
- }
-}
-
-// Update adds n uncounted events.
-func (a *StandardEWMA) Update(n int64) {
- atomic.AddInt64(&a.uncounted, n)
-}
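The alpha constants follow α = 1 − e^(−5/(60·N)) for an N-minute average sampled on a five-second tick, and each tick folds the instantaneous rate in via rate += α·(instant − rate). The sketch below reproduces the first two assertions of TestEWMA1 in the test file that follows:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	alpha := 1 - math.Exp(-5.0/60.0/1) // one-minute EWMA, five-second tick
	// First tick: 3 events over 5s = 0.6 events/s, adopted directly
	// because the average is not yet initialized.
	rate := 3.0 / 5.0
	fmt.Println(rate) // 0.6
	// One silent minute (12 ticks of zero events) decays it to 0.6*e^-1:
	for i := 0; i < 12; i++ {
		rate += alpha * (0 - rate)
	}
	fmt.Printf("%.17f\n", rate) // ≈ 0.22072766470286553
}
```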
diff --git a/metrics/ewma_test.go b/metrics/ewma_test.go
deleted file mode 100644
index 5b24419161..0000000000
--- a/metrics/ewma_test.go
+++ /dev/null
@@ -1,228 +0,0 @@
-package metrics
-
-import (
- "math"
- "testing"
-)
-
-func BenchmarkEWMA(b *testing.B) {
- a := NewEWMA1()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- a.Update(1)
- a.Tick()
- }
-}
-
-func TestEWMA1(t *testing.T) {
- a := NewEWMA1()
- a.Update(3)
- a.Tick()
- if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
- t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.22072766470286553-rate) > epsilon {
- t.Errorf("1 minute a.Rate(): 0.22072766470286553 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.08120116994196772-rate) > epsilon {
- t.Errorf("2 minute a.Rate(): 0.08120116994196772 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.029872241020718428-rate) > epsilon {
- t.Errorf("3 minute a.Rate(): 0.029872241020718428 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.01098938333324054-rate) > epsilon {
- t.Errorf("4 minute a.Rate(): 0.01098938333324054 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.004042768199451294-rate) > epsilon {
- t.Errorf("5 minute a.Rate(): 0.004042768199451294 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.0014872513059998212-rate) > epsilon {
- t.Errorf("6 minute a.Rate(): 0.0014872513059998212 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.0005471291793327122-rate) > epsilon {
- t.Errorf("7 minute a.Rate(): 0.0005471291793327122 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.00020127757674150815-rate) > epsilon {
- t.Errorf("8 minute a.Rate(): 0.00020127757674150815 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(7.404588245200814e-05-rate) > epsilon {
- t.Errorf("9 minute a.Rate(): 7.404588245200814e-05 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(2.7239957857491083e-05-rate) > epsilon {
- t.Errorf("10 minute a.Rate(): 2.7239957857491083e-05 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(1.0021020474147462e-05-rate) > epsilon {
- t.Errorf("11 minute a.Rate(): 1.0021020474147462e-05 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(3.6865274119969525e-06-rate) > epsilon {
- t.Errorf("12 minute a.Rate(): 3.6865274119969525e-06 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(1.3561976441886433e-06-rate) > epsilon {
- t.Errorf("13 minute a.Rate(): 1.3561976441886433e-06 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(4.989172314621449e-07-rate) > epsilon {
- t.Errorf("14 minute a.Rate(): 4.989172314621449e-07 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(1.8354139230109722e-07-rate) > epsilon {
- t.Errorf("15 minute a.Rate(): 1.8354139230109722e-07 != %v\n", rate)
- }
-}
-
-func TestEWMA5(t *testing.T) {
- a := NewEWMA5()
- a.Update(3)
- a.Tick()
- if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
- t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.49123845184678905-rate) > epsilon {
- t.Errorf("1 minute a.Rate(): 0.49123845184678905 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.4021920276213837-rate) > epsilon {
- t.Errorf("2 minute a.Rate(): 0.4021920276213837 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.32928698165641596-rate) > epsilon {
- t.Errorf("3 minute a.Rate(): 0.32928698165641596 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.269597378470333-rate) > epsilon {
- t.Errorf("4 minute a.Rate(): 0.269597378470333 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.2207276647028654-rate) > epsilon {
- t.Errorf("5 minute a.Rate(): 0.2207276647028654 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.18071652714732128-rate) > epsilon {
- t.Errorf("6 minute a.Rate(): 0.18071652714732128 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.14795817836496392-rate) > epsilon {
- t.Errorf("7 minute a.Rate(): 0.14795817836496392 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.12113791079679326-rate) > epsilon {
- t.Errorf("8 minute a.Rate(): 0.12113791079679326 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.09917933293295193-rate) > epsilon {
- t.Errorf("9 minute a.Rate(): 0.09917933293295193 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.08120116994196763-rate) > epsilon {
- t.Errorf("10 minute a.Rate(): 0.08120116994196763 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.06648189501740036-rate) > epsilon {
- t.Errorf("11 minute a.Rate(): 0.06648189501740036 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.05443077197364752-rate) > epsilon {
- t.Errorf("12 minute a.Rate(): 0.05443077197364752 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.04456414692860035-rate) > epsilon {
- t.Errorf("13 minute a.Rate(): 0.04456414692860035 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.03648603757513079-rate) > epsilon {
- t.Errorf("14 minute a.Rate(): 0.03648603757513079 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.029872241020718383-rate) > epsilon {
- t.Errorf("15 minute a.Rate(): 0.029872241020718383 != %v\n", rate)
- }
-}
-
-func TestEWMA15(t *testing.T) {
- a := NewEWMA15()
- a.Update(3)
- a.Tick()
- if rate := a.Rate(); math.Abs(0.6-rate) > epsilon {
- t.Errorf("initial a.Rate(): 0.6 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.5613041910189706-rate) > epsilon {
- t.Errorf("1 minute a.Rate(): 0.5613041910189706 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.5251039914257684-rate) > epsilon {
- t.Errorf("2 minute a.Rate(): 0.5251039914257684 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.4912384518467888-rate) > epsilon {
- t.Errorf("3 minute a.Rate(): 0.4912384518467888 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.459557003018789-rate) > epsilon {
- t.Errorf("4 minute a.Rate(): 0.459557003018789 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.4299187863442732-rate) > epsilon {
- t.Errorf("5 minute a.Rate(): 0.4299187863442732 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.4021920276213831-rate) > epsilon {
- t.Errorf("6 minute a.Rate(): 0.4021920276213831 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.37625345116383313-rate) > epsilon {
- t.Errorf("7 minute a.Rate(): 0.37625345116383313 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.3519877317060185-rate) > epsilon {
- t.Errorf("8 minute a.Rate(): 0.3519877317060185 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.3292869816564153-rate) > epsilon {
- t.Errorf("9 minute a.Rate(): 0.3292869816564153 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.3080502714195546-rate) > epsilon {
- t.Errorf("10 minute a.Rate(): 0.3080502714195546 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.2881831806538789-rate) > epsilon {
- t.Errorf("11 minute a.Rate(): 0.2881831806538789 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.26959737847033216-rate) > epsilon {
- t.Errorf("12 minute a.Rate(): 0.26959737847033216 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.2522102307052083-rate) > epsilon {
- t.Errorf("13 minute a.Rate(): 0.2522102307052083 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.23594443252115815-rate) > epsilon {
- t.Errorf("14 minute a.Rate(): 0.23594443252115815 != %v\n", rate)
- }
- elapseMinute(a)
- if rate := a.Rate(); math.Abs(0.22072766470286462-rate) > epsilon {
- t.Errorf("15 minute a.Rate(): 0.22072766470286462 != %v\n", rate)
- }
-}
-
-func elapseMinute(a EWMA) {
- for i := 0; i < 12; i++ {
- a.Tick()
- }
-}
diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go
deleted file mode 100644
index 1ad1b24988..0000000000
--- a/metrics/exp/exp.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Hook go-metrics into expvar
-// on any /debug/metrics request, load all vars from the registry into expvar, and execute regular expvar handler
-package exp
-
-import (
- "expvar"
- "fmt"
- "net/http"
- "sync"
-
- "github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
- "github.com/dominant-strategies/go-quai/metrics/prometheus"
-)
-
-type exp struct {
- expvarLock sync.Mutex // expvar panics if you try to register the same var twice, so we must probe it safely
- registry metrics.Registry
-}
-
-func (exp *exp) expHandler(w http.ResponseWriter, r *http.Request) {
- // load our variables into expvar
- exp.syncToExpvar()
-
- // now just run the official expvar handler code (which is not publicly callable, so pasted inline)
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- fmt.Fprintf(w, "{\n")
- first := true
- expvar.Do(func(kv expvar.KeyValue) {
- if !first {
- fmt.Fprintf(w, ",\n")
- }
- first = false
- fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
- })
- fmt.Fprintf(w, "\n}\n")
-}
-
-// Exp will register an expvar powered metrics handler with http.DefaultServeMux on "/debug/metrics"
-func Exp(r metrics.Registry) {
- h := ExpHandler(r)
- // this would cause a panic:
- // panic: http: multiple registrations for /debug/vars
- // http.HandleFunc("/debug/vars", e.expHandler)
- // haven't found an elegant way, so just use a different endpoint
- http.Handle("/debug/metrics", h)
- http.Handle("/debug/metrics/prometheus", prometheus.Handler(r))
-}
-
-// ExpHandler will return an expvar powered metrics handler.
-func ExpHandler(r metrics.Registry) http.Handler {
- e := exp{sync.Mutex{}, r}
- return http.HandlerFunc(e.expHandler)
-}
-
-// Setup starts a dedicated metrics server at the given address.
-// This function enables metrics reporting separate from pprof.
-func Setup(address string) {
- m := http.NewServeMux()
- m.Handle("/debug/metrics", ExpHandler(metrics.DefaultRegistry))
- m.Handle("/debug/metrics/prometheus", prometheus.Handler(metrics.DefaultRegistry))
- log.Info("Starting metrics server", "addr", fmt.Sprintf("http://%s/debug/metrics", address))
- go func() {
- if err := http.ListenAndServe(address, m); err != nil {
- log.Error("Failure in running metrics server", "err", err)
- }
- }()
-}
-
-func (exp *exp) getInt(name string) *expvar.Int {
- var v *expvar.Int
- exp.expvarLock.Lock()
- p := expvar.Get(name)
- if p != nil {
- v = p.(*expvar.Int)
- } else {
- v = new(expvar.Int)
- expvar.Publish(name, v)
- }
- exp.expvarLock.Unlock()
- return v
-}
-
-func (exp *exp) getFloat(name string) *expvar.Float {
- var v *expvar.Float
- exp.expvarLock.Lock()
- p := expvar.Get(name)
- if p != nil {
- v = p.(*expvar.Float)
- } else {
- v = new(expvar.Float)
- expvar.Publish(name, v)
- }
- exp.expvarLock.Unlock()
- return v
-}
-
-func (exp *exp) publishCounter(name string, metric metrics.Counter) {
- v := exp.getInt(name)
- v.Set(metric.Count())
-}
-
-func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
- v := exp.getInt(name)
- v.Set(metric.Value())
-}
-func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
- exp.getFloat(name).Set(metric.Value())
-}
-
-func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- exp.getInt(name + ".count").Set(h.Count())
- exp.getFloat(name + ".min").Set(float64(h.Min()))
- exp.getFloat(name + ".max").Set(float64(h.Max()))
- exp.getFloat(name + ".mean").Set(h.Mean())
- exp.getFloat(name + ".std-dev").Set(h.StdDev())
- exp.getFloat(name + ".50-percentile").Set(ps[0])
- exp.getFloat(name + ".75-percentile").Set(ps[1])
- exp.getFloat(name + ".95-percentile").Set(ps[2])
- exp.getFloat(name + ".99-percentile").Set(ps[3])
- exp.getFloat(name + ".999-percentile").Set(ps[4])
-}
-
-func (exp *exp) publishMeter(name string, metric metrics.Meter) {
- m := metric.Snapshot()
- exp.getInt(name + ".count").Set(m.Count())
- exp.getFloat(name + ".one-minute").Set(m.Rate1())
- exp.getFloat(name + ".five-minute").Set(m.Rate5())
- exp.getFloat(name + ".fifteen-minute").Set(m.Rate15())
- exp.getFloat(name + ".mean").Set(m.RateMean())
-}
-
-func (exp *exp) publishTimer(name string, metric metrics.Timer) {
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- exp.getInt(name + ".count").Set(t.Count())
- exp.getFloat(name + ".min").Set(float64(t.Min()))
- exp.getFloat(name + ".max").Set(float64(t.Max()))
- exp.getFloat(name + ".mean").Set(t.Mean())
- exp.getFloat(name + ".std-dev").Set(t.StdDev())
- exp.getFloat(name + ".50-percentile").Set(ps[0])
- exp.getFloat(name + ".75-percentile").Set(ps[1])
- exp.getFloat(name + ".95-percentile").Set(ps[2])
- exp.getFloat(name + ".99-percentile").Set(ps[3])
- exp.getFloat(name + ".999-percentile").Set(ps[4])
- exp.getFloat(name + ".one-minute").Set(t.Rate1())
- exp.getFloat(name + ".five-minute").Set(t.Rate5())
- exp.getFloat(name + ".fifteen-minute").Set(t.Rate15())
- exp.getFloat(name + ".mean-rate").Set(t.RateMean())
-}
-
-func (exp *exp) publishResettingTimer(name string, metric metrics.ResettingTimer) {
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{50, 75, 95, 99})
- exp.getInt(name + ".count").Set(int64(len(t.Values())))
- exp.getFloat(name + ".mean").Set(t.Mean())
- exp.getInt(name + ".50-percentile").Set(ps[0])
- exp.getInt(name + ".75-percentile").Set(ps[1])
- exp.getInt(name + ".95-percentile").Set(ps[2])
- exp.getInt(name + ".99-percentile").Set(ps[3])
-}
-
-func (exp *exp) syncToExpvar() {
- exp.registry.Each(func(name string, i interface{}) {
- switch i := i.(type) {
- case metrics.Counter:
- exp.publishCounter(name, i)
- case metrics.Gauge:
- exp.publishGauge(name, i)
- case metrics.GaugeFloat64:
- exp.publishGaugeFloat64(name, i)
- case metrics.Histogram:
- exp.publishHistogram(name, i)
- case metrics.Meter:
- exp.publishMeter(name, i)
- case metrics.Timer:
- exp.publishTimer(name, i)
- case metrics.ResettingTimer:
- exp.publishResettingTimer(name, i)
- default:
- panic(fmt.Sprintf("unsupported type for '%s': %T", name, i))
- }
- })
-}
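The handler above re-implements expvar's stock JSON dump because the official handler is not publicly callable. A stdlib-only sketch of the behavior it mimics:

```go
package main

import (
	"expvar"
	"log"
	"net/http"
)

func main() {
	// expvar registers /debug/vars on http.DefaultServeMux at init time;
	// every published var is served in one JSON object, the same format
	// exp.expHandler reproduces under /debug/metrics.
	hits := expvar.NewInt("hits")
	hits.Add(1)
	log.Println("try: curl http://127.0.0.1:8080/debug/vars")
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```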
diff --git a/metrics/gauge.go b/metrics/gauge.go
deleted file mode 100644
index b6b2758b0d..0000000000
--- a/metrics/gauge.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package metrics
-
-import "sync/atomic"
-
-// Gauges hold an int64 value that can be set arbitrarily.
-type Gauge interface {
- Snapshot() Gauge
- Update(int64)
- Dec(int64)
- Inc(int64)
- Value() int64
-}
-
-// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
-// new StandardGauge.
-func GetOrRegisterGauge(name string, r Registry) Gauge {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewGauge).(Gauge)
-}
-
-// NewGauge constructs a new StandardGauge.
-func NewGauge() Gauge {
- if !Enabled {
- return NilGauge{}
- }
- return &StandardGauge{0}
-}
-
-// NewRegisteredGauge constructs and registers a new StandardGauge.
-func NewRegisteredGauge(name string, r Registry) Gauge {
- c := NewGauge()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewFunctionalGauge constructs a new FunctionalGauge.
-func NewFunctionalGauge(f func() int64) Gauge {
- if !Enabled {
- return NilGauge{}
- }
- return &FunctionalGauge{value: f}
-}
-
-// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
-func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
- c := NewFunctionalGauge(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeSnapshot is a read-only copy of another Gauge.
-type GaugeSnapshot int64
-
-// Snapshot returns the snapshot.
-func (g GaugeSnapshot) Snapshot() Gauge { return g }
-
-// Update panics.
-func (GaugeSnapshot) Update(int64) {
- panic("Update called on a GaugeSnapshot")
-}
-
-// Dec panics.
-func (GaugeSnapshot) Dec(int64) {
- panic("Dec called on a GaugeSnapshot")
-}
-
-// Inc panics.
-func (GaugeSnapshot) Inc(int64) {
- panic("Inc called on a GaugeSnapshot")
-}
-
-// Value returns the value at the time the snapshot was taken.
-func (g GaugeSnapshot) Value() int64 { return int64(g) }
-
-// NilGauge is a no-op Gauge.
-type NilGauge struct{}
-
-// Snapshot is a no-op.
-func (NilGauge) Snapshot() Gauge { return NilGauge{} }
-
-// Update is a no-op.
-func (NilGauge) Update(v int64) {}
-
-// Dec is a no-op.
-func (NilGauge) Dec(i int64) {}
-
-// Inc is a no-op.
-func (NilGauge) Inc(i int64) {}
-
-// Value is a no-op.
-func (NilGauge) Value() int64 { return 0 }
-
-// StandardGauge is the standard implementation of a Gauge and uses the
-// sync/atomic package to manage a single int64 value.
-type StandardGauge struct {
- value int64
-}
-
-// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGauge) Snapshot() Gauge {
- return GaugeSnapshot(g.Value())
-}
-
-// Update updates the gauge's value.
-func (g *StandardGauge) Update(v int64) {
- atomic.StoreInt64(&g.value, v)
-}
-
-// Value returns the gauge's current value.
-func (g *StandardGauge) Value() int64 {
- return atomic.LoadInt64(&g.value)
-}
-
-// Dec decrements the gauge's current value by the given amount.
-func (g *StandardGauge) Dec(i int64) {
- atomic.AddInt64(&g.value, -i)
-}
-
-// Inc increments the gauge's current value by the given amount.
-func (g *StandardGauge) Inc(i int64) {
- atomic.AddInt64(&g.value, i)
-}
-
-// FunctionalGauge computes its value from a given function.
-type FunctionalGauge struct {
- value func() int64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGauge) Value() int64 {
- return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGauge) Update(int64) {
- panic("Update called on a FunctionalGauge")
-}
-
-// Dec panics.
-func (FunctionalGauge) Dec(int64) {
- panic("Dec called on a FunctionalGauge")
-}
-
-// Inc panics.
-func (FunctionalGauge) Inc(int64) {
- panic("Inc called on a FunctionalGauge")
-}
diff --git a/metrics/gauge_float64.go b/metrics/gauge_float64.go
deleted file mode 100644
index 66819c9577..0000000000
--- a/metrics/gauge_float64.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package metrics
-
-import "sync"
-
-// GaugeFloat64s hold a float64 value that can be set arbitrarily.
-type GaugeFloat64 interface {
- Snapshot() GaugeFloat64
- Update(float64)
- Value() float64
-}
-
-// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
-// new StandardGaugeFloat64.
-func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
-}
-
-// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
-func NewGaugeFloat64() GaugeFloat64 {
- if !Enabled {
- return NilGaugeFloat64{}
- }
- return &StandardGaugeFloat64{
- value: 0.0,
- }
-}
-
-// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
-func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
- c := NewGaugeFloat64()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
-func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
- if !Enabled {
- return NilGaugeFloat64{}
- }
- return &FunctionalGaugeFloat64{value: f}
-}
-
-// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
-func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
- c := NewFunctionalGaugeFloat64(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
-type GaugeFloat64Snapshot float64
-
-// Snapshot returns the snapshot.
-func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
-
-// Update panics.
-func (GaugeFloat64Snapshot) Update(float64) {
- panic("Update called on a GaugeFloat64Snapshot")
-}
-
-// Value returns the value at the time the snapshot was taken.
-func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
-
-// NilGaugeFloat64 is a no-op GaugeFloat64.
-type NilGaugeFloat64 struct{}
-
-// Snapshot is a no-op.
-func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
-
-// Update is a no-op.
-func (NilGaugeFloat64) Update(v float64) {}
-
-// Value is a no-op.
-func (NilGaugeFloat64) Value() float64 { return 0.0 }
-
-// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
-// sync.Mutex to manage a single float64 value.
-type StandardGaugeFloat64 struct {
- mutex sync.Mutex
- value float64
-}
-
-// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
- return GaugeFloat64Snapshot(g.Value())
-}
-
-// Update updates the gauge's value.
-func (g *StandardGaugeFloat64) Update(v float64) {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- g.value = v
-}
-
-// Value returns the gauge's current value.
-func (g *StandardGaugeFloat64) Value() float64 {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- return g.value
-}
-
-// FunctionalGaugeFloat64 computes its value from a given function.
-type FunctionalGaugeFloat64 struct {
- value func() float64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGaugeFloat64) Value() float64 {
- return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGaugeFloat64) Update(float64) {
- panic("Update called on a FunctionalGaugeFloat64")
-}
diff --git a/metrics/gauge_float64_test.go b/metrics/gauge_float64_test.go
deleted file mode 100644
index 02b75580c4..0000000000
--- a/metrics/gauge_float64_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package metrics
-
-import "testing"
-
-func BenchmarkGaugeFloat64(b *testing.B) {
- g := NewGaugeFloat64()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- g.Update(float64(i))
- }
-}
-
-func TestGaugeFloat64(t *testing.T) {
- g := NewGaugeFloat64()
- g.Update(47.0)
- if v := g.Value(); 47.0 != v {
- t.Errorf("g.Value(): 47.0 != %v\n", v)
- }
-}
-
-func TestGaugeFloat64Snapshot(t *testing.T) {
- g := NewGaugeFloat64()
- g.Update(47.0)
- snapshot := g.Snapshot()
- g.Update(float64(0))
- if v := snapshot.Value(); 47.0 != v {
- t.Errorf("g.Value(): 47.0 != %v\n", v)
- }
-}
-
-func TestGetOrRegisterGaugeFloat64(t *testing.T) {
- r := NewRegistry()
- NewRegisteredGaugeFloat64("foo", r).Update(47.0)
- t.Logf("registry: %v", r)
- if g := GetOrRegisterGaugeFloat64("foo", r); 47.0 != g.Value() {
- t.Fatal(g)
- }
-}
-
-func TestFunctionalGaugeFloat64(t *testing.T) {
- var counter float64
- fg := NewFunctionalGaugeFloat64(func() float64 {
- counter++
- return counter
- })
- fg.Value()
- fg.Value()
- if counter != 2 {
- t.Error("counter != 2")
- }
-}
-
-func TestGetOrRegisterFunctionalGaugeFloat64(t *testing.T) {
- r := NewRegistry()
- NewRegisteredFunctionalGaugeFloat64("foo", r, func() float64 { return 47 })
- if g := GetOrRegisterGaugeFloat64("foo", r); g.Value() != 47 {
- t.Fatal(g)
- }
-}
diff --git a/metrics/gauge_test.go b/metrics/gauge_test.go
deleted file mode 100644
index 3aee143455..0000000000
--- a/metrics/gauge_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "testing"
-)
-
-func BenchmarkGauge(b *testing.B) {
- g := NewGauge()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- g.Update(int64(i))
- }
-}
-
-func TestGauge(t *testing.T) {
- g := NewGauge()
- g.Update(int64(47))
- if v := g.Value(); v != 47 {
- t.Errorf("g.Value(): 47 != %v\n", v)
- }
-}
-
-func TestGaugeSnapshot(t *testing.T) {
- g := NewGauge()
- g.Update(int64(47))
- snapshot := g.Snapshot()
- g.Update(int64(0))
- if v := snapshot.Value(); v != 47 {
- t.Errorf("g.Value(): 47 != %v\n", v)
- }
-}
-
-func TestGetOrRegisterGauge(t *testing.T) {
- r := NewRegistry()
- NewRegisteredGauge("foo", r).Update(47)
- if g := GetOrRegisterGauge("foo", r); g.Value() != 47 {
- t.Fatal(g)
- }
-}
-
-func TestFunctionalGauge(t *testing.T) {
- var counter int64
- fg := NewFunctionalGauge(func() int64 {
- counter++
- return counter
- })
- fg.Value()
- fg.Value()
- if counter != 2 {
- t.Error("counter != 2")
- }
-}
-
-func TestGetOrRegisterFunctionalGauge(t *testing.T) {
- r := NewRegistry()
- NewRegisteredFunctionalGauge("foo", r, func() int64 { return 47 })
- if g := GetOrRegisterGauge("foo", r); g.Value() != 47 {
- t.Fatal(g)
- }
-}
-
-func ExampleGetOrRegisterGauge() {
- m := "server.bytes_sent"
- g := GetOrRegisterGauge(m, nil)
- g.Update(47)
- fmt.Println(g.Value()) // Output: 47
-}
diff --git a/metrics/graphite.go b/metrics/graphite.go
deleted file mode 100644
index 142eec86be..0000000000
--- a/metrics/graphite.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package metrics
-
-import (
- "bufio"
- "fmt"
- "log"
- "net"
- "strconv"
- "strings"
- "time"
-)
-
-// GraphiteConfig provides a container with configuration parameters for
-// the Graphite exporter
-type GraphiteConfig struct {
- Addr *net.TCPAddr // Network address to connect to
- Registry Registry // Registry to be exported
- FlushInterval time.Duration // Flush interval
- DurationUnit time.Duration // Time conversion unit for durations
- Prefix string // Prefix to be prepended to metric names
- Percentiles []float64 // Percentiles to export from timers and histograms
-}
-
-// Graphite is a blocking exporter function which reports metrics in r
-// to a graphite server located at addr, flushing them every d duration
-// and prepending metric names with prefix.
-func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
- GraphiteWithConfig(GraphiteConfig{
- Addr: addr,
- Registry: r,
- FlushInterval: d,
- DurationUnit: time.Nanosecond,
- Prefix: prefix,
- Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
- })
-}
-
-// GraphiteWithConfig is a blocking exporter function just like Graphite,
-// but it takes a GraphiteConfig instead.
-func GraphiteWithConfig(c GraphiteConfig) {
- log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
- for range time.Tick(c.FlushInterval) {
- if err := graphite(&c); nil != err {
- log.Println(err)
- }
- }
-}
-
-// GraphiteOnce performs a single submission to Graphite, returning a
-// non-nil error on failed connections. This can be used in a loop
-// similar to GraphiteWithConfig for custom error handling.
-func GraphiteOnce(c GraphiteConfig) error {
- log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
- return graphite(&c)
-}
-
-func graphite(c *GraphiteConfig) error {
- now := time.Now().Unix()
- du := float64(c.DurationUnit)
- conn, err := net.DialTCP("tcp", nil, c.Addr)
- if nil != err {
- return err
- }
- defer conn.Close()
- w := bufio.NewWriter(conn)
- c.Registry.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
- case Gauge:
- fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
- case GaugeFloat64:
- fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles(c.Percentiles)
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
- fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
- fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
- fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
- fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
- for psIdx, psKey := range c.Percentiles {
- key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
- fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
- }
- case Meter:
- m := metric.Snapshot()
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
- fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
- fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
- fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
- fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles(c.Percentiles)
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
- fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
- fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
- fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
- fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
- for psIdx, psKey := range c.Percentiles {
- key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
- fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
- }
- fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
- fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
- fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
- fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
- }
- w.Flush()
- })
- return nil
-}
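
The exporter above speaks Graphite's plaintext protocol: one `<path> <value> <unix-timestamp>` line per datapoint, written over a TCP connection and flushed per registry sweep. A hedged sketch of emitting a single reading by hand (the address and metric path are placeholders; :2003 is merely the conventional carbon listener port):

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	// Placeholder address for a Graphite carbon plaintext listener.
	conn, err := net.Dial("tcp", "localhost:2003")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	w := bufio.NewWriter(conn)
	// Graphite plaintext format: "<path> <value> <unix-ts>\n".
	fmt.Fprintf(w, "%s.%s.value %d %d\n",
		"some.prefix", "server.bytes_sent", 47, time.Now().Unix())
	w.Flush() // flush buffered lines before the connection closes
}
```
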
diff --git a/metrics/graphite_test.go b/metrics/graphite_test.go
deleted file mode 100644
index c797c781df..0000000000
--- a/metrics/graphite_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package metrics
-
-import (
- "net"
- "time"
-)
-
-func ExampleGraphite() {
- addr, _ := net.ResolveTCPAddr("tcp", ":2003")
- go Graphite(DefaultRegistry, 1*time.Second, "some.prefix", addr)
-}
-
-func ExampleGraphiteWithConfig() {
- addr, _ := net.ResolveTCPAddr("tcp", ":2003")
- go GraphiteWithConfig(GraphiteConfig{
- Addr: addr,
- Registry: DefaultRegistry,
- FlushInterval: 1 * time.Second,
- DurationUnit: time.Millisecond,
- Percentiles: []float64{0.5, 0.75, 0.99, 0.999},
- })
-}
diff --git a/metrics/healthcheck.go b/metrics/healthcheck.go
deleted file mode 100644
index f1ae31e34a..0000000000
--- a/metrics/healthcheck.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package metrics
-
-// Healthchecks hold an error value describing an arbitrary up/down status.
-type Healthcheck interface {
- Check()
- Error() error
- Healthy()
- Unhealthy(error)
-}
-
-// NewHealthcheck constructs a new Healthcheck which will use the given
-// function to update its status.
-func NewHealthcheck(f func(Healthcheck)) Healthcheck {
- if !Enabled {
- return NilHealthcheck{}
- }
- return &StandardHealthcheck{nil, f}
-}
-
-// NilHealthcheck is a no-op.
-type NilHealthcheck struct{}
-
-// Check is a no-op.
-func (NilHealthcheck) Check() {}
-
-// Error is a no-op.
-func (NilHealthcheck) Error() error { return nil }
-
-// Healthy is a no-op.
-func (NilHealthcheck) Healthy() {}
-
-// Unhealthy is a no-op.
-func (NilHealthcheck) Unhealthy(error) {}
-
-// StandardHealthcheck is the standard implementation of a Healthcheck and
-// stores the status and a function to call to update the status.
-type StandardHealthcheck struct {
- err error
- f func(Healthcheck)
-}
-
-// Check runs the healthcheck function to update the healthcheck's status.
-func (h *StandardHealthcheck) Check() {
- h.f(h)
-}
-
-// Error returns the healthcheck's status, which will be nil if it is healthy.
-func (h *StandardHealthcheck) Error() error {
- return h.err
-}
-
-// Healthy marks the healthcheck as healthy.
-func (h *StandardHealthcheck) Healthy() {
- h.err = nil
-}
-
-// Unhealthy marks the healthcheck as unhealthy. The error is stored and
-// may be retrieved by the Error method.
-func (h *StandardHealthcheck) Unhealthy(err error) {
- h.err = err
-}
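
The healthcheck design above inverts the usual flow: the probe function receives the Healthcheck itself and records its own verdict via Healthy/Unhealthy. A sketch of how the removed API was driven — it compiles only against the tree before this change, and `ping` is a stand-in probe:

```go
package main

import (
	"errors"
	"log"

	"github.com/dominant-strategies/go-quai/metrics"
)

func main() {
	metrics.Enabled = true // otherwise NewHealthcheck returns a no-op NilHealthcheck

	// The check closure receives the healthcheck and records its verdict.
	hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
		if err := ping(); err != nil {
			h.Unhealthy(err)
			return
		}
		h.Healthy()
	})
	hc.Check()
	if err := hc.Error(); err != nil {
		log.Printf("unhealthy: %v", err)
	}
}

// ping is a stand-in for a real probe.
func ping() error { return errors.New("connection refused") }
```
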
diff --git a/metrics/histogram.go b/metrics/histogram.go
deleted file mode 100644
index 2c54ce8b40..0000000000
--- a/metrics/histogram.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package metrics
-
-// Histograms calculate distribution statistics from a series of int64 values.
-type Histogram interface {
- Clear()
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Sample() Sample
- Snapshot() Histogram
- StdDev() float64
- Sum() int64
- Update(int64)
- Variance() float64
-}
-
-// GetOrRegisterHistogram returns an existing Histogram or constructs and
-// registers a new StandardHistogram.
-func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
-}
-
-// GetOrRegisterHistogramLazy returns an existing Histogram or constructs and
-// registers a new StandardHistogram, constructing the Sample lazily if needed.
-func GetOrRegisterHistogramLazy(name string, r Registry, s func() Sample) Histogram {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, func() Histogram { return NewHistogram(s()) }).(Histogram)
-}
-
-// NewHistogram constructs a new StandardHistogram from a Sample.
-func NewHistogram(s Sample) Histogram {
- if !Enabled {
- return NilHistogram{}
- }
- return &StandardHistogram{sample: s}
-}
-
-// NewRegisteredHistogram constructs and registers a new StandardHistogram from
-// a Sample.
-func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
- c := NewHistogram(s)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// HistogramSnapshot is a read-only copy of another Histogram.
-type HistogramSnapshot struct {
- sample *SampleSnapshot
-}
-
-// Clear panics.
-func (*HistogramSnapshot) Clear() {
- panic("Clear called on a HistogramSnapshot")
-}
-
-// Count returns the number of samples recorded at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample at the time the snapshot
-// was taken.
-func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the sample
-// at the time the snapshot was taken.
-func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *HistogramSnapshot) Sample() Sample { return h.sample }
-
-// Snapshot returns the snapshot.
-func (h *HistogramSnapshot) Snapshot() Histogram { return h }
-
-// StdDev returns the standard deviation of the values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample at the time the snapshot was taken.
-func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
-
-// Update panics.
-func (*HistogramSnapshot) Update(int64) {
- panic("Update called on a HistogramSnapshot")
-}
-
-// Variance returns the variance of inputs at the time the snapshot was taken.
-func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
-
-// NilHistogram is a no-op Histogram.
-type NilHistogram struct{}
-
-// Clear is a no-op.
-func (NilHistogram) Clear() {}
-
-// Count is a no-op.
-func (NilHistogram) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilHistogram) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilHistogram) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilHistogram) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilHistogram) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Sample is a no-op.
-func (NilHistogram) Sample() Sample { return NilSample{} }
-
-// Snapshot is a no-op.
-func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
-
-// StdDev is a no-op.
-func (NilHistogram) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilHistogram) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilHistogram) Update(v int64) {}
-
-// Variance is a no-op.
-func (NilHistogram) Variance() float64 { return 0.0 }
-
-// StandardHistogram is the standard implementation of a Histogram and uses a
-// Sample to bound its memory use.
-type StandardHistogram struct {
- sample Sample
-}
-
-// Clear clears the histogram and its sample.
-func (h *StandardHistogram) Clear() { h.sample.Clear() }
-
-// Count returns the number of samples recorded since the histogram was last
-// cleared.
-func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample.
-func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample.
-func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample.
-func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (h *StandardHistogram) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *StandardHistogram) Sample() Sample { return h.sample }
-
-// Snapshot returns a read-only copy of the histogram.
-func (h *StandardHistogram) Snapshot() Histogram {
- return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample.
-func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
-
-// Update samples a new value.
-func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
-
-// Variance returns the variance of the values in the sample.
-func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
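
A histogram here is a thin statistics layer over a Sample, which bounds memory by keeping a reservoir of observed values. A usage sketch against the removed API (pre-change tree only; the 1028 reservoir size matches the convention used in memory.md further down):

```go
package main

import (
	"fmt"

	"github.com/dominant-strategies/go-quai/metrics"
)

func main() {
	metrics.Enabled = true // otherwise NewHistogram returns a no-op NilHistogram

	// The uniform sample caps memory: it retains at most 1028 of the
	// observed values and computes statistics from that reservoir.
	h := metrics.NewHistogram(metrics.NewUniformSample(1028))
	for i := int64(1); i <= 100; i++ {
		h.Update(i)
	}
	ps := h.Percentiles([]float64{0.5, 0.95})
	fmt.Println(h.Count(), h.Mean(), ps[0], ps[1])
}
```
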
diff --git a/metrics/histogram_test.go b/metrics/histogram_test.go
deleted file mode 100644
index 7c9f42fcec..0000000000
--- a/metrics/histogram_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package metrics
-
-import "testing"
-
-func BenchmarkHistogram(b *testing.B) {
- h := NewHistogram(NewUniformSample(100))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- h.Update(int64(i))
- }
-}
-
-func TestGetOrRegisterHistogram(t *testing.T) {
- r := NewRegistry()
- s := NewUniformSample(100)
- NewRegisteredHistogram("foo", r, s).Update(47)
- if h := GetOrRegisterHistogram("foo", r, s); h.Count() != 1 {
- t.Fatal(h)
- }
-}
-
-func TestHistogram10000(t *testing.T) {
- h := NewHistogram(NewUniformSample(100000))
- for i := 1; i <= 10000; i++ {
- h.Update(int64(i))
- }
- testHistogram10000(t, h)
-}
-
-func TestHistogramEmpty(t *testing.T) {
- h := NewHistogram(NewUniformSample(100))
- if count := h.Count(); count != 0 {
- t.Errorf("h.Count(): 0 != %v\n", count)
- }
- if min := h.Min(); min != 0 {
- t.Errorf("h.Min(): 0 != %v\n", min)
- }
- if max := h.Max(); max != 0 {
- t.Errorf("h.Max(): 0 != %v\n", max)
- }
- if mean := h.Mean(); mean != 0.0 {
- t.Errorf("h.Mean(): 0.0 != %v\n", mean)
- }
- if stdDev := h.StdDev(); stdDev != 0.0 {
- t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
- }
- ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
- if ps[0] != 0.0 {
- t.Errorf("median: 0.0 != %v\n", ps[0])
- }
- if ps[1] != 0.0 {
- t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
- }
- if ps[2] != 0.0 {
- t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
- }
-}
-
-func TestHistogramSnapshot(t *testing.T) {
- h := NewHistogram(NewUniformSample(100000))
- for i := 1; i <= 10000; i++ {
- h.Update(int64(i))
- }
- snapshot := h.Snapshot()
- h.Update(0)
- testHistogram10000(t, snapshot)
-}
-
-func testHistogram10000(t *testing.T, h Histogram) {
- if count := h.Count(); count != 10000 {
- t.Errorf("h.Count(): 10000 != %v\n", count)
- }
- if min := h.Min(); min != 1 {
- t.Errorf("h.Min(): 1 != %v\n", min)
- }
- if max := h.Max(); max != 10000 {
- t.Errorf("h.Max(): 10000 != %v\n", max)
- }
- if mean := h.Mean(); mean != 5000.5 {
- t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
- }
- if stdDev := h.StdDev(); stdDev != 2886.751331514372 {
- t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
- }
- ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
- if ps[0] != 5000.5 {
- t.Errorf("median: 5000.5 != %v\n", ps[0])
- }
- if ps[1] != 7500.75 {
- t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
- }
- if ps[2] != 9900.99 {
- t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
- }
-}
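
The expected values in testHistogram10000 follow from rank-based linear interpolation: percentile p is read at position p·(n+1) in the sorted values, so for n = 10000 the median lands halfway between the 5000th and 5001st values. A standalone check, assuming that interpolation rule (the Sample implementation itself is not shown in this hunk):

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// percentile reads the value at rank p*(n+1) in the sorted slice,
// interpolating linearly between neighboring order statistics.
func percentile(values []float64, p float64) float64 {
	sort.Float64s(values)
	n := len(values)
	pos := p * float64(n+1)
	switch {
	case pos < 1.0:
		return values[0]
	case pos >= float64(n):
		return values[n-1]
	}
	lower := values[int(pos)-1]
	upper := values[int(pos)]
	return lower + (pos-math.Floor(pos))*(upper-lower)
}

func main() {
	vals := make([]float64, 10000)
	for i := range vals {
		vals[i] = float64(i + 1)
	}
	// Matches the assertions above: 5000.5, 7500.75, 9900.99.
	fmt.Println(percentile(vals, 0.5), percentile(vals, 0.75), percentile(vals, 0.99))
}
```
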
diff --git a/metrics/influxdb/LICENSE b/metrics/influxdb/LICENSE
deleted file mode 100644
index e5bf20cdb5..0000000000
--- a/metrics/influxdb/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2015 Vincent Rischmann
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/metrics/influxdb/README.md b/metrics/influxdb/README.md
deleted file mode 100644
index b76b1a3f99..0000000000
--- a/metrics/influxdb/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-go-metrics-influxdb
-===================
-
-This is a reporter for the [go-metrics](https://github.com/rcrowley/go-metrics) library which will post the metrics to [InfluxDB](https://influxdb.com/).
-
-Note
-----
-
-This is only compatible with InfluxDB 0.9+.
-
-Usage
------
-
-```go
-import "github.com/vrischmann/go-metrics-influxdb"
-
-go influxdb.InfluxDB(
- metrics.DefaultRegistry, // metrics registry
- time.Second * 10, // interval
- "http://localhost:8086", // the InfluxDB url
- "mydb", // your InfluxDB database
- "myuser", // your InfluxDB user
- "mypassword", // your InfluxDB password
-)
-```
-
-License
--------
-
-go-metrics-influxdb is licensed under the MIT license. See the LICENSE file for details.
diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go
deleted file mode 100644
index 4217bca5ba..0000000000
--- a/metrics/influxdb/influxdb.go
+++ /dev/null
@@ -1,259 +0,0 @@
-package influxdb
-
-import (
- "fmt"
- uurl "net/url"
- "time"
-
- "github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
- "github.com/influxdata/influxdb/client"
-)
-
-type reporter struct {
- reg metrics.Registry
- interval time.Duration
-
- url uurl.URL
- database string
- username string
- password string
- namespace string
- tags map[string]string
-
- client *client.Client
-
- cache map[string]int64
-}
-
-// InfluxDB starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval.
-func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, password, namespace string) {
- InfluxDBWithTags(r, d, url, database, username, password, namespace, nil)
-}
-
-// InfluxDBWithTags starts an InfluxDB reporter which will post metrics from the given metrics.Registry at each d interval with the specified tags.
-func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) {
- u, err := uurl.Parse(url)
- if err != nil {
- log.Warn("Unable to parse InfluxDB", "url", url, "err", err)
- return
- }
-
- rep := &reporter{
- reg: r,
- interval: d,
- url: *u,
- database: database,
- username: username,
- password: password,
- namespace: namespace,
- tags: tags,
- cache: make(map[string]int64),
- }
- if err := rep.makeClient(); err != nil {
- log.Warn("Unable to make InfluxDB client", "err", err)
- return
- }
-
- rep.run()
-}
-
-// InfluxDBWithTagsOnce runs an InfluxDB reporter once, posting the given metrics.Registry with the specified tags.
-func InfluxDBWithTagsOnce(r metrics.Registry, url, database, username, password, namespace string, tags map[string]string) error {
- u, err := uurl.Parse(url)
- if err != nil {
- return fmt.Errorf("unable to parse InfluxDB. url: %s, err: %v", url, err)
- }
-
- rep := &reporter{
- reg: r,
- url: *u,
- database: database,
- username: username,
- password: password,
- namespace: namespace,
- tags: tags,
- cache: make(map[string]int64),
- }
- if err := rep.makeClient(); err != nil {
- return fmt.Errorf("unable to make InfluxDB client. err: %v", err)
- }
-
- if err := rep.send(); err != nil {
- return fmt.Errorf("unable to send to InfluxDB. err: %v", err)
- }
-
- return nil
-}
-
-func (r *reporter) makeClient() (err error) {
- r.client, err = client.NewClient(client.Config{
- URL: r.url,
- Username: r.username,
- Password: r.password,
- Timeout: 10 * time.Second,
- })
-
- return
-}
-
-func (r *reporter) run() {
- intervalTicker := time.NewTicker(r.interval)
- pingTicker := time.NewTicker(time.Second * 5)
-
- for {
- select {
- case <-intervalTicker.C:
- if err := r.send(); err != nil {
- log.Warn("Unable to send to InfluxDB", "err", err)
- }
- case <-pingTicker.C:
- _, _, err := r.client.Ping()
- if err != nil {
- log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err)
-
- if err = r.makeClient(); err != nil {
- log.Warn("Unable to make InfluxDB client", "err", err)
- }
- }
- }
- }
-}
-
-func (r *reporter) send() error {
- var pts []client.Point
-
- r.reg.Each(func(name string, i interface{}) {
- now := time.Now()
- namespace := r.namespace
-
- switch metric := i.(type) {
- case metrics.Counter:
- v := metric.Count()
- l := r.cache[name]
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.count", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "value": v - l,
- },
- Time: now,
- })
- r.cache[name] = v
- case metrics.Gauge:
- ms := metric.Snapshot()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "value": ms.Value(),
- },
- Time: now,
- })
- case metrics.GaugeFloat64:
- ms := metric.Snapshot()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.gauge", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "value": ms.Value(),
- },
- Time: now,
- })
- case metrics.Histogram:
- ms := metric.Snapshot()
-
- if ms.Count() > 0 {
- ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.histogram", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": ms.Count(),
- "max": ms.Max(),
- "mean": ms.Mean(),
- "min": ms.Min(),
- "stddev": ms.StdDev(),
- "variance": ms.Variance(),
- "p50": ps[0],
- "p75": ps[1],
- "p95": ps[2],
- "p99": ps[3],
- "p999": ps[4],
- "p9999": ps[5],
- },
- Time: now,
- })
- }
- case metrics.Meter:
- ms := metric.Snapshot()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.meter", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": ms.Count(),
- "m1": ms.Rate1(),
- "m5": ms.Rate5(),
- "m15": ms.Rate15(),
- "mean": ms.RateMean(),
- },
- Time: now,
- })
- case metrics.Timer:
- ms := metric.Snapshot()
- ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.timer", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": ms.Count(),
- "max": ms.Max(),
- "mean": ms.Mean(),
- "min": ms.Min(),
- "stddev": ms.StdDev(),
- "variance": ms.Variance(),
- "p50": ps[0],
- "p75": ps[1],
- "p95": ps[2],
- "p99": ps[3],
- "p999": ps[4],
- "p9999": ps[5],
- "m1": ms.Rate1(),
- "m5": ms.Rate5(),
- "m15": ms.Rate15(),
- "meanrate": ms.RateMean(),
- },
- Time: now,
- })
- case metrics.ResettingTimer:
- t := metric.Snapshot()
-
- if len(t.Values()) > 0 {
- ps := t.Percentiles([]float64{50, 95, 99})
- val := t.Values()
- pts = append(pts, client.Point{
- Measurement: fmt.Sprintf("%s%s.span", namespace, name),
- Tags: r.tags,
- Fields: map[string]interface{}{
- "count": len(val),
- "max": val[len(val)-1],
- "mean": t.Mean(),
- "min": val[0],
- "p50": ps[0],
- "p95": ps[1],
- "p99": ps[2],
- },
- Time: now,
- })
- }
- }
- })
-
- bps := client.BatchPoints{
- Points: pts,
- Database: r.database,
- }
-
- _, err := r.client.Write(bps)
- return err
-}
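
Note how the reporter above handles counters: r.cache remembers the previous count and only the difference is posted, so InfluxDB receives per-interval increments rather than the monotone total. A standalone sketch of that delta pattern (`deltaReporter` is a hypothetical name):

```go
package main

import "fmt"

// deltaReporter converts monotonically increasing counter values into
// per-interval increments, mirroring the cache logic in the reporter.
type deltaReporter struct {
	cache map[string]int64
}

func (d *deltaReporter) delta(name string, current int64) int64 {
	last := d.cache[name]
	d.cache[name] = current
	return current - last
}

func main() {
	d := &deltaReporter{cache: make(map[string]int64)}
	fmt.Println(d.delta("txs", 10)) // 10 (first flush)
	fmt.Println(d.delta("txs", 25)) // 15 (increment since last flush)
}
```
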
diff --git a/metrics/init_test.go b/metrics/init_test.go
deleted file mode 100644
index 43401e833c..0000000000
--- a/metrics/init_test.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package metrics
-
-func init() {
- Enabled = true
-}
diff --git a/metrics/json.go b/metrics/json.go
deleted file mode 100644
index 2087d8211e..0000000000
--- a/metrics/json.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package metrics
-
-import (
- "encoding/json"
- "io"
- "time"
-)
-
-// MarshalJSON returns a byte slice containing a JSON representation of all
-// the metrics in the Registry.
-func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
- return json.Marshal(r.GetAll())
-}
-
-// WriteJSON writes metrics from the given registry periodically to the
-// specified io.Writer as JSON.
-func WriteJSON(r Registry, d time.Duration, w io.Writer) {
- for range time.Tick(d) {
- WriteJSONOnce(r, w)
- }
-}
-
-// WriteJSONOnce writes metrics from the given registry to the specified
-// io.Writer as JSON.
-func WriteJSONOnce(r Registry, w io.Writer) {
- json.NewEncoder(w).Encode(r)
-}
-
-func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
- return json.Marshal(p.GetAll())
-}
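
WriteJSONOnce leans on the MarshalJSON implementations above: encoding the registry serializes every registered metric through GetAll(). A usage sketch against the removed API (pre-change tree only):

```go
package main

import (
	"os"

	"github.com/dominant-strategies/go-quai/metrics"
)

func main() {
	metrics.Enabled = true
	r := metrics.NewRegistry()
	metrics.NewRegisteredCounter("requests", r).Inc(3)
	// Emits {"requests":{"count":3}} followed by a newline.
	metrics.WriteJSONOnce(r, os.Stdout)
}
```
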
diff --git a/metrics/json_test.go b/metrics/json_test.go
deleted file mode 100644
index f91fe8cfa5..0000000000
--- a/metrics/json_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package metrics
-
-import (
- "bytes"
- "encoding/json"
- "testing"
-)
-
-func TestRegistryMarshallJSON(t *testing.T) {
- b := &bytes.Buffer{}
- enc := json.NewEncoder(b)
- r := NewRegistry()
- r.Register("counter", NewCounter())
- enc.Encode(r)
- if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
- t.Fatalf(s)
- }
-}
-
-func TestRegistryWriteJSONOnce(t *testing.T) {
- r := NewRegistry()
- r.Register("counter", NewCounter())
- b := &bytes.Buffer{}
- WriteJSONOnce(r, b)
- if s := b.String(); s != "{\"counter\":{\"count\":0}}\n" {
- t.Fail()
- }
-}
diff --git a/metrics/librato/client.go b/metrics/librato/client.go
deleted file mode 100644
index f7aed3e4ef..0000000000
--- a/metrics/librato/client.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package librato
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
-)
-
-const Operations = "operations"
-const OperationsShort = "ops"
-
-type LibratoClient struct {
- Email, Token string
-}
-
-// property strings
-const (
- // display attributes
- Color = "color"
- DisplayMax = "display_max"
- DisplayMin = "display_min"
- DisplayUnitsLong = "display_units_long"
- DisplayUnitsShort = "display_units_short"
- DisplayStacked = "display_stacked"
- DisplayTransform = "display_transform"
- // special gauge display attributes
- SummarizeFunction = "summarize_function"
- Aggregate = "aggregate"
-
- // metric keys
- Name = "name"
- Period = "period"
- Description = "description"
- DisplayName = "display_name"
- Attributes = "attributes"
-
- // measurement keys
- MeasureTime = "measure_time"
- Source = "source"
- Value = "value"
-
- // special gauge keys
- Count = "count"
- Sum = "sum"
- Max = "max"
- Min = "min"
- SumSquares = "sum_squares"
-
- // batch keys
- Counters = "counters"
- Gauges = "gauges"
-
- MetricsPostUrl = "https://metrics-api.librato.com/v1/metrics"
-)
-
-type Measurement map[string]interface{}
-type Metric map[string]interface{}
-
-type Batch struct {
- Gauges []Measurement `json:"gauges,omitempty"`
- Counters []Measurement `json:"counters,omitempty"`
- MeasureTime int64 `json:"measure_time"`
- Source string `json:"source"`
-}
-
-func (c *LibratoClient) PostMetrics(batch Batch) (err error) {
- var (
- js []byte
- req *http.Request
- resp *http.Response
- )
-
- if len(batch.Counters) == 0 && len(batch.Gauges) == 0 {
- return nil
- }
-
- if js, err = json.Marshal(batch); err != nil {
- return
- }
-
- if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil {
- return
- }
-
- req.Header.Set("Content-Type", "application/json")
- req.SetBasicAuth(c.Email, c.Token)
-
- if resp, err = http.DefaultClient.Do(req); err != nil {
- return
- }
-
- if resp.StatusCode != http.StatusOK {
- var body []byte
- if body, err = ioutil.ReadAll(resp.Body); err != nil {
- body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err))
- }
- err = fmt.Errorf("unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
- }
- return
-}
diff --git a/metrics/librato/librato.go b/metrics/librato/librato.go
deleted file mode 100644
index 9497207883..0000000000
--- a/metrics/librato/librato.go
+++ /dev/null
@@ -1,236 +0,0 @@
-package librato
-
-import (
- "fmt"
- "log"
- "math"
- "regexp"
- "time"
-
- "github.com/dominant-strategies/go-quai/metrics"
-)
-
-// a regexp for extracting the unit from time.Duration.String
-var unitRegexp = regexp.MustCompile(`[^\\d]+$`)
-
-// a helper that turns a time.Duration into librato display attributes for timer metrics
-func translateTimerAttributes(d time.Duration) (attrs map[string]interface{}) {
- attrs = make(map[string]interface{})
- attrs[DisplayTransform] = fmt.Sprintf("x/%d", int64(d))
- attrs[DisplayUnitsShort] = string(unitRegexp.Find([]byte(d.String())))
- return
-}
-
-type Reporter struct {
- Email, Token string
- Namespace string
- Source string
- Interval time.Duration
- Registry metrics.Registry
- Percentiles []float64 // percentiles to report on histogram metrics
- TimerAttributes map[string]interface{} // units in which timers will be displayed
- intervalSec int64
-}
-
-func NewReporter(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) *Reporter {
- return &Reporter{e, t, "", s, d, r, p, translateTimerAttributes(u), int64(d / time.Second)}
-}
-
-func Librato(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) {
- NewReporter(r, d, e, t, s, p, u).Run()
-}
-
-func (rep *Reporter) Run() {
- log.Printf("WARNING: This client has been DEPRECATED! It has been moved to https://github.com/mihasya/go-metrics-librato and will be removed from rcrowley/go-metrics on August 5th 2015")
- ticker := time.NewTicker(rep.Interval)
- defer ticker.Stop()
- metricsApi := &LibratoClient{rep.Email, rep.Token}
- for now := range ticker.C {
- var metrics Batch
- var err error
- if metrics, err = rep.BuildRequest(now, rep.Registry); err != nil {
- log.Printf("ERROR constructing librato request body %s", err)
- continue
- }
- if err := metricsApi.PostMetrics(metrics); err != nil {
- log.Printf("ERROR sending metrics to librato %s", err)
- continue
- }
- }
-}
-
-// calculate the sum of squares from data provided by a metrics.Sample
-// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
-func sumSquares(s metrics.Sample) float64 {
- count := float64(s.Count())
- sumSquared := math.Pow(count*s.Mean(), 2)
- sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
- if math.IsNaN(sumSquares) {
- return 0.0
- }
- return sumSquares
-}
-func sumSquaresTimer(t metrics.Timer) float64 {
- count := float64(t.Count())
- sumSquared := math.Pow(count*t.Mean(), 2)
- sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
- if math.IsNaN(sumSquares) {
- return 0.0
- }
- return sumSquares
-}
-
-func (rep *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) {
- snapshot = Batch{
- // coerce timestamps to a stepping fn so that they line up in Librato graphs
- MeasureTime: (now.Unix() / rep.intervalSec) * rep.intervalSec,
- Source: rep.Source,
- }
- snapshot.Gauges = make([]Measurement, 0)
- snapshot.Counters = make([]Measurement, 0)
- histogramGaugeCount := 1 + len(rep.Percentiles)
- r.Each(func(name string, metric interface{}) {
- if rep.Namespace != "" {
- name = fmt.Sprintf("%s.%s", rep.Namespace, name)
- }
- measurement := Measurement{}
- measurement[Period] = rep.Interval.Seconds()
- switch m := metric.(type) {
- case metrics.Counter:
- if m.Count() > 0 {
- measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
- measurement[Value] = float64(m.Count())
- measurement[Attributes] = map[string]interface{}{
- DisplayUnitsLong: Operations,
- DisplayUnitsShort: OperationsShort,
- DisplayMin: "0",
- }
- snapshot.Counters = append(snapshot.Counters, measurement)
- }
- case metrics.Gauge:
- measurement[Name] = name
- measurement[Value] = float64(m.Value())
- snapshot.Gauges = append(snapshot.Gauges, measurement)
- case metrics.GaugeFloat64:
- measurement[Name] = name
- measurement[Value] = m.Value()
- snapshot.Gauges = append(snapshot.Gauges, measurement)
- case metrics.Histogram:
- if m.Count() > 0 {
- gauges := make([]Measurement, histogramGaugeCount)
- s := m.Sample()
- measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
- measurement[Count] = uint64(s.Count())
- measurement[Max] = float64(s.Max())
- measurement[Min] = float64(s.Min())
- measurement[Sum] = float64(s.Sum())
- measurement[SumSquares] = sumSquares(s)
- gauges[0] = measurement
- for i, p := range rep.Percentiles {
- gauges[i+1] = Measurement{
- Name: fmt.Sprintf("%s.%.2f", measurement[Name], p),
- Value: s.Percentile(p),
- Period: measurement[Period],
- }
- }
- snapshot.Gauges = append(snapshot.Gauges, gauges...)
- }
- case metrics.Meter:
- measurement[Name] = name
- measurement[Value] = float64(m.Count())
- snapshot.Counters = append(snapshot.Counters, measurement)
- snapshot.Gauges = append(snapshot.Gauges,
- Measurement{
- Name: fmt.Sprintf("%s.%s", name, "1min"),
- Value: m.Rate1(),
- Period: int64(rep.Interval.Seconds()),
- Attributes: map[string]interface{}{
- DisplayUnitsLong: Operations,
- DisplayUnitsShort: OperationsShort,
- DisplayMin: "0",
- },
- },
- Measurement{
- Name: fmt.Sprintf("%s.%s", name, "5min"),
- Value: m.Rate5(),
- Period: int64(rep.Interval.Seconds()),
- Attributes: map[string]interface{}{
- DisplayUnitsLong: Operations,
- DisplayUnitsShort: OperationsShort,
- DisplayMin: "0",
- },
- },
- Measurement{
- Name: fmt.Sprintf("%s.%s", name, "15min"),
- Value: m.Rate15(),
- Period: int64(rep.Interval.Seconds()),
- Attributes: map[string]interface{}{
- DisplayUnitsLong: Operations,
- DisplayUnitsShort: OperationsShort,
- DisplayMin: "0",
- },
- },
- )
- case metrics.Timer:
- measurement[Name] = name
- measurement[Value] = float64(m.Count())
- snapshot.Counters = append(snapshot.Counters, measurement)
- if m.Count() > 0 {
- libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
- gauges := make([]Measurement, histogramGaugeCount)
- gauges[0] = Measurement{
- Name: libratoName,
- Count: uint64(m.Count()),
- Sum: m.Mean() * float64(m.Count()),
- Max: float64(m.Max()),
- Min: float64(m.Min()),
- SumSquares: sumSquaresTimer(m),
- Period: int64(rep.Interval.Seconds()),
- Attributes: rep.TimerAttributes,
- }
- for i, p := range rep.Percentiles {
- gauges[i+1] = Measurement{
- Name: fmt.Sprintf("%s.timer.%2.0f", name, p*100),
- Value: m.Percentile(p),
- Period: int64(rep.Interval.Seconds()),
- Attributes: rep.TimerAttributes,
- }
- }
- snapshot.Gauges = append(snapshot.Gauges, gauges...)
- snapshot.Gauges = append(snapshot.Gauges,
- Measurement{
- Name: fmt.Sprintf("%s.%s", name, "rate.1min"),
- Value: m.Rate1(),
- Period: int64(rep.Interval.Seconds()),
- Attributes: map[string]interface{}{
- DisplayUnitsLong: Operations,
- DisplayUnitsShort: OperationsShort,
- DisplayMin: "0",
- },
- },
- Measurement{
- Name: fmt.Sprintf("%s.%s", name, "rate.5min"),
- Value: m.Rate5(),
- Period: int64(rep.Interval.Seconds()),
- Attributes: map[string]interface{}{
- DisplayUnitsLong: Operations,
- DisplayUnitsShort: OperationsShort,
- DisplayMin: "0",
- },
- },
- Measurement{
- Name: fmt.Sprintf("%s.%s", name, "rate.15min"),
- Value: m.Rate15(),
- Period: int64(rep.Interval.Seconds()),
- Attributes: map[string]interface{}{
- DisplayUnitsLong: Operations,
- DisplayUnitsShort: OperationsShort,
- DisplayMin: "0",
- },
- },
- )
- }
- }
- })
- return
-}
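
The sumSquares helpers recover Librato's sum-of-squares field from statistics the sample already tracks, via the rapid-calculation identity the comment links: for n values with mean mu and standard deviation sigma, the sum of squares equals n·(sigma² + mu²). A standalone numeric check of that identity:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	xs := []float64{2, 4, 4, 4, 5, 5, 7, 9}
	n := float64(len(xs))

	var sum, sumSq float64
	for _, x := range xs {
		sum += x
		sumSq += x * x
	}
	mean := sum / n
	variance := sumSq/n - mean*mean // population variance
	sigma := math.Sqrt(variance)

	// Identity: sum of squares == n*(sigma^2 + mean^2).
	fmt.Println(sumSq, n*(sigma*sigma+mean*mean)) // both 232
}
```
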
diff --git a/metrics/log.go b/metrics/log.go
deleted file mode 100644
index 0c8ea7c971..0000000000
--- a/metrics/log.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package metrics
-
-import (
- "time"
-)
-
-type Logger interface {
- Printf(format string, v ...interface{})
-}
-
-func Log(r Registry, freq time.Duration, l Logger) {
- LogScaled(r, freq, time.Nanosecond, l)
-}
-
-// Output each metric in the given registry periodically using the given
-// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos.
-func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
- du := float64(scale)
- duSuffix := scale.String()[1:]
-
- for range time.Tick(freq) {
- r.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- l.Printf("counter %s\n", name)
- l.Printf(" count: %9d\n", metric.Count())
- case Gauge:
- l.Printf("gauge %s\n", name)
- l.Printf(" value: %9d\n", metric.Value())
- case GaugeFloat64:
- l.Printf("gauge %s\n", name)
- l.Printf(" value: %f\n", metric.Value())
- case Healthcheck:
- metric.Check()
- l.Printf("healthcheck %s\n", name)
- l.Printf(" error: %v\n", metric.Error())
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- l.Printf("histogram %s\n", name)
- l.Printf(" count: %9d\n", h.Count())
- l.Printf(" min: %9d\n", h.Min())
- l.Printf(" max: %9d\n", h.Max())
- l.Printf(" mean: %12.2f\n", h.Mean())
- l.Printf(" stddev: %12.2f\n", h.StdDev())
- l.Printf(" median: %12.2f\n", ps[0])
- l.Printf(" 75%%: %12.2f\n", ps[1])
- l.Printf(" 95%%: %12.2f\n", ps[2])
- l.Printf(" 99%%: %12.2f\n", ps[3])
- l.Printf(" 99.9%%: %12.2f\n", ps[4])
- case Meter:
- m := metric.Snapshot()
- l.Printf("meter %s\n", name)
- l.Printf(" count: %9d\n", m.Count())
- l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
- l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
- l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
- l.Printf(" mean rate: %12.2f\n", m.RateMean())
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- l.Printf("timer %s\n", name)
- l.Printf(" count: %9d\n", t.Count())
- l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix)
- l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix)
- l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix)
- l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix)
- l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix)
- l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix)
- l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix)
- l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix)
- l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix)
- l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
- l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
- l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
- l.Printf(" mean rate: %12.2f\n", t.RateMean())
- }
- })
- }
-}
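
LogScaled blocks on its ticker, so callers run it in a goroutine; the scale parameter rescales timer durations only, not rates. A usage sketch against the removed API (pre-change tree only):

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/dominant-strategies/go-quai/metrics"
)

func main() {
	metrics.Enabled = true
	r := metrics.NewRegistry()
	metrics.NewRegisteredCounter("requests", r).Inc(1)

	// LogScaled blocks on its ticker, so run it in a goroutine;
	// timer values are printed in milliseconds here.
	go metrics.LogScaled(r, 10*time.Second, time.Millisecond,
		log.New(os.Stderr, "metrics: ", log.Lmicroseconds))

	time.Sleep(25 * time.Second) // let a couple of flushes happen
}
```
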
diff --git a/metrics/memory.md b/metrics/memory.md
deleted file mode 100644
index 47454f54b6..0000000000
--- a/metrics/memory.md
+++ /dev/null
@@ -1,285 +0,0 @@
-Memory usage
-============
-
-(Highly unscientific.)
-
-Command used to gather static memory usage:
-
-```sh
-grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
-```
-
-Program used to gather baseline memory usage:
-
-```go
-package main
-
-import "time"
-
-func main() {
- time.Sleep(600e9)
-}
-```
-
-Baseline
---------
-
-```
-VmPeak: 42604 kB
-VmSize: 42604 kB
-VmLck: 0 kB
-VmHWM: 1120 kB
-VmRSS: 1120 kB
-VmData: 35460 kB
-VmStk: 136 kB
-VmExe: 1020 kB
-VmLib: 1848 kB
-VmPTE: 36 kB
-VmSwap: 0 kB
-```
-
-Program used to gather metric memory usage (with other metrics being similar):
-
-```go
-package main
-
-import (
- "fmt"
- "metrics"
- "time"
-)
-
-func main() {
- fmt.Sprintf("foo")
- metrics.NewRegistry()
- time.Sleep(600e9)
-}
-```
-
-1000 counters registered
-------------------------
-
-```
-VmPeak: 44016 kB
-VmSize: 44016 kB
-VmLck: 0 kB
-VmHWM: 1928 kB
-VmRSS: 1928 kB
-VmData: 36868 kB
-VmStk: 136 kB
-VmExe: 1024 kB
-VmLib: 1848 kB
-VmPTE: 40 kB
-VmSwap: 0 kB
-```
-
-**1.412 kB virtual, TODO 0.808 kB resident per counter.**
-
-100000 counters registered
---------------------------
-
-```
-VmPeak: 55024 kB
-VmSize: 55024 kB
-VmLck: 0 kB
-VmHWM: 12440 kB
-VmRSS: 12440 kB
-VmData: 47876 kB
-VmStk: 136 kB
-VmExe: 1024 kB
-VmLib: 1848 kB
-VmPTE: 64 kB
-VmSwap: 0 kB
-```
-
-**0.1242 kB virtual, 0.1132 kB resident per counter.**
-
-1000 gauges registered
-----------------------
-
-```
-VmPeak: 44012 kB
-VmSize: 44012 kB
-VmLck: 0 kB
-VmHWM: 1928 kB
-VmRSS: 1928 kB
-VmData: 36868 kB
-VmStk: 136 kB
-VmExe: 1020 kB
-VmLib: 1848 kB
-VmPTE: 40 kB
-VmSwap: 0 kB
-```
-
-**1.408 kB virtual, 0.808 kB resident per gauge.**
-
-100000 gauges registered
-------------------------
-
-```
-VmPeak: 55020 kB
-VmSize: 55020 kB
-VmLck: 0 kB
-VmHWM: 12432 kB
-VmRSS: 12432 kB
-VmData: 47876 kB
-VmStk: 136 kB
-VmExe: 1020 kB
-VmLib: 1848 kB
-VmPTE: 60 kB
-VmSwap: 0 kB
-```
-
-**0.12416 kB virtual, 0.11312 kB resident per gauge.**
-
-1000 histograms with a uniform sample size of 1028
---------------------------------------------------
-
-```
-VmPeak: 72272 kB
-VmSize: 72272 kB
-VmLck: 0 kB
-VmHWM: 16204 kB
-VmRSS: 16204 kB
-VmData: 65100 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 80 kB
-VmSwap: 0 kB
-```
-
-**29.668 kB virtual, TODO 15.084 resident per histogram.**
-
-10000 histograms with a uniform sample size of 1028
----------------------------------------------------
-
-```
-VmPeak: 256912 kB
-VmSize: 256912 kB
-VmLck: 0 kB
-VmHWM: 146204 kB
-VmRSS: 146204 kB
-VmData: 249740 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 448 kB
-VmSwap: 0 kB
-```
-
-**21.4308 kB virtual, 14.5084 kB resident per histogram.**
-
-50000 histograms with a uniform sample size of 1028
----------------------------------------------------
-
-```
-VmPeak: 908112 kB
-VmSize: 908112 kB
-VmLck: 0 kB
-VmHWM: 645832 kB
-VmRSS: 645588 kB
-VmData: 900940 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 1716 kB
-VmSwap: 1544 kB
-```
-
-**17.31016 kB virtual, 12.88936 kB resident per histogram.**
-
-1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
--------------------------------------------------------------------------------------
-
-```
-VmPeak: 62480 kB
-VmSize: 62480 kB
-VmLck: 0 kB
-VmHWM: 11572 kB
-VmRSS: 11572 kB
-VmData: 55308 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 64 kB
-VmSwap: 0 kB
-```
-
-**19.876 kB virtual, 10.452 kB resident per histogram.**
-
-10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
---------------------------------------------------------------------------------------
-
-```
-VmPeak: 153296 kB
-VmSize: 153296 kB
-VmLck: 0 kB
-VmHWM: 101176 kB
-VmRSS: 101176 kB
-VmData: 146124 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 240 kB
-VmSwap: 0 kB
-```
-
-**11.0692 kB virtual, 10.0056 kB resident per histogram.**
-
-50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
---------------------------------------------------------------------------------------
-
-```
-VmPeak: 557264 kB
-VmSize: 557264 kB
-VmLck: 0 kB
-VmHWM: 501056 kB
-VmRSS: 501056 kB
-VmData: 550092 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 1032 kB
-VmSwap: 0 kB
-```
-
-**10.2932 kB virtual, 9.99872 kB resident per histogram.**
-
-1000 meters
------------
-
-```
-VmPeak: 74504 kB
-VmSize: 74504 kB
-VmLck: 0 kB
-VmHWM: 24124 kB
-VmRSS: 24124 kB
-VmData: 67340 kB
-VmStk: 136 kB
-VmExe: 1040 kB
-VmLib: 1848 kB
-VmPTE: 92 kB
-VmSwap: 0 kB
-```
-
-**31.9 kB virtual, 23.004 kB resident per meter.**
-
-10000 meters
-------------
-
-```
-VmPeak: 278920 kB
-VmSize: 278920 kB
-VmLck: 0 kB
-VmHWM: 227300 kB
-VmRSS: 227300 kB
-VmData: 271756 kB
-VmStk: 136 kB
-VmExe: 1040 kB
-VmLib: 1848 kB
-VmPTE: 488 kB
-VmSwap: 0 kB
-```
-
-**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/metrics/meter.go b/metrics/meter.go
deleted file mode 100644
index 60ae919d04..0000000000
--- a/metrics/meter.go
+++ /dev/null
@@ -1,301 +0,0 @@
-package metrics
-
-import (
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Meters count events to produce exponentially-weighted moving average rates
-// at one-, five-, and fifteen-minute intervals, plus a mean rate.
-type Meter interface {
- Count() int64
- Mark(int64)
- Rate1() float64
- Rate5() float64
- Rate15() float64
- RateMean() float64
- Snapshot() Meter
- Stop()
-}
-
-// GetOrRegisterMeter returns an existing Meter or constructs and registers a
-// new StandardMeter.
-// Be sure to unregister the meter from the registry once it is of no use to
-// allow for garbage collection.
-func GetOrRegisterMeter(name string, r Registry) Meter {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewMeter).(Meter)
-}
-
-// GetOrRegisterMeterForced returns an existing Meter or constructs and registers a
-// new StandardMeter, whether or not the global kill-switch is enabled.
-// Be sure to unregister the meter from the registry once it is of no use to
-// allow for garbage collection.
-func GetOrRegisterMeterForced(name string, r Registry) Meter {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewMeterForced).(Meter)
-}
-
-// NewMeter constructs a new StandardMeter and launches a goroutine.
-// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
-func NewMeter() Meter {
- if !Enabled {
- return NilMeter{}
- }
- m := newStandardMeter()
- arbiter.Lock()
- defer arbiter.Unlock()
- arbiter.meters[m] = struct{}{}
- if !arbiter.started {
- arbiter.started = true
- go arbiter.tick()
- }
- return m
-}
-
-// NewMeterForced constructs a new StandardMeter and launches a goroutine,
-// whether or not the global kill-switch is enabled.
-// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
-func NewMeterForced() Meter {
- m := newStandardMeter()
- arbiter.Lock()
- defer arbiter.Unlock()
- arbiter.meters[m] = struct{}{}
- if !arbiter.started {
- arbiter.started = true
- go arbiter.tick()
- }
- return m
-}
-
-// NewRegisteredMeter constructs and registers a new StandardMeter
-// and launches a goroutine.
-// Be sure to unregister the meter from the registry once it is of no use to
-// allow for garbage collection.
-func NewRegisteredMeter(name string, r Registry) Meter {
- c := NewMeter()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewRegisteredMeterForced constructs and registers a new StandardMeter
-// and launches a goroutine no matter the global switch is enabled or not.
-// Be sure to unregister the meter from the registry once it is of no use to
-// allow for garbage collection.
-func NewRegisteredMeterForced(name string, r Registry) Meter {
- c := NewMeterForced()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// MeterSnapshot is a read-only copy of another Meter.
-type MeterSnapshot struct {
- // WARNING: The `temp` field is accessed atomically.
- // On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is
- // guaranteed to be so aligned, so take advantage of that. For more information,
- // see https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
- temp int64
- count int64
- rate1, rate5, rate15, rateMean float64
-}
-
-// Count returns the count of events at the time the snapshot was taken.
-func (m *MeterSnapshot) Count() int64 { return m.count }
-
-// Mark panics.
-func (*MeterSnapshot) Mark(n int64) {
- panic("Mark called on a MeterSnapshot")
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
-
-// Snapshot returns the snapshot.
-func (m *MeterSnapshot) Snapshot() Meter { return m }
-
-// Stop is a no-op.
-func (m *MeterSnapshot) Stop() {}
-
-// NilMeter is a no-op Meter.
-type NilMeter struct{}
-
-// Count is a no-op.
-func (NilMeter) Count() int64 { return 0 }
-
-// Mark is a no-op.
-func (NilMeter) Mark(n int64) {}
-
-// Rate1 is a no-op.
-func (NilMeter) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilMeter) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilMeter) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilMeter) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilMeter) Snapshot() Meter { return NilMeter{} }
-
-// Stop is a no-op.
-func (NilMeter) Stop() {}
-
-// StandardMeter is the standard implementation of a Meter.
-type StandardMeter struct {
- lock sync.RWMutex
- snapshot *MeterSnapshot
- a1, a5, a15 EWMA
- startTime time.Time
- stopped uint32
-}
-
-func newStandardMeter() *StandardMeter {
- return &StandardMeter{
- snapshot: &MeterSnapshot{},
- a1: NewEWMA1(),
- a5: NewEWMA5(),
- a15: NewEWMA15(),
- startTime: time.Now(),
- }
-}
-
-// Stop stops the meter; Mark() becomes a no-op once the meter has been stopped.
-func (m *StandardMeter) Stop() {
- stopped := atomic.SwapUint32(&m.stopped, 1)
- if stopped != 1 {
- arbiter.Lock()
- delete(arbiter.meters, m)
- arbiter.Unlock()
- }
-}
-
-// Count returns the number of events recorded.
-// It first updates the meter so the count is as accurate as possible.
-func (m *StandardMeter) Count() int64 {
- m.lock.Lock()
- defer m.lock.Unlock()
- m.updateMeter()
- return m.snapshot.count
-}
-
-// Mark records the occurrence of n events.
-func (m *StandardMeter) Mark(n int64) {
- atomic.AddInt64(&m.snapshot.temp, n)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (m *StandardMeter) Rate1() float64 {
- m.lock.RLock()
- defer m.lock.RUnlock()
- return m.snapshot.rate1
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (m *StandardMeter) Rate5() float64 {
- m.lock.RLock()
- defer m.lock.RUnlock()
- return m.snapshot.rate5
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (m *StandardMeter) Rate15() float64 {
- m.lock.RLock()
- defer m.lock.RUnlock()
- return m.snapshot.rate15
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (m *StandardMeter) RateMean() float64 {
- m.lock.RLock()
- defer m.lock.RUnlock()
- return m.snapshot.rateMean
-}
-
-// Snapshot returns a read-only copy of the meter.
-func (m *StandardMeter) Snapshot() Meter {
- m.lock.RLock()
- snapshot := *m.snapshot
- m.lock.RUnlock()
- return &snapshot
-}
-
-func (m *StandardMeter) updateSnapshot() {
- // should run with write lock held on m.lock
- snapshot := m.snapshot
- snapshot.rate1 = m.a1.Rate()
- snapshot.rate5 = m.a5.Rate()
- snapshot.rate15 = m.a15.Rate()
- snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
-}
-
-func (m *StandardMeter) updateMeter() {
- // should only run with write lock held on m.lock
- n := atomic.SwapInt64(&m.snapshot.temp, 0)
- m.snapshot.count += n
- m.a1.Update(n)
- m.a5.Update(n)
- m.a15.Update(n)
-}
-
-func (m *StandardMeter) tick() {
- m.lock.Lock()
- defer m.lock.Unlock()
- m.updateMeter()
- m.a1.Tick()
- m.a5.Tick()
- m.a15.Tick()
- m.updateSnapshot()
-}
-
-// meterArbiter ticks meters every 5s from a single goroutine.
-// Meters are referenced in a set so they can be stopped later.
-type meterArbiter struct {
- sync.RWMutex
- started bool
- meters map[*StandardMeter]struct{}
- ticker *time.Ticker
-}
-
-var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*StandardMeter]struct{})}
-
-// Ticks meters on the scheduled interval
-func (ma *meterArbiter) tick() {
- for range ma.ticker.C {
- ma.tickMeters()
- }
-}
-
-func (ma *meterArbiter) tickMeters() {
- ma.RLock()
- defer ma.RUnlock()
- for meter := range ma.meters {
- meter.tick()
- }
-}
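
The meter is the most intricate of the removed types: Mark() is just an atomic add into snapshot.temp, and the single shared arbiter goroutine drains temp into the EWMAs every five seconds, keeping the hot path lock-free. A standalone sketch of that hot-path/tick split (`miniMeter` is a hypothetical name):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// miniMeter separates the hot path (an atomic add) from aggregation
// (a periodic drain), the same shape as StandardMeter and its arbiter.
type miniMeter struct {
	temp  int64 // written by Mark, drained by tick
	count int64 // only touched by the ticking goroutine
}

func (m *miniMeter) Mark(n int64) { atomic.AddInt64(&m.temp, n) }

func (m *miniMeter) tick() {
	n := atomic.SwapInt64(&m.temp, 0) // drain pending events
	m.count += n                      // fold into the aggregate
}

func main() {
	m := &miniMeter{}
	done := make(chan struct{})
	go func() {
		for i := 0; i < 1000; i++ {
			m.Mark(1)
		}
		close(done)
	}()
	<-done
	m.tick()
	fmt.Println(m.count) // 1000
	_ = time.Second      // a real arbiter would tick on a 5s ticker
}
```
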
diff --git a/metrics/meter_test.go b/metrics/meter_test.go
deleted file mode 100644
index b3f6cb8c0c..0000000000
--- a/metrics/meter_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package metrics
-
-import (
- "testing"
- "time"
-)
-
-func BenchmarkMeter(b *testing.B) {
- m := NewMeter()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- m.Mark(1)
- }
-}
-
-func TestGetOrRegisterMeter(t *testing.T) {
- r := NewRegistry()
- NewRegisteredMeter("foo", r).Mark(47)
- if m := GetOrRegisterMeter("foo", r); m.Count() != 47 {
- t.Fatal(m.Count())
- }
-}
-
-func TestMeterDecay(t *testing.T) {
- ma := meterArbiter{
- ticker: time.NewTicker(time.Millisecond),
- meters: make(map[*StandardMeter]struct{}),
- }
- defer ma.ticker.Stop()
- m := newStandardMeter()
- ma.meters[m] = struct{}{}
- m.Mark(1)
- ma.tickMeters()
- rateMean := m.RateMean()
- time.Sleep(100 * time.Millisecond)
- ma.tickMeters()
- if m.RateMean() >= rateMean {
- t.Error("m.RateMean() didn't decrease")
- }
-}
-
-func TestMeterNonzero(t *testing.T) {
- m := NewMeter()
- m.Mark(3)
- if count := m.Count(); count != 3 {
- t.Errorf("m.Count(): 3 != %v\n", count)
- }
-}
-
-func TestMeterStop(t *testing.T) {
- l := len(arbiter.meters)
- m := NewMeter()
- if l+1 != len(arbiter.meters) {
- t.Errorf("arbiter.meters: %d != %d\n", l+1, len(arbiter.meters))
- }
- m.Stop()
- if l != len(arbiter.meters) {
- t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters))
- }
-}
-
-func TestMeterSnapshot(t *testing.T) {
- m := NewMeter()
- m.Mark(1)
- if snapshot := m.Snapshot(); m.RateMean() != snapshot.RateMean() {
- t.Fatal(snapshot)
- }
-}
-
-func TestMeterZero(t *testing.T) {
- m := NewMeter()
- if count := m.Count(); count != 0 {
- t.Errorf("m.Count(): 0 != %v\n", count)
- }
-}
-
-func TestMeterRepeat(t *testing.T) {
- m := NewMeter()
- for i := 0; i < 101; i++ {
- m.Mark(int64(i))
- }
- if count := m.Count(); count != 5050 {
- t.Errorf("m.Count(): 5050 != %v\n", count)
- }
- for i := 0; i < 101; i++ {
- m.Mark(int64(i))
- }
- if count := m.Count(); count != 10100 {
- t.Errorf("m.Count(): 10100 != %v\n", count)
- }
-}
diff --git a/metrics/metrics.go b/metrics/metrics.go
deleted file mode 100644
index f2375d4160..0000000000
--- a/metrics/metrics.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Go port of Coda Hale's Metrics library
-//
-// <https://github.com/rcrowley/go-metrics>
-//
-// Coda Hale's original work: <https://github.com/codahale/metrics>
-package metrics
-
-import (
- "os"
- "runtime"
- "strings"
- "time"
-
- "github.com/dominant-strategies/go-quai/log"
-)
-
-// Enabled is checked by the constructor functions for all of the
-// standard metrics. If it is false, the metric returned is a no-op stub.
-//
-// This global kill-switch helps quantify the observer effect and makes
-// for less cluttered pprof profiles.
-var Enabled = false
-
-// EnabledExpensive is a soft-flag meant for external packages to check if costly
-// metrics gathering is allowed or not. The goal is to separate standard metrics
-// for health monitoring and debug metrics that might impact runtime performance.
-var EnabledExpensive = false
-
-// enablerFlags lists the CLI flag names used to enable metrics collection.
-var enablerFlags = []string{"metrics"}
-
-// expensiveEnablerFlags lists the CLI flag names used to enable expensive metrics collection.
-var expensiveEnablerFlags = []string{"metrics.expensive"}
-
-// Init enables or disables the metrics system. Since we need this to run before
-// any other code gets to create meters and timers, we'll actually do an ugly hack
-// and peek into the command line args for the metrics flag.
-func init() {
- for _, arg := range os.Args {
- flag := strings.TrimLeft(arg, "-")
-
- for _, enabler := range enablerFlags {
- if !Enabled && flag == enabler {
- log.Info("Enabling metrics collection")
- Enabled = true
- }
- }
- for _, enabler := range expensiveEnablerFlags {
- if !EnabledExpensive && flag == enabler {
- log.Info("Enabling expensive metrics collection")
- EnabledExpensive = true
- }
- }
- }
-}
-
-// CollectProcessMetrics periodically collects various metrics about the running
-// process.
-func CollectProcessMetrics(refresh time.Duration) {
- // Short circuit if the metrics system is disabled
- if !Enabled {
- return
- }
- refreshFreq := int64(refresh / time.Second)
-
- // Create the various data collectors
- cpuStats := make([]*CPUStats, 2)
- memstats := make([]*runtime.MemStats, 2)
- diskstats := make([]*DiskStats, 2)
- for i := 0; i < len(memstats); i++ {
- cpuStats[i] = new(CPUStats)
- memstats[i] = new(runtime.MemStats)
- diskstats[i] = new(DiskStats)
- }
- // Define the various metrics to collect
- var (
- cpuSysLoad = GetOrRegisterGauge("system/cpu/sysload", DefaultRegistry)
- cpuSysWait = GetOrRegisterGauge("system/cpu/syswait", DefaultRegistry)
- cpuProcLoad = GetOrRegisterGauge("system/cpu/procload", DefaultRegistry)
- cpuThreads = GetOrRegisterGauge("system/cpu/threads", DefaultRegistry)
- cpuGoroutines = GetOrRegisterGauge("system/cpu/goroutines", DefaultRegistry)
-
- memPauses = GetOrRegisterMeter("system/memory/pauses", DefaultRegistry)
- memAllocs = GetOrRegisterMeter("system/memory/allocs", DefaultRegistry)
- memFrees = GetOrRegisterMeter("system/memory/frees", DefaultRegistry)
- memHeld = GetOrRegisterGauge("system/memory/held", DefaultRegistry)
- memUsed = GetOrRegisterGauge("system/memory/used", DefaultRegistry)
-
- diskReads = GetOrRegisterMeter("system/disk/readcount", DefaultRegistry)
- diskReadBytes = GetOrRegisterMeter("system/disk/readdata", DefaultRegistry)
- diskReadBytesCounter = GetOrRegisterCounter("system/disk/readbytes", DefaultRegistry)
- diskWrites = GetOrRegisterMeter("system/disk/writecount", DefaultRegistry)
- diskWriteBytes = GetOrRegisterMeter("system/disk/writedata", DefaultRegistry)
- diskWriteBytesCounter = GetOrRegisterCounter("system/disk/writebytes", DefaultRegistry)
- )
- // Iterate loading the different stats and updating the meters
- for i := 1; ; i++ {
- location1 := i % 2
- location2 := (i - 1) % 2
-
- ReadCPUStats(cpuStats[location1])
- cpuSysLoad.Update((cpuStats[location1].GlobalTime - cpuStats[location2].GlobalTime) / refreshFreq)
- cpuSysWait.Update((cpuStats[location1].GlobalWait - cpuStats[location2].GlobalWait) / refreshFreq)
- cpuProcLoad.Update((cpuStats[location1].LocalTime - cpuStats[location2].LocalTime) / refreshFreq)
- cpuThreads.Update(int64(threadCreateProfile.Count()))
- cpuGoroutines.Update(int64(runtime.NumGoroutine()))
-
- runtime.ReadMemStats(memstats[location1])
- memPauses.Mark(int64(memstats[location1].PauseTotalNs - memstats[location2].PauseTotalNs))
- memAllocs.Mark(int64(memstats[location1].Mallocs - memstats[location2].Mallocs))
- memFrees.Mark(int64(memstats[location1].Frees - memstats[location2].Frees))
- memHeld.Update(int64(memstats[location1].HeapSys - memstats[location1].HeapReleased))
- memUsed.Update(int64(memstats[location1].Alloc))
-
- if ReadDiskStats(diskstats[location1]) == nil {
- diskReads.Mark(diskstats[location1].ReadCount - diskstats[location2].ReadCount)
- diskReadBytes.Mark(diskstats[location1].ReadBytes - diskstats[location2].ReadBytes)
- diskWrites.Mark(diskstats[location1].WriteCount - diskstats[location2].WriteCount)
- diskWriteBytes.Mark(diskstats[location1].WriteBytes - diskstats[location2].WriteBytes)
-
- diskReadBytesCounter.Inc(diskstats[location1].ReadBytes - diskstats[location2].ReadBytes)
- diskWriteBytesCounter.Inc(diskstats[location1].WriteBytes - diskstats[location2].WriteBytes)
- }
- time.Sleep(refresh)
- }
-}
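// Reviewer note (illustrative sketch, not part of the diff): the collection
// loop above alternates between two fixed sample slots so every pass can
// report a per-interval delta without allocating. The pattern in isolation:
package main

import "fmt"

func main() {
	var samples [2]int64
	var counter int64 // stand-in for a monotonically growing stat
	for i := 1; i <= 3; i++ {
		cur, prev := i%2, (i-1)%2
		counter += 10 // pretend the stat grew by 10 this interval
		samples[cur] = counter
		fmt.Println("delta:", samples[cur]-samples[prev]) // prints 10 each pass
	}
}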
diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go
deleted file mode 100644
index df36da0ade..0000000000
--- a/metrics/metrics_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "io/ioutil"
- "log"
- "sync"
- "testing"
- "time"
-)
-
-const FANOUT = 128
-
-// Stop the compiler from complaining during debugging.
-var (
- _ = ioutil.Discard
- _ = log.LstdFlags
-)
-
-func BenchmarkMetrics(b *testing.B) {
- r := NewRegistry()
- c := NewRegisteredCounter("counter", r)
- g := NewRegisteredGauge("gauge", r)
- gf := NewRegisteredGaugeFloat64("gaugefloat64", r)
- h := NewRegisteredHistogram("histogram", r, NewUniformSample(100))
- m := NewRegisteredMeter("meter", r)
- t := NewRegisteredTimer("timer", r)
- RegisterDebugGCStats(r)
- RegisterRuntimeMemStats(r)
- b.ResetTimer()
- ch := make(chan bool)
-
- wgD := &sync.WaitGroup{}
- /*
- wgD.Add(1)
- go func() {
- defer wgD.Done()
- //log.Println("go CaptureDebugGCStats")
- for {
- select {
- case <-ch:
- //log.Println("done CaptureDebugGCStats")
- return
- default:
- CaptureDebugGCStatsOnce(r)
- }
- }
- }()
- //*/
-
- wgR := &sync.WaitGroup{}
- //*
- wgR.Add(1)
- go func() {
- defer wgR.Done()
- //log.Println("go CaptureRuntimeMemStats")
- for {
- select {
- case <-ch:
- //log.Println("done CaptureRuntimeMemStats")
- return
- default:
- CaptureRuntimeMemStatsOnce(r)
- }
- }
- }()
- //*/
-
- wgW := &sync.WaitGroup{}
- /*
- wgW.Add(1)
- go func() {
- defer wgW.Done()
- //log.Println("go Write")
- for {
- select {
- case <-ch:
- //log.Println("done Write")
- return
- default:
- WriteOnce(r, ioutil.Discard)
- }
- }
- }()
- //*/
-
- wg := &sync.WaitGroup{}
- wg.Add(FANOUT)
- for i := 0; i < FANOUT; i++ {
- go func(i int) {
- defer wg.Done()
- //log.Println("go", i)
- for i := 0; i < b.N; i++ {
- c.Inc(1)
- g.Update(int64(i))
- gf.Update(float64(i))
- h.Update(int64(i))
- m.Mark(1)
- t.Update(1)
- }
- //log.Println("done", i)
- }(i)
- }
- wg.Wait()
- close(ch)
- wgD.Wait()
- wgR.Wait()
- wgW.Wait()
-}
-
-func Example() {
- c := NewCounter()
- Register("money", c)
- c.Inc(17)
-
- // Threadsafe registration
- t := GetOrRegisterTimer("db.get.latency", nil)
- t.Time(func() { time.Sleep(10 * time.Millisecond) })
- t.Update(1)
-
- fmt.Println(c.Count())
- fmt.Println(t.Min())
- // Output: 17
- // 1
-}
diff --git a/metrics/opentsdb.go b/metrics/opentsdb.go
deleted file mode 100644
index 3fde55454b..0000000000
--- a/metrics/opentsdb.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package metrics
-
-import (
- "bufio"
- "fmt"
- "log"
- "net"
- "os"
- "strings"
- "time"
-)
-
-var shortHostName = ""
-
-// OpenTSDBConfig provides a container with configuration parameters for
-// the OpenTSDB exporter
-type OpenTSDBConfig struct {
- Addr *net.TCPAddr // Network address to connect to
- Registry Registry // Registry to be exported
- FlushInterval time.Duration // Flush interval
- DurationUnit time.Duration // Time conversion unit for durations
- Prefix string // Prefix to be prepended to metric names
-}
-
-// OpenTSDB is a blocking exporter function which reports metrics in r
-// to a TSDB server located at addr, flushing them every d duration
-// and prepending metric names with prefix.
-func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
- OpenTSDBWithConfig(OpenTSDBConfig{
- Addr: addr,
- Registry: r,
- FlushInterval: d,
- DurationUnit: time.Nanosecond,
- Prefix: prefix,
- })
-}
-
-// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
-// but it takes a OpenTSDBConfig instead.
-func OpenTSDBWithConfig(c OpenTSDBConfig) {
- for range time.Tick(c.FlushInterval) {
- if err := openTSDB(&c); nil != err {
- log.Println(err)
- }
- }
-}
-
-func getShortHostname() string {
- if shortHostName == "" {
- host, _ := os.Hostname()
- if index := strings.Index(host, "."); index > 0 {
- shortHostName = host[:index]
- } else {
- shortHostName = host
- }
- }
- return shortHostName
-}
-
-func openTSDB(c *OpenTSDBConfig) error {
- shortHostname := getShortHostname()
- now := time.Now().Unix()
- du := float64(c.DurationUnit)
- conn, err := net.DialTCP("tcp", nil, c.Addr)
- if nil != err {
- return err
- }
- defer conn.Close()
- w := bufio.NewWriter(conn)
- c.Registry.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
- case Gauge:
- fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
- case GaugeFloat64:
- fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
- fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
- fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
- fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
- fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
- case Meter:
- m := metric.Snapshot()
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
- fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
- }
- w.Flush()
- })
- return nil
-}
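// Reviewer note (illustrative values): each Fprintf above emits one line of
// the OpenTSDB telnet put protocol, "put <metric> <timestamp> <value> <tags>".
// A Counter named "pending/txs" under prefix "quai" with value 7 would yield:
//
//	put quai.pending/txs.count 1700000000 7 host=myhost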
diff --git a/metrics/opentsdb_test.go b/metrics/opentsdb_test.go
deleted file mode 100644
index c43728960e..0000000000
--- a/metrics/opentsdb_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package metrics
-
-import (
- "net"
- "time"
-)
-
-func ExampleOpenTSDB() {
- addr, _ := net.ResolveTCPAddr("tcp", ":2003")
- go OpenTSDB(DefaultRegistry, 1*time.Second, "some.prefix", addr)
-}
-
-func ExampleOpenTSDBWithConfig() {
- addr, _ := net.ResolveTCPAddr("tcp", ":2003")
- go OpenTSDBWithConfig(OpenTSDBConfig{
- Addr: addr,
- Registry: DefaultRegistry,
- FlushInterval: 1 * time.Second,
- DurationUnit: time.Millisecond,
- })
-}
diff --git a/metrics/prometheus/collector.go b/metrics/prometheus/collector.go
deleted file mode 100644
index c8737fec72..0000000000
--- a/metrics/prometheus/collector.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package prometheus
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/dominant-strategies/go-quai/metrics"
-)
-
-var (
- typeGaugeTpl = "# TYPE %s gauge\n"
- typeCounterTpl = "# TYPE %s counter\n"
- typeSummaryTpl = "# TYPE %s summary\n"
- keyValueTpl = "%s %v\n\n"
- keyQuantileTagValueTpl = "%s {quantile=\"%s\"} %v\n"
-)
-
-// collector is a collection of byte buffers that aggregate Prometheus reports
-// for different metric types.
-type collector struct {
- buff *bytes.Buffer
-}
-
-// newCollector creates a new Prometheus metric aggregator.
-func newCollector() *collector {
- return &collector{
- buff: &bytes.Buffer{},
- }
-}
-
-func (c *collector) addCounter(name string, m metrics.Counter) {
- c.writeGaugeCounter(name, m.Count())
-}
-
-func (c *collector) addGauge(name string, m metrics.Gauge) {
- c.writeGaugeCounter(name, m.Value())
-}
-
-func (c *collector) addGaugeFloat64(name string, m metrics.GaugeFloat64) {
- c.writeGaugeCounter(name, m.Value())
-}
-
-func (c *collector) addHistogram(name string, m metrics.Histogram) {
- pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
- ps := m.Percentiles(pv)
- c.writeSummaryCounter(name, m.Count())
- c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
- for i := range pv {
- c.writeSummaryPercentile(name, strconv.FormatFloat(pv[i], 'f', -1, 64), ps[i])
- }
- c.buff.WriteRune('\n')
-}
-
-func (c *collector) addMeter(name string, m metrics.Meter) {
- c.writeGaugeCounter(name, m.Count())
-}
-
-func (c *collector) addTimer(name string, m metrics.Timer) {
- pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
- ps := m.Percentiles(pv)
- c.writeSummaryCounter(name, m.Count())
- c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
- for i := range pv {
- c.writeSummaryPercentile(name, strconv.FormatFloat(pv[i], 'f', -1, 64), ps[i])
- }
- c.buff.WriteRune('\n')
-}
-
-func (c *collector) addResettingTimer(name string, m metrics.ResettingTimer) {
- if len(m.Values()) <= 0 {
- return
- }
- ps := m.Percentiles([]float64{50, 95, 99})
- val := m.Values()
- c.writeSummaryCounter(name, len(val))
- c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
- c.writeSummaryPercentile(name, "0.50", ps[0])
- c.writeSummaryPercentile(name, "0.95", ps[1])
- c.writeSummaryPercentile(name, "0.99", ps[2])
- c.buff.WriteRune('\n')
-}
-
-func (c *collector) writeGaugeCounter(name string, value interface{}) {
- name = mutateKey(name)
- c.buff.WriteString(fmt.Sprintf(typeGaugeTpl, name))
- c.buff.WriteString(fmt.Sprintf(keyValueTpl, name, value))
-}
-
-func (c *collector) writeSummaryCounter(name string, value interface{}) {
- name = mutateKey(name + "_count")
- c.buff.WriteString(fmt.Sprintf(typeCounterTpl, name))
- c.buff.WriteString(fmt.Sprintf(keyValueTpl, name, value))
-}
-
-func (c *collector) writeSummaryPercentile(name, p string, value interface{}) {
- name = mutateKey(name)
- c.buff.WriteString(fmt.Sprintf(keyQuantileTagValueTpl, name, p, value))
-}
-
-func mutateKey(key string) string {
- return strings.Replace(key, "/", "_", -1)
-}
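// Reviewer note (illustrative): for a counter registered as "test/counter"
// holding 12345, the writers above emit the following exposition text, with
// mutateKey mapping the "/" separators to Prometheus-legal "_":
//
//	# TYPE test_counter gauge
//	test_counter 12345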
diff --git a/metrics/prometheus/collector_test.go b/metrics/prometheus/collector_test.go
deleted file mode 100644
index d082975cad..0000000000
--- a/metrics/prometheus/collector_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package prometheus
-
-import (
- "os"
- "testing"
- "time"
-
- "github.com/dominant-strategies/go-quai/metrics"
-)
-
-func TestMain(m *testing.M) {
- metrics.Enabled = true
- os.Exit(m.Run())
-}
-
-func TestCollector(t *testing.T) {
- c := newCollector()
-
- counter := metrics.NewCounter()
- counter.Inc(12345)
- c.addCounter("test/counter", counter)
-
- gauge := metrics.NewGauge()
- gauge.Update(23456)
- c.addGauge("test/gauge", gauge)
-
- gaugeFloat64 := metrics.NewGaugeFloat64()
- gaugeFloat64.Update(34567.89)
- c.addGaugeFloat64("test/gauge_float64", gaugeFloat64)
-
- histogram := metrics.NewHistogram(&metrics.NilSample{})
- c.addHistogram("test/histogram", histogram)
-
- meter := metrics.NewMeter()
- defer meter.Stop()
- meter.Mark(9999999)
- c.addMeter("test/meter", meter)
-
- timer := metrics.NewTimer()
- defer timer.Stop()
- timer.Update(20 * time.Millisecond)
- timer.Update(21 * time.Millisecond)
- timer.Update(22 * time.Millisecond)
- timer.Update(120 * time.Millisecond)
- timer.Update(23 * time.Millisecond)
- timer.Update(24 * time.Millisecond)
- c.addTimer("test/timer", timer)
-
- resettingTimer := metrics.NewResettingTimer()
- resettingTimer.Update(10 * time.Millisecond)
- resettingTimer.Update(11 * time.Millisecond)
- resettingTimer.Update(12 * time.Millisecond)
- resettingTimer.Update(120 * time.Millisecond)
- resettingTimer.Update(13 * time.Millisecond)
- resettingTimer.Update(14 * time.Millisecond)
- c.addResettingTimer("test/resetting_timer", resettingTimer.Snapshot())
-
- emptyResettingTimer := metrics.NewResettingTimer().Snapshot()
- c.addResettingTimer("test/empty_resetting_timer", emptyResettingTimer)
-
- const expectedOutput = `# TYPE test_counter gauge
-test_counter 12345
-
-# TYPE test_gauge gauge
-test_gauge 23456
-
-# TYPE test_gauge_float64 gauge
-test_gauge_float64 34567.89
-
-# TYPE test_histogram_count counter
-test_histogram_count 0
-
-# TYPE test_histogram summary
-test_histogram {quantile="0.5"} 0
-test_histogram {quantile="0.75"} 0
-test_histogram {quantile="0.95"} 0
-test_histogram {quantile="0.99"} 0
-test_histogram {quantile="0.999"} 0
-test_histogram {quantile="0.9999"} 0
-
-# TYPE test_meter gauge
-test_meter 9999999
-
-# TYPE test_timer_count counter
-test_timer_count 6
-
-# TYPE test_timer summary
-test_timer {quantile="0.5"} 2.25e+07
-test_timer {quantile="0.75"} 4.8e+07
-test_timer {quantile="0.95"} 1.2e+08
-test_timer {quantile="0.99"} 1.2e+08
-test_timer {quantile="0.999"} 1.2e+08
-test_timer {quantile="0.9999"} 1.2e+08
-
-# TYPE test_resetting_timer_count counter
-test_resetting_timer_count 6
-
-# TYPE test_resetting_timer summary
-test_resetting_timer {quantile="0.50"} 12000000
-test_resetting_timer {quantile="0.95"} 120000000
-test_resetting_timer {quantile="0.99"} 120000000
-
-`
- exp := c.buff.String()
- if exp != expectedOutput {
- t.Log("Expected Output:\n", expectedOutput)
- t.Log("Actual Output:\n", exp)
- t.Fatal("unexpected collector output")
- }
-}
diff --git a/metrics/prometheus/prometheus.go b/metrics/prometheus/prometheus.go
deleted file mode 100644
index 44d43029a2..0000000000
--- a/metrics/prometheus/prometheus.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2019 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Package prometheus exposes go-metrics into a Prometheus format.
-package prometheus
-
-import (
- "fmt"
- "net/http"
- "sort"
-
- "github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
-)
-
-// Handler returns an HTTP handler which dumps metrics in Prometheus format.
-func Handler(reg metrics.Registry) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- // Gather and pre-sort the metrics to avoid random listings
- var names []string
- reg.Each(func(name string, i interface{}) {
- names = append(names, name)
- })
- sort.Strings(names)
-
- // Aggregate all the metrics into a Prometheus collector
- c := newCollector()
-
- for _, name := range names {
- i := reg.Get(name)
-
- switch m := i.(type) {
- case metrics.Counter:
- c.addCounter(name, m.Snapshot())
- case metrics.Gauge:
- c.addGauge(name, m.Snapshot())
- case metrics.GaugeFloat64:
- c.addGaugeFloat64(name, m.Snapshot())
- case metrics.Histogram:
- c.addHistogram(name, m.Snapshot())
- case metrics.Meter:
- c.addMeter(name, m.Snapshot())
- case metrics.Timer:
- c.addTimer(name, m.Snapshot())
- case metrics.ResettingTimer:
- c.addResettingTimer(name, m.Snapshot())
- default:
- log.Warn("Unknown Prometheus metric type", "type", fmt.Sprintf("%T", i))
- }
- }
- w.Header().Add("Content-Type", "text/plain")
- w.Header().Add("Content-Length", fmt.Sprint(c.buff.Len()))
- w.Write(c.buff.Bytes())
- })
-}
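// Reviewer note (illustrative sketch, not part of the diff): how the deleted
// handler was typically mounted; the path and port here are assumptions, not
// the node's actual wiring.
package main

import (
	"net/http"

	"github.com/dominant-strategies/go-quai/metrics"
	"github.com/dominant-strategies/go-quai/metrics/prometheus"
)

func main() {
	http.Handle("/debug/metrics/prometheus", prometheus.Handler(metrics.DefaultRegistry))
	_ = http.ListenAndServe(":6060", nil)
}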
diff --git a/metrics/registry.go b/metrics/registry.go
deleted file mode 100644
index c5435adf24..0000000000
--- a/metrics/registry.go
+++ /dev/null
@@ -1,358 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
-)
-
-// DuplicateMetric is the error returned by Registry.Register when a metric
-// already exists. If you mean to Register that metric you must first
-// Unregister the existing metric.
-type DuplicateMetric string
-
-func (err DuplicateMetric) Error() string {
- return fmt.Sprintf("duplicate metric: %s", string(err))
-}
-
-// A Registry holds references to a set of metrics by name and can iterate
-// over them, calling callback functions provided by the user.
-//
-// This is an interface so as to encourage other structs to implement
-// the Registry API as appropriate.
-type Registry interface {
-
- // Call the given function for each registered metric.
- Each(func(string, interface{}))
-
- // Get the metric by the given name or nil if none is registered.
- Get(string) interface{}
-
- // GetAll metrics in the Registry.
- GetAll() map[string]map[string]interface{}
-
- // Gets an existing metric or registers the given one.
- // The interface can be the metric to register if not found in registry,
- // or a function returning the metric for lazy instantiation.
- GetOrRegister(string, interface{}) interface{}
-
- // Register the given metric under the given name.
- Register(string, interface{}) error
-
- // Run all registered healthchecks.
- RunHealthchecks()
-
- // Unregister the metric with the given name.
- Unregister(string)
-
- // Unregister all metrics. (Mostly for testing.)
- UnregisterAll()
-}
-
-// The standard implementation of a Registry is a mutex-protected map
-// of names to metrics.
-type StandardRegistry struct {
- metrics map[string]interface{}
- mutex sync.Mutex
-}
-
-// Create a new registry.
-func NewRegistry() Registry {
- return &StandardRegistry{metrics: make(map[string]interface{})}
-}
-
-// Call the given function for each registered metric.
-func (r *StandardRegistry) Each(f func(string, interface{})) {
- for name, i := range r.registered() {
- f(name, i)
- }
-}
-
-// Get the metric by the given name or nil if none is registered.
-func (r *StandardRegistry) Get(name string) interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- return r.metrics[name]
-}
-
-// Gets an existing metric or creates and registers a new one. Threadsafe
-// alternative to calling Get and Register on failure.
-// The interface can be the metric to register if not found in registry,
-// or a function returning the metric for lazy instantiation.
-func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- if metric, ok := r.metrics[name]; ok {
- return metric
- }
- if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
- i = v.Call(nil)[0].Interface()
- }
- r.register(name, i)
- return i
-}
-
-// Register the given metric under the given name. Returns a DuplicateMetric
-// if a metric by the given name is already registered.
-func (r *StandardRegistry) Register(name string, i interface{}) error {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- return r.register(name, i)
-}
-
-// Run all registered healthchecks.
-func (r *StandardRegistry) RunHealthchecks() {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- for _, i := range r.metrics {
- if h, ok := i.(Healthcheck); ok {
- h.Check()
- }
- }
-}
-
-// GetAll metrics in the Registry
-func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
- data := make(map[string]map[string]interface{})
- r.Each(func(name string, i interface{}) {
- values := make(map[string]interface{})
- switch metric := i.(type) {
- case Counter:
- values["count"] = metric.Count()
- case Gauge:
- values["value"] = metric.Value()
- case GaugeFloat64:
- values["value"] = metric.Value()
- case Healthcheck:
- values["error"] = nil
- metric.Check()
- if err := metric.Error(); nil != err {
- values["error"] = metric.Error().Error()
- }
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- values["count"] = h.Count()
- values["min"] = h.Min()
- values["max"] = h.Max()
- values["mean"] = h.Mean()
- values["stddev"] = h.StdDev()
- values["median"] = ps[0]
- values["75%"] = ps[1]
- values["95%"] = ps[2]
- values["99%"] = ps[3]
- values["99.9%"] = ps[4]
- case Meter:
- m := metric.Snapshot()
- values["count"] = m.Count()
- values["1m.rate"] = m.Rate1()
- values["5m.rate"] = m.Rate5()
- values["15m.rate"] = m.Rate15()
- values["mean.rate"] = m.RateMean()
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- values["count"] = t.Count()
- values["min"] = t.Min()
- values["max"] = t.Max()
- values["mean"] = t.Mean()
- values["stddev"] = t.StdDev()
- values["median"] = ps[0]
- values["75%"] = ps[1]
- values["95%"] = ps[2]
- values["99%"] = ps[3]
- values["99.9%"] = ps[4]
- values["1m.rate"] = t.Rate1()
- values["5m.rate"] = t.Rate5()
- values["15m.rate"] = t.Rate15()
- values["mean.rate"] = t.RateMean()
- }
- data[name] = values
- })
- return data
-}
-
-// Unregister the metric with the given name.
-func (r *StandardRegistry) Unregister(name string) {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- r.stop(name)
- delete(r.metrics, name)
-}
-
-// Unregister all metrics. (Mostly for testing.)
-func (r *StandardRegistry) UnregisterAll() {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- for name := range r.metrics {
- r.stop(name)
- delete(r.metrics, name)
- }
-}
-
-func (r *StandardRegistry) register(name string, i interface{}) error {
- if _, ok := r.metrics[name]; ok {
- return DuplicateMetric(name)
- }
- switch i.(type) {
- case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer, ResettingTimer:
- r.metrics[name] = i
- }
- return nil
-}
-
-func (r *StandardRegistry) registered() map[string]interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- metrics := make(map[string]interface{}, len(r.metrics))
- for name, i := range r.metrics {
- metrics[name] = i
- }
- return metrics
-}
-
-func (r *StandardRegistry) stop(name string) {
- if i, ok := r.metrics[name]; ok {
- if s, ok := i.(Stoppable); ok {
- s.Stop()
- }
- }
-}
-
-// Stoppable defines metrics that have to be stopped.
-type Stoppable interface {
- Stop()
-}
-
-type PrefixedRegistry struct {
- underlying Registry
- prefix string
-}
-
-func NewPrefixedRegistry(prefix string) Registry {
- return &PrefixedRegistry{
- underlying: NewRegistry(),
- prefix: prefix,
- }
-}
-
-func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
- return &PrefixedRegistry{
- underlying: parent,
- prefix: prefix,
- }
-}
-
-// Call the given function for each registered metric.
-func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
- wrappedFn := func(prefix string) func(string, interface{}) {
- return func(name string, iface interface{}) {
- if strings.HasPrefix(name, prefix) {
- fn(name, iface)
- } else {
- return
- }
- }
- }
-
- baseRegistry, prefix := findPrefix(r, "")
- baseRegistry.Each(wrappedFn(prefix))
-}
-
-func findPrefix(registry Registry, prefix string) (Registry, string) {
- switch r := registry.(type) {
- case *PrefixedRegistry:
- return findPrefix(r.underlying, r.prefix+prefix)
- case *StandardRegistry:
- return r, prefix
- }
- return nil, ""
-}
-
-// Get the metric by the given name or nil if none is registered.
-func (r *PrefixedRegistry) Get(name string) interface{} {
- realName := r.prefix + name
- return r.underlying.Get(realName)
-}
-
-// Gets an existing metric or registers the given one.
-// The interface can be the metric to register if not found in registry,
-// or a function returning the metric for lazy instantiation.
-func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
- realName := r.prefix + name
- return r.underlying.GetOrRegister(realName, metric)
-}
-
-// Register the given metric under the given name. The name will be prefixed.
-func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
- realName := r.prefix + name
- return r.underlying.Register(realName, metric)
-}
-
-// Run all registered healthchecks.
-func (r *PrefixedRegistry) RunHealthchecks() {
- r.underlying.RunHealthchecks()
-}
-
-// GetAll metrics in the Registry
-func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} {
- return r.underlying.GetAll()
-}
-
-// Unregister the metric with the given name. The name will be prefixed.
-func (r *PrefixedRegistry) Unregister(name string) {
- realName := r.prefix + name
- r.underlying.Unregister(realName)
-}
-
-// Unregister all metrics. (Mostly for testing.)
-func (r *PrefixedRegistry) UnregisterAll() {
- r.underlying.UnregisterAll()
-}
-
-var (
- DefaultRegistry = NewRegistry()
- EphemeralRegistry = NewRegistry()
- AccountingRegistry = NewRegistry() // registry used in swarm
-)
-
-// Call the given function for each registered metric.
-func Each(f func(string, interface{})) {
- DefaultRegistry.Each(f)
-}
-
-// Get the metric by the given name or nil if none is registered.
-func Get(name string) interface{} {
- return DefaultRegistry.Get(name)
-}
-
-// Gets an existing metric or creates and registers a new one. Threadsafe
-// alternative to calling Get and Register on failure.
-func GetOrRegister(name string, i interface{}) interface{} {
- return DefaultRegistry.GetOrRegister(name, i)
-}
-
-// Register the given metric under the given name. Returns a DuplicateMetric
-// if a metric by the given name is already registered.
-func Register(name string, i interface{}) error {
- return DefaultRegistry.Register(name, i)
-}
-
-// Register the given metric under the given name. Panics if a metric by the
-// given name is already registered.
-func MustRegister(name string, i interface{}) {
- if err := Register(name, i); err != nil {
- panic(err)
- }
-}
-
-// Run all registered healthchecks.
-func RunHealthchecks() {
- DefaultRegistry.RunHealthchecks()
-}
-
-// Unregister the metric with the given name.
-func Unregister(name string) {
- DefaultRegistry.Unregister(name)
-}
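// Reviewer note (illustrative sketch, not part of the diff): the lazy
// instantiation contract documented above — pass the constructor, not a
// built metric, so nothing is allocated when the name is already taken.
package main

import "github.com/dominant-strategies/go-quai/metrics"

func main() {
	c := metrics.GetOrRegister("api/requests", metrics.NewCounter).(metrics.Counter)
	c.Inc(1) // every later call with the same name returns this Counter
}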
diff --git a/metrics/registry_test.go b/metrics/registry_test.go
deleted file mode 100644
index 6cfedfd88f..0000000000
--- a/metrics/registry_test.go
+++ /dev/null
@@ -1,311 +0,0 @@
-package metrics
-
-import (
- "testing"
-)
-
-func BenchmarkRegistry(b *testing.B) {
- r := NewRegistry()
- r.Register("foo", NewCounter())
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- r.Each(func(string, interface{}) {})
- }
-}
-
-func TestRegistry(t *testing.T) {
- r := NewRegistry()
- r.Register("foo", NewCounter())
- i := 0
- r.Each(func(name string, iface interface{}) {
- i++
- if name != "foo" {
- t.Fatal(name)
- }
- if _, ok := iface.(Counter); !ok {
- t.Fatal(iface)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
- r.Unregister("foo")
- i = 0
- r.Each(func(string, interface{}) { i++ })
- if i != 0 {
- t.Fatal(i)
- }
-}
-
-func TestRegistryDuplicate(t *testing.T) {
- r := NewRegistry()
- if err := r.Register("foo", NewCounter()); nil != err {
- t.Fatal(err)
- }
- if err := r.Register("foo", NewGauge()); nil == err {
- t.Fatal(err)
- }
- i := 0
- r.Each(func(name string, iface interface{}) {
- i++
- if _, ok := iface.(Counter); !ok {
- t.Fatal(iface)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
-}
-
-func TestRegistryGet(t *testing.T) {
- r := NewRegistry()
- r.Register("foo", NewCounter())
- if count := r.Get("foo").(Counter).Count(); count != 0 {
- t.Fatal(count)
- }
- r.Get("foo").(Counter).Inc(1)
- if count := r.Get("foo").(Counter).Count(); count != 1 {
- t.Fatal(count)
- }
-}
-
-func TestRegistryGetOrRegister(t *testing.T) {
- r := NewRegistry()
-
- // First metric wins with GetOrRegister
- _ = r.GetOrRegister("foo", NewCounter())
- m := r.GetOrRegister("foo", NewGauge())
- if _, ok := m.(Counter); !ok {
- t.Fatal(m)
- }
-
- i := 0
- r.Each(func(name string, iface interface{}) {
- i++
- if name != "foo" {
- t.Fatal(name)
- }
- if _, ok := iface.(Counter); !ok {
- t.Fatal(iface)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
-}
-
-func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) {
- r := NewRegistry()
-
- // First metric wins with GetOrRegister
- _ = r.GetOrRegister("foo", NewCounter)
- m := r.GetOrRegister("foo", NewGauge)
- if _, ok := m.(Counter); !ok {
- t.Fatal(m)
- }
-
- i := 0
- r.Each(func(name string, iface interface{}) {
- i++
- if name != "foo" {
- t.Fatal(name)
- }
- if _, ok := iface.(Counter); !ok {
- t.Fatal(iface)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
-}
-
-func TestRegistryUnregister(t *testing.T) {
- l := len(arbiter.meters)
- r := NewRegistry()
- r.Register("foo", NewCounter())
- r.Register("bar", NewMeter())
- r.Register("baz", NewTimer())
- if len(arbiter.meters) != l+2 {
- t.Errorf("arbiter.meters: %d != %d\n", l+2, len(arbiter.meters))
- }
- r.Unregister("foo")
- r.Unregister("bar")
- r.Unregister("baz")
- if len(arbiter.meters) != l {
- t.Errorf("arbiter.meters: %d != %d\n", l+2, len(arbiter.meters))
- }
-}
-
-func TestPrefixedChildRegistryGetOrRegister(t *testing.T) {
- r := NewRegistry()
- pr := NewPrefixedChildRegistry(r, "prefix.")
-
- _ = pr.GetOrRegister("foo", NewCounter())
-
- i := 0
- r.Each(func(name string, m interface{}) {
- i++
- if name != "prefix.foo" {
- t.Fatal(name)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
-}
-
-func TestPrefixedRegistryGetOrRegister(t *testing.T) {
- r := NewPrefixedRegistry("prefix.")
-
- _ = r.GetOrRegister("foo", NewCounter())
-
- i := 0
- r.Each(func(name string, m interface{}) {
- i++
- if name != "prefix.foo" {
- t.Fatal(name)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
-}
-
-func TestPrefixedRegistryRegister(t *testing.T) {
- r := NewPrefixedRegistry("prefix.")
- err := r.Register("foo", NewCounter())
- c := NewCounter()
- Register("bar", c)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- i := 0
- r.Each(func(name string, m interface{}) {
- i++
- if name != "prefix.foo" {
- t.Fatal(name)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
-}
-
-func TestPrefixedRegistryUnregister(t *testing.T) {
- r := NewPrefixedRegistry("prefix.")
-
- _ = r.Register("foo", NewCounter())
-
- i := 0
- r.Each(func(name string, m interface{}) {
- i++
- if name != "prefix.foo" {
- t.Fatal(name)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
-
- r.Unregister("foo")
-
- i = 0
- r.Each(func(name string, m interface{}) {
- i++
- })
-
- if i != 0 {
- t.Fatal(i)
- }
-}
-
-func TestPrefixedRegistryGet(t *testing.T) {
- pr := NewPrefixedRegistry("prefix.")
- name := "foo"
- pr.Register(name, NewCounter())
-
- fooCounter := pr.Get(name)
- if fooCounter == nil {
- t.Fatal(name)
- }
-}
-
-func TestPrefixedChildRegistryGet(t *testing.T) {
- r := NewRegistry()
- pr := NewPrefixedChildRegistry(r, "prefix.")
- name := "foo"
- pr.Register(name, NewCounter())
- fooCounter := pr.Get(name)
- if fooCounter == nil {
- t.Fatal(name)
- }
-}
-
-func TestChildPrefixedRegistryRegister(t *testing.T) {
- r := NewPrefixedChildRegistry(DefaultRegistry, "prefix.")
- err := r.Register("foo", NewCounter())
- c := NewCounter()
- Register("bar", c)
- if err != nil {
- t.Fatal(err.Error())
- }
-
- i := 0
- r.Each(func(name string, m interface{}) {
- i++
- if name != "prefix.foo" {
- t.Fatal(name)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
-}
-
-func TestChildPrefixedRegistryOfChildRegister(t *testing.T) {
- r := NewPrefixedChildRegistry(NewRegistry(), "prefix.")
- r2 := NewPrefixedChildRegistry(r, "prefix2.")
- err := r.Register("foo2", NewCounter())
- if err != nil {
- t.Fatal(err.Error())
- }
- err = r2.Register("baz", NewCounter())
- if err != nil {
- t.Fatal(err.Error())
- }
- c := NewCounter()
- Register("bars", c)
-
- i := 0
- r2.Each(func(name string, m interface{}) {
- i++
- if name != "prefix.prefix2.baz" {
- t.Fatal(name)
- }
- })
- if i != 1 {
- t.Fatal(i)
- }
-}
-
-func TestWalkRegistries(t *testing.T) {
- r := NewPrefixedChildRegistry(NewRegistry(), "prefix.")
- r2 := NewPrefixedChildRegistry(r, "prefix2.")
- err := r.Register("foo2", NewCounter())
- if err != nil {
- t.Fatal(err.Error())
- }
- err = r2.Register("baz", NewCounter())
- if err != nil {
- t.Fatal(err.Error())
- }
- c := NewCounter()
- Register("bars", c)
-
- _, prefix := findPrefix(r2, "")
- if prefix != "prefix.prefix2." {
- t.Fatal(prefix)
- }
-
-}
diff --git a/metrics/resetting_sample.go b/metrics/resetting_sample.go
deleted file mode 100644
index 43c1129cd0..0000000000
--- a/metrics/resetting_sample.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package metrics
-
-// ResettingSample converts an ordinary sample into one that resets whenever its
-// snapshot is retrieved. This will break for multi-monitor systems, but when only
-// a single metric is being pushed out, this ensures that low-frequency events don't
-// skew the charts indefinitely.
-func ResettingSample(sample Sample) Sample {
- return &resettingSample{
- Sample: sample,
- }
-}
-
-// resettingSample is a simple wrapper around a sample that resets it upon the
-// snapshot retrieval.
-type resettingSample struct {
- Sample
-}
-
-// Snapshot returns a read-only copy of the sample with the original reset.
-func (rs *resettingSample) Snapshot() Sample {
- s := rs.Sample.Snapshot()
- rs.Sample.Clear()
- return s
-}
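// Reviewer note (illustrative sketch, not part of the diff): wrapping a
// sample so each snapshot covers only the interval since the previous one;
// the metric name and sample parameters are assumptions.
package main

import "github.com/dominant-strategies/go-quai/metrics"

func main() {
	h := metrics.NewRegisteredHistogram("block/verify", nil,
		metrics.ResettingSample(metrics.NewExpDecaySample(1028, 0.015)))
	h.Update(42)
	_ = h.Snapshot() // reads the sample and clears it for the next interval
}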
diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go
deleted file mode 100644
index e5327d3bd3..0000000000
--- a/metrics/resetting_timer.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package metrics
-
-import (
- "math"
- "sort"
- "sync"
- "time"
-)
-
-// Initial slice capacity for the values stored in a ResettingTimer
-const InitialResettingTimerSliceCap = 10
-
-// ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval.
-type ResettingTimer interface {
- Values() []int64
- Snapshot() ResettingTimer
- Percentiles([]float64) []int64
- Mean() float64
- Time(func())
- Update(time.Duration)
- UpdateSince(time.Time)
-}
-
-// GetOrRegisterResettingTimer returns an existing ResettingTimer or constructs and registers a
-// new StandardResettingTimer.
-func GetOrRegisterResettingTimer(name string, r Registry) ResettingTimer {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewResettingTimer).(ResettingTimer)
-}
-
-// NewRegisteredResettingTimer constructs and registers a new StandardResettingTimer.
-func NewRegisteredResettingTimer(name string, r Registry) ResettingTimer {
- c := NewResettingTimer()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewResettingTimer constructs a new StandardResettingTimer
-func NewResettingTimer() ResettingTimer {
- if !Enabled {
- return NilResettingTimer{}
- }
- return &StandardResettingTimer{
- values: make([]int64, 0, InitialResettingTimerSliceCap),
- }
-}
-
-// NilResettingTimer is a no-op ResettingTimer.
-type NilResettingTimer struct {
-}
-
-// Values is a no-op.
-func (NilResettingTimer) Values() []int64 { return nil }
-
-// Snapshot is a no-op.
-func (NilResettingTimer) Snapshot() ResettingTimer {
- return &ResettingTimerSnapshot{
- values: []int64{},
- }
-}
-
-// Time is a no-op.
-func (NilResettingTimer) Time(func()) {}
-
-// Update is a no-op.
-func (NilResettingTimer) Update(time.Duration) {}
-
-// Percentiles panics.
-func (NilResettingTimer) Percentiles([]float64) []int64 {
- panic("Percentiles called on a NilResettingTimer")
-}
-
-// Mean panics.
-func (NilResettingTimer) Mean() float64 {
- panic("Mean called on a NilResettingTimer")
-}
-
-// UpdateSince is a no-op.
-func (NilResettingTimer) UpdateSince(time.Time) {}
-
-// StandardResettingTimer is the standard implementation of a ResettingTimer.
-type StandardResettingTimer struct {
- values []int64
- mutex sync.Mutex
-}
-
-// Values returns a slice with all measurements.
-func (t *StandardResettingTimer) Values() []int64 {
- return t.values
-}
-
-// Snapshot resets the timer and returns a read-only copy of its contents.
-func (t *StandardResettingTimer) Snapshot() ResettingTimer {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- currentValues := t.values
- t.values = make([]int64, 0, InitialResettingTimerSliceCap)
-
- return &ResettingTimerSnapshot{
- values: currentValues,
- }
-}
-
-// Percentiles panics.
-func (t *StandardResettingTimer) Percentiles([]float64) []int64 {
- panic("Percentiles called on a StandardResettingTimer")
-}
-
-// Mean panics.
-func (t *StandardResettingTimer) Mean() float64 {
- panic("Mean called on a StandardResettingTimer")
-}
-
-// Record the duration of the execution of the given function.
-func (t *StandardResettingTimer) Time(f func()) {
- ts := time.Now()
- f()
- t.Update(time.Since(ts))
-}
-
-// Record the duration of an event.
-func (t *StandardResettingTimer) Update(d time.Duration) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.values = append(t.values, int64(d))
-}
-
-// Record the duration of an event that started at a time and ends now.
-func (t *StandardResettingTimer) UpdateSince(ts time.Time) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.values = append(t.values, int64(time.Since(ts)))
-}
-
-// ResettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
-type ResettingTimerSnapshot struct {
- values []int64
- mean float64
- thresholdBoundaries []int64
- calculated bool
-}
-
-// Snapshot returns the snapshot.
-func (t *ResettingTimerSnapshot) Snapshot() ResettingTimer { return t }
-
-// Time panics.
-func (*ResettingTimerSnapshot) Time(func()) {
- panic("Time called on a ResettingTimerSnapshot")
-}
-
-// Update panics.
-func (*ResettingTimerSnapshot) Update(time.Duration) {
- panic("Update called on a ResettingTimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*ResettingTimerSnapshot) UpdateSince(time.Time) {
- panic("UpdateSince called on a ResettingTimerSnapshot")
-}
-
-// Values returns all values from snapshot.
-func (t *ResettingTimerSnapshot) Values() []int64 {
- return t.values
-}
-
-// Percentiles returns the boundaries for the input percentiles.
-func (t *ResettingTimerSnapshot) Percentiles(percentiles []float64) []int64 {
- t.calc(percentiles)
-
- return t.thresholdBoundaries
-}
-
-// Mean returns the mean of the snapshotted values
-func (t *ResettingTimerSnapshot) Mean() float64 {
- if !t.calculated {
- t.calc([]float64{})
- }
-
- return t.mean
-}
-
-func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
- sort.Sort(Int64Slice(t.values))
-
- count := len(t.values)
- if count > 0 {
- min := t.values[0]
- max := t.values[count-1]
-
- cumulativeValues := make([]int64, count)
- cumulativeValues[0] = min
- for i := 1; i < count; i++ {
- cumulativeValues[i] = t.values[i] + cumulativeValues[i-1]
- }
-
- t.thresholdBoundaries = make([]int64, len(percentiles))
-
- thresholdBoundary := max
-
- for i, pct := range percentiles {
- if count > 1 {
- var abs float64
- if pct >= 0 {
- abs = pct
- } else {
- abs = 100 + pct
- }
- // poor man's math.Round(x):
- // math.Floor(x + 0.5)
- indexOfPerc := int(math.Floor(((abs / 100.0) * float64(count)) + 0.5))
- if pct >= 0 && indexOfPerc > 0 {
- indexOfPerc -= 1 // index offset=0
- }
- thresholdBoundary = t.values[indexOfPerc]
- }
-
- t.thresholdBoundaries[i] = thresholdBoundary
- }
-
- sum := cumulativeValues[count-1]
- t.mean = float64(sum) / float64(count)
- } else {
- t.thresholdBoundaries = make([]int64, len(percentiles))
- t.mean = 0
- }
-
- t.calculated = true
-}
-
-// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order.
-type Int64Slice []int64
-
-func (s Int64Slice) Len() int { return len(s) }
-func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
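// Reviewer note (illustrative sketch, not part of the diff): the
// flush-interval pattern the resetting timer exists for. Percentiles must be
// taken on the snapshot; the live timer's Percentiles panics by design.
package main

import (
	"time"

	"github.com/dominant-strategies/go-quai/metrics"
)

func main() {
	t := metrics.GetOrRegisterResettingTimer("chain/insert", nil)
	t.Time(func() { time.Sleep(time.Millisecond) }) // record one duration
	snap := t.Snapshot()                            // drains and resets the timer
	_ = snap.Percentiles([]float64{50, 95, 99})     // p50/p95/p99 in nanoseconds
}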
diff --git a/metrics/resetting_timer_test.go b/metrics/resetting_timer_test.go
deleted file mode 100644
index 77c49dc386..0000000000
--- a/metrics/resetting_timer_test.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package metrics
-
-import (
- "testing"
- "time"
-)
-
-func TestResettingTimer(t *testing.T) {
- tests := []struct {
- values []int64
- start int
- end int
- wantP50 int64
- wantP95 int64
- wantP99 int64
- wantMean float64
- wantMin int64
- wantMax int64
- }{
- {
- values: []int64{},
- start: 1,
- end: 11,
- wantP50: 5, wantP95: 10, wantP99: 10,
- wantMin: 1, wantMax: 10, wantMean: 5.5,
- },
- {
- values: []int64{},
- start: 1,
- end: 101,
- wantP50: 50, wantP95: 95, wantP99: 99,
- wantMin: 1, wantMax: 100, wantMean: 50.5,
- },
- {
- values: []int64{1},
- start: 0,
- end: 0,
- wantP50: 1, wantP95: 1, wantP99: 1,
- wantMin: 1, wantMax: 1, wantMean: 1,
- },
- {
- values: []int64{0},
- start: 0,
- end: 0,
- wantP50: 0, wantP95: 0, wantP99: 0,
- wantMin: 0, wantMax: 0, wantMean: 0,
- },
- {
- values: []int64{},
- start: 0,
- end: 0,
- wantP50: 0, wantP95: 0, wantP99: 0,
- wantMin: 0, wantMax: 0, wantMean: 0,
- },
- {
- values: []int64{1, 10},
- start: 0,
- end: 0,
- wantP50: 1, wantP95: 10, wantP99: 10,
- wantMin: 1, wantMax: 10, wantMean: 5.5,
- },
- }
- for ind, tt := range tests {
- timer := NewResettingTimer()
-
- for i := tt.start; i < tt.end; i++ {
- tt.values = append(tt.values, int64(i))
- }
-
- for _, v := range tt.values {
- timer.Update(time.Duration(v))
- }
-
- snap := timer.Snapshot()
-
- ps := snap.Percentiles([]float64{50, 95, 99})
-
- val := snap.Values()
-
- if len(val) > 0 {
- if tt.wantMin != val[0] {
- t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin)
- }
-
- if tt.wantMax != val[len(val)-1] {
- t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax)
- }
- }
-
- if tt.wantMean != snap.Mean() {
- t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
- }
-
- if tt.wantP50 != ps[0] {
- t.Fatalf("%d: p50: got %d, want %d", ind, ps[0], tt.wantP50)
- }
-
- if tt.wantP95 != ps[1] {
- t.Fatalf("%d: p95: got %d, want %d", ind, ps[1], tt.wantP95)
- }
-
- if tt.wantP99 != ps[2] {
- t.Fatalf("%d: p99: got %d, want %d", ind, ps[2], tt.wantP99)
- }
- }
-}
-
-func TestResettingTimerWithFivePercentiles(t *testing.T) {
- tests := []struct {
- values []int64
- start int
- end int
- wantP05 int64
- wantP20 int64
- wantP50 int64
- wantP95 int64
- wantP99 int64
- wantMean float64
- wantMin int64
- wantMax int64
- }{
- {
- values: []int64{},
- start: 1,
- end: 11,
- wantP05: 1, wantP20: 2, wantP50: 5, wantP95: 10, wantP99: 10,
- wantMin: 1, wantMax: 10, wantMean: 5.5,
- },
- {
- values: []int64{},
- start: 1,
- end: 101,
- wantP05: 5, wantP20: 20, wantP50: 50, wantP95: 95, wantP99: 99,
- wantMin: 1, wantMax: 100, wantMean: 50.5,
- },
- {
- values: []int64{1},
- start: 0,
- end: 0,
- wantP05: 1, wantP20: 1, wantP50: 1, wantP95: 1, wantP99: 1,
- wantMin: 1, wantMax: 1, wantMean: 1,
- },
- {
- values: []int64{0},
- start: 0,
- end: 0,
- wantP05: 0, wantP20: 0, wantP50: 0, wantP95: 0, wantP99: 0,
- wantMin: 0, wantMax: 0, wantMean: 0,
- },
- {
- values: []int64{},
- start: 0,
- end: 0,
- wantP05: 0, wantP20: 0, wantP50: 0, wantP95: 0, wantP99: 0,
- wantMin: 0, wantMax: 0, wantMean: 0,
- },
- {
- values: []int64{1, 10},
- start: 0,
- end: 0,
- wantP05: 1, wantP20: 1, wantP50: 1, wantP95: 10, wantP99: 10,
- wantMin: 1, wantMax: 10, wantMean: 5.5,
- },
- }
- for ind, tt := range tests {
- timer := NewResettingTimer()
-
- for i := tt.start; i < tt.end; i++ {
- tt.values = append(tt.values, int64(i))
- }
-
- for _, v := range tt.values {
- timer.Update(time.Duration(v))
- }
-
- snap := timer.Snapshot()
-
- ps := snap.Percentiles([]float64{5, 20, 50, 95, 99})
-
- val := snap.Values()
-
- if len(val) > 0 {
- if tt.wantMin != val[0] {
- t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin)
- }
-
- if tt.wantMax != val[len(val)-1] {
- t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax)
- }
- }
-
- if tt.wantMean != snap.Mean() {
- t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean)
- }
-
- if tt.wantP05 != ps[0] {
- t.Fatalf("%d: p05: got %d, want %d", ind, ps[0], tt.wantP05)
- }
-
- if tt.wantP20 != ps[1] {
- t.Fatalf("%d: p20: got %d, want %d", ind, ps[1], tt.wantP20)
- }
-
- if tt.wantP50 != ps[2] {
- t.Fatalf("%d: p50: got %d, want %d", ind, ps[2], tt.wantP50)
- }
-
- if tt.wantP95 != ps[3] {
- t.Fatalf("%d: p95: got %d, want %d", ind, ps[3], tt.wantP95)
- }
-
- if tt.wantP99 != ps[4] {
- t.Fatalf("%d: p99: got %d, want %d", ind, ps[4], tt.wantP99)
- }
- }
-}
diff --git a/metrics/runtime.go b/metrics/runtime.go
deleted file mode 100644
index 9450c479ba..0000000000
--- a/metrics/runtime.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package metrics
-
-import (
- "runtime"
- "runtime/pprof"
- "time"
-)
-
-var (
- memStats runtime.MemStats
- runtimeMetrics struct {
- MemStats struct {
- Alloc Gauge
- BuckHashSys Gauge
- DebugGC Gauge
- EnableGC Gauge
- Frees Gauge
- HeapAlloc Gauge
- HeapIdle Gauge
- HeapInuse Gauge
- HeapObjects Gauge
- HeapReleased Gauge
- HeapSys Gauge
- LastGC Gauge
- Lookups Gauge
- Mallocs Gauge
- MCacheInuse Gauge
- MCacheSys Gauge
- MSpanInuse Gauge
- MSpanSys Gauge
- NextGC Gauge
- NumGC Gauge
- GCCPUFraction GaugeFloat64
- PauseNs Histogram
- PauseTotalNs Gauge
- StackInuse Gauge
- StackSys Gauge
- Sys Gauge
- TotalAlloc Gauge
- }
- NumCgoCall Gauge
- NumGoroutine Gauge
- NumThread Gauge
- ReadMemStats Timer
- }
- frees uint64
- lookups uint64
- mallocs uint64
- numGC uint32
- numCgoCalls int64
-
- threadCreateProfile = pprof.Lookup("threadcreate")
-)
-
-// Capture new values for the Go runtime statistics exported in
-// runtime.MemStats. This is designed to be called as a goroutine.
-func CaptureRuntimeMemStats(r Registry, d time.Duration) {
- for range time.Tick(d) {
- CaptureRuntimeMemStatsOnce(r)
- }
-}
-
-// Capture new values for the Go runtime statistics exported in
-// runtime.MemStats. This is designed to be called in a background
-// goroutine. Giving a registry which has not been given to
-// RegisterRuntimeMemStats will panic.
-//
-// Be very careful with this because runtime.ReadMemStats calls the C
-// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
-// and that last one does what it says on the tin.
-func CaptureRuntimeMemStatsOnce(r Registry) {
- t := time.Now()
- runtime.ReadMemStats(&memStats) // This takes 50-200us.
- runtimeMetrics.ReadMemStats.UpdateSince(t)
-
- runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
- runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
- if memStats.DebugGC {
- runtimeMetrics.MemStats.DebugGC.Update(1)
- } else {
- runtimeMetrics.MemStats.DebugGC.Update(0)
- }
- if memStats.EnableGC {
- runtimeMetrics.MemStats.EnableGC.Update(1)
- } else {
- runtimeMetrics.MemStats.EnableGC.Update(0)
- }
-
- runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
- runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
- runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
- runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
- runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
- runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
- runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
- runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
- runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
- runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
- runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
- runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
- runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
- runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
- runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
- runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
- runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
-
- // memStats.PauseNs is a circular buffer of the most recent GC pause
- // durations; walk only the entries recorded since the last capture.
- i := numGC % uint32(len(memStats.PauseNs))
- ii := memStats.NumGC % uint32(len(memStats.PauseNs))
- if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
- for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- } else {
- if i > ii {
- for ; i < uint32(len(memStats.PauseNs)); i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- i = 0
- }
- for ; i < ii; i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- }
- frees = memStats.Frees
- lookups = memStats.Lookups
- mallocs = memStats.Mallocs
- numGC = memStats.NumGC
-
- runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
- runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
- runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
- runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
- runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
-
- currentNumCgoCalls := numCgoCall()
- runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
- numCgoCalls = currentNumCgoCalls
-
- runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
-
- runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
-}
-
-// Register runtimeMetrics for the Go runtime statistics exported in runtime and
-// specifically runtime.MemStats. The runtimeMetrics are named by their
-// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
-func RegisterRuntimeMemStats(r Registry) {
- runtimeMetrics.MemStats.Alloc = NewGauge()
- runtimeMetrics.MemStats.BuckHashSys = NewGauge()
- runtimeMetrics.MemStats.DebugGC = NewGauge()
- runtimeMetrics.MemStats.EnableGC = NewGauge()
- runtimeMetrics.MemStats.Frees = NewGauge()
- runtimeMetrics.MemStats.HeapAlloc = NewGauge()
- runtimeMetrics.MemStats.HeapIdle = NewGauge()
- runtimeMetrics.MemStats.HeapInuse = NewGauge()
- runtimeMetrics.MemStats.HeapObjects = NewGauge()
- runtimeMetrics.MemStats.HeapReleased = NewGauge()
- runtimeMetrics.MemStats.HeapSys = NewGauge()
- runtimeMetrics.MemStats.LastGC = NewGauge()
- runtimeMetrics.MemStats.Lookups = NewGauge()
- runtimeMetrics.MemStats.Mallocs = NewGauge()
- runtimeMetrics.MemStats.MCacheInuse = NewGauge()
- runtimeMetrics.MemStats.MCacheSys = NewGauge()
- runtimeMetrics.MemStats.MSpanInuse = NewGauge()
- runtimeMetrics.MemStats.MSpanSys = NewGauge()
- runtimeMetrics.MemStats.NextGC = NewGauge()
- runtimeMetrics.MemStats.NumGC = NewGauge()
- runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
- runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
- runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
- runtimeMetrics.MemStats.StackInuse = NewGauge()
- runtimeMetrics.MemStats.StackSys = NewGauge()
- runtimeMetrics.MemStats.Sys = NewGauge()
- runtimeMetrics.MemStats.TotalAlloc = NewGauge()
- runtimeMetrics.NumCgoCall = NewGauge()
- runtimeMetrics.NumGoroutine = NewGauge()
- runtimeMetrics.NumThread = NewGauge()
- runtimeMetrics.ReadMemStats = NewTimer()
-
- r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
- r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
- r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
- r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
- r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
- r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
- r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
- r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
- r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
- r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
- r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
- r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
- r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
- r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
- r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
- r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
- r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
- r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
- r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
- r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
- r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
- r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
- r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
- r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
- r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
- r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
- r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
- r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
- r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
- r.Register("runtime.NumThread", runtimeMetrics.NumThread)
- r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
-}
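The collector deleted above boils down to one loop: read runtime.MemStats on a coarse interval and copy selected fields into gauges. A minimal stdlib-only sketch of that shape (the expvar gauges and 3s interval are illustrative stand-ins, not the replacement metrics_config API):

```go
package main

import (
	"expvar"
	"fmt"
	"runtime"
	"time"
)

var (
	heapAlloc    = expvar.NewInt("runtime.MemStats.HeapAlloc")
	numGoroutine = expvar.NewInt("runtime.NumGoroutine")
)

// collect mirrors the shape of the removed runtime-memstats capture loop.
func collect(interval time.Duration) {
	var m runtime.MemStats
	for range time.Tick(interval) {
		runtime.ReadMemStats(&m) // stop-the-world read; keep the interval coarse
		heapAlloc.Set(int64(m.HeapAlloc))
		numGoroutine.Set(int64(runtime.NumGoroutine()))
	}
}

func main() {
	go collect(3 * time.Second)
	time.Sleep(4 * time.Second) // let one sample land; real code runs forever
	fmt.Println("heap:", heapAlloc.Value(), "goroutines:", numGoroutine.Value())
}
```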
diff --git a/metrics/runtime_cgo.go b/metrics/runtime_cgo.go
deleted file mode 100644
index 7d0af4f7e4..0000000000
--- a/metrics/runtime_cgo.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build cgo && !appengine
-// +build cgo,!appengine
-
-package metrics
-
-import "runtime"
-
-func numCgoCall() int64 {
- return runtime.NumCgoCall()
-}
diff --git a/metrics/runtime_gccpufraction.go b/metrics/runtime_gccpufraction.go
deleted file mode 100644
index 28cd44752b..0000000000
--- a/metrics/runtime_gccpufraction.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build go1.5
-// +build go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
- return memStats.GCCPUFraction
-}
diff --git a/metrics/runtime_no_cgo.go b/metrics/runtime_no_cgo.go
deleted file mode 100644
index 616a3b4751..0000000000
--- a/metrics/runtime_no_cgo.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !cgo appengine
-
-package metrics
-
-func numCgoCall() int64 {
- return 0
-}
diff --git a/metrics/runtime_no_gccpufraction.go b/metrics/runtime_no_gccpufraction.go
deleted file mode 100644
index be96aa6f1b..0000000000
--- a/metrics/runtime_no_gccpufraction.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
- return 0
-}
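The four shims above are the standard two-file build-constraint pattern: one implementation per tag set, identical signatures, selected at compile time. With current Go syntax (//go:build lines; the go1.5 gating is long obsolete since GCCPUFraction is always available), the cgo pair would look roughly like these two files:

```go
// file: cgo_on.go
//go:build cgo && !appengine

package metrics

import "runtime"

func numCgoCall() int64 { return runtime.NumCgoCall() }
```

```go
// file: cgo_off.go
//go:build !cgo || appengine

package metrics

func numCgoCall() int64 { return 0 }
```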
diff --git a/metrics/runtime_test.go b/metrics/runtime_test.go
deleted file mode 100644
index f85f7868f7..0000000000
--- a/metrics/runtime_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package metrics
-
-import (
- "runtime"
- "testing"
- "time"
-)
-
-func BenchmarkRuntimeMemStats(b *testing.B) {
- r := NewRegistry()
- RegisterRuntimeMemStats(r)
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- CaptureRuntimeMemStatsOnce(r)
- }
-}
-
-func TestRuntimeMemStats(t *testing.T) {
- r := NewRegistry()
- RegisterRuntimeMemStats(r)
- CaptureRuntimeMemStatsOnce(r)
- zero := runtimeMetrics.MemStats.PauseNs.Count() // Get a "zero" since GC may have run before these tests.
- runtime.GC()
- CaptureRuntimeMemStatsOnce(r)
- if count := runtimeMetrics.MemStats.PauseNs.Count(); count-zero != 1 {
- t.Fatal(count - zero)
- }
- runtime.GC()
- runtime.GC()
- CaptureRuntimeMemStatsOnce(r)
- if count := runtimeMetrics.MemStats.PauseNs.Count(); count-zero != 3 {
- t.Fatal(count - zero)
- }
- for i := 0; i < 256; i++ {
- runtime.GC()
- }
- CaptureRuntimeMemStatsOnce(r)
- if count := runtimeMetrics.MemStats.PauseNs.Count(); count-zero != 259 {
- t.Fatal(count - zero)
- }
- for i := 0; i < 257; i++ {
- runtime.GC()
- }
- CaptureRuntimeMemStatsOnce(r)
- if count := runtimeMetrics.MemStats.PauseNs.Count(); count-zero != 515 { // We lost one because there were too many GCs between captures.
- t.Fatal(count - zero)
- }
-}
-
-func TestRuntimeMemStatsNumThread(t *testing.T) {
- r := NewRegistry()
- RegisterRuntimeMemStats(r)
- CaptureRuntimeMemStatsOnce(r)
-
- if value := runtimeMetrics.NumThread.Value(); value < 1 {
- t.Fatalf("got NumThread: %d, wanted at least 1", value)
- }
-}
-
-func TestRuntimeMemStatsBlocking(t *testing.T) {
- if g := runtime.GOMAXPROCS(0); g < 2 {
- t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g)
- }
- ch := make(chan int)
- go testRuntimeMemStatsBlocking(ch)
- var memStats runtime.MemStats
- t0 := time.Now()
- runtime.ReadMemStats(&memStats)
- t1 := time.Now()
- t.Log("i++ during runtime.ReadMemStats:", <-ch)
- go testRuntimeMemStatsBlocking(ch)
- d := t1.Sub(t0)
- t.Log(d)
- time.Sleep(d)
- t.Log("i++ during time.Sleep:", <-ch)
-}
-
-func testRuntimeMemStatsBlocking(ch chan int) {
- i := 0
- for {
- select {
- case ch <- i:
- return
- default:
- i++
- }
- }
-}
diff --git a/metrics/sample.go b/metrics/sample.go
deleted file mode 100644
index fa2bfb274e..0000000000
--- a/metrics/sample.go
+++ /dev/null
@@ -1,616 +0,0 @@
-package metrics
-
-import (
- "math"
- "math/rand"
- "sort"
- "sync"
- "time"
-)
-
-const rescaleThreshold = time.Hour
-
-// Samples maintain a statistically-significant selection of values from
-// a stream.
-type Sample interface {
- Clear()
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Size() int
- Snapshot() Sample
- StdDev() float64
- Sum() int64
- Update(int64)
- Values() []int64
- Variance() float64
-}
-
-// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
-// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
-// Decay Model for Streaming Systems".
-//
-// <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf>
-type ExpDecaySample struct {
- alpha float64
- count int64
- mutex sync.Mutex
- reservoirSize int
- t0, t1 time.Time
- values *expDecaySampleHeap
-}
-
-// NewExpDecaySample constructs a new exponentially-decaying sample with the
-// given reservoir size and alpha.
-func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
- if !Enabled {
- return NilSample{}
- }
- s := &ExpDecaySample{
- alpha: alpha,
- reservoirSize: reservoirSize,
- t0: time.Now(),
- values: newExpDecaySampleHeap(reservoirSize),
- }
- s.t1 = s.t0.Add(rescaleThreshold)
- return s
-}
-
-// Clear clears all samples.
-func (s *ExpDecaySample) Clear() {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count = 0
- s.t0 = time.Now()
- s.t1 = s.t0.Add(rescaleThreshold)
- s.values.Clear()
-}
-
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *ExpDecaySample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Max() int64 {
- return SampleMax(s.Values())
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *ExpDecaySample) Mean() float64 {
- return SampleMean(s.Values())
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Min() int64 {
- return SampleMin(s.Values())
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *ExpDecaySample) Percentile(p float64) float64 {
- return SamplePercentile(s.Values(), p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.Values(), ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *ExpDecaySample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.values.Size()
-}
-
-// Snapshot returns a read-only copy of the sample.
-func (s *ExpDecaySample) Snapshot() Sample {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return &SampleSnapshot{
- count: s.count,
- values: values,
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *ExpDecaySample) StdDev() float64 {
- return SampleStdDev(s.Values())
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *ExpDecaySample) Sum() int64 {
- return SampleSum(s.Values())
-}
-
-// Update samples a new value.
-func (s *ExpDecaySample) Update(v int64) {
- s.update(time.Now(), v)
-}
-
-// Values returns a copy of the values in the sample.
-func (s *ExpDecaySample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *ExpDecaySample) Variance() float64 {
- return SampleVariance(s.Values())
-}
-
-// update samples a new value at a particular timestamp. This is a method all
-// its own to facilitate testing.
-func (s *ExpDecaySample) update(t time.Time, v int64) {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count++
- if s.values.Size() == s.reservoirSize {
- s.values.Pop()
- }
- s.values.Push(expDecaySample{
- k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
- v: v,
- })
- if t.After(s.t1) {
- values := s.values.Values()
- t0 := s.t0
- s.values.Clear()
- s.t0 = t
- s.t1 = s.t0.Add(rescaleThreshold)
- for _, v := range values {
- v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
- s.values.Push(v)
- }
- }
-}
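The key pushed above is the forward-decay priority k = exp(alpha·(t−t0))/u with u uniform in (0,1]: newer arrivals get exponentially larger keys, so the bounded min-heap evicts the oldest-biased entries first. At alpha = 0.015 the keys grow by roughly a factor of e^54 ≈ 2.8e23 per hour, which is why update rescales everything against a fresh t0 once rescaleThreshold passes. A toy illustration (the 1−rand.Float64() guard is an assumption of the sketch, avoiding the u = 0 corner that the reservoir code leaves to chance):

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// priority computes the forward-decay key for an item arriving at t.
func priority(t, t0 time.Time, alpha float64) float64 {
	u := 1 - rand.Float64() // uniform in (0,1]; plain Float64() can return 0
	return math.Exp(t.Sub(t0).Seconds()*alpha) / u
}

func main() {
	t0 := time.Now()
	for _, age := range []time.Duration{0, time.Minute, time.Hour} {
		fmt.Printf("age %-7v key %g\n", age, priority(t0.Add(age), t0, 0.015))
	}
}
```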
-
-// NilSample is a no-op Sample.
-type NilSample struct{}
-
-// Clear is a no-op.
-func (NilSample) Clear() {}
-
-// Count is a no-op.
-func (NilSample) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilSample) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilSample) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilSample) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilSample) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilSample) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Size is a no-op.
-func (NilSample) Size() int { return 0 }
-
-// Snapshot is a no-op.
-func (NilSample) Snapshot() Sample { return NilSample{} }
-
-// StdDev is a no-op.
-func (NilSample) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilSample) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilSample) Update(v int64) {}
-
-// Values is a no-op.
-func (NilSample) Values() []int64 { return []int64{} }
-
-// Variance is a no-op.
-func (NilSample) Variance() float64 { return 0.0 }
-
-// SampleMax returns the maximum value of the slice of int64.
-func SampleMax(values []int64) int64 {
- if len(values) == 0 {
- return 0
- }
- var max int64 = math.MinInt64
- for _, v := range values {
- if max < v {
- max = v
- }
- }
- return max
-}
-
-// SampleMean returns the mean value of the slice of int64.
-func SampleMean(values []int64) float64 {
- if len(values) == 0 {
- return 0.0
- }
- return float64(SampleSum(values)) / float64(len(values))
-}
-
-// SampleMin returns the minimum value of the slice of int64.
-func SampleMin(values []int64) int64 {
- if len(values) == 0 {
- return 0
- }
- var min int64 = math.MaxInt64
- for _, v := range values {
- if min > v {
- min = v
- }
- }
- return min
-}
-
-// SamplePercentile returns an arbitrary percentile of the slice of int64.
-func SamplePercentile(values int64Slice, p float64) float64 {
- return SamplePercentiles(values, []float64{p})[0]
-}
-
-// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
-// int64.
-func SamplePercentiles(values int64Slice, ps []float64) []float64 {
- scores := make([]float64, len(ps))
- size := len(values)
- if size > 0 {
- sort.Sort(values)
- for i, p := range ps {
- pos := p * float64(size+1)
- if pos < 1.0 {
- scores[i] = float64(values[0])
- } else if pos >= float64(size) {
- scores[i] = float64(values[size-1])
- } else {
- lower := float64(values[int(pos)-1])
- upper := float64(values[int(pos)])
- scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
- }
- }
- }
- return scores
-}
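SamplePercentiles uses a (size+1)-scaled rank with linear interpolation between neighbors. A worked example of the same arithmetic, isolated for a sorted slice:

```go
package main

import "fmt"

// percentile mirrors the interpolation in SamplePercentiles for an
// already-sorted slice.
func percentile(sorted []int64, p float64) float64 {
	size := len(sorted)
	pos := p * float64(size+1)
	switch {
	case pos < 1.0:
		return float64(sorted[0])
	case pos >= float64(size):
		return float64(sorted[size-1])
	default:
		lower := float64(sorted[int(pos)-1])
		upper := float64(sorted[int(pos)])
		return lower + (pos-float64(int(pos)))*(upper-lower)
	}
}

func main() {
	s := []int64{10, 20, 30, 40}
	// pos = 0.5 * 5 = 2.5 -> halfway between the 2nd and 3rd values
	fmt.Println(percentile(s, 0.5)) // 25
}
```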
-
-// SampleSnapshot is a read-only copy of another Sample.
-type SampleSnapshot struct {
- count int64
- values []int64
-}
-
-func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
- return &SampleSnapshot{
- count: count,
- values: values,
- }
-}
-
-// Clear panics.
-func (*SampleSnapshot) Clear() {
- panic("Clear called on a SampleSnapshot")
-}
-
-// Count returns the count of inputs at the time the snapshot was taken.
-func (s *SampleSnapshot) Count() int64 { return s.count }
-
-// Max returns the maximal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
-
-// Min returns the minimal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
-
-// Percentile returns an arbitrary percentile of values at the time the
-// snapshot was taken.
-func (s *SampleSnapshot) Percentile(p float64) float64 {
- return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values at the time
-// the snapshot was taken.
-func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample at the time the snapshot was taken.
-func (s *SampleSnapshot) Size() int { return len(s.values) }
-
-// Snapshot returns the snapshot.
-func (s *SampleSnapshot) Snapshot() Sample { return s }
-
-// StdDev returns the standard deviation of values at the time the snapshot was
-// taken.
-func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
-
-// Sum returns the sum of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
-
-// Update panics.
-func (*SampleSnapshot) Update(int64) {
- panic("Update called on a SampleSnapshot")
-}
-
-// Values returns a copy of the values in the sample.
-func (s *SampleSnapshot) Values() []int64 {
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return values
-}
-
-// Variance returns the variance of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
-
-// SampleStdDev returns the standard deviation of the slice of int64.
-func SampleStdDev(values []int64) float64 {
- return math.Sqrt(SampleVariance(values))
-}
-
-// SampleSum returns the sum of the slice of int64.
-func SampleSum(values []int64) int64 {
- var sum int64
- for _, v := range values {
- sum += v
- }
- return sum
-}
-
-// SampleVariance returns the variance of the slice of int64.
-func SampleVariance(values []int64) float64 {
- if len(values) == 0 {
- return 0.0
- }
- m := SampleMean(values)
- var sum float64
- for _, v := range values {
- d := float64(v) - m
- sum += d * d
- }
- return sum / float64(len(values))
-}
-
-// A uniform sample using Vitter's Algorithm R.
-//
-// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
-type UniformSample struct {
- count int64
- mutex sync.Mutex
- reservoirSize int
- values []int64
-}
-
-// NewUniformSample constructs a new uniform sample with the given reservoir
-// size.
-func NewUniformSample(reservoirSize int) Sample {
- if !Enabled {
- return NilSample{}
- }
- return &UniformSample{
- reservoirSize: reservoirSize,
- values: make([]int64, 0, reservoirSize),
- }
-}
-
-// Clear clears all samples.
-func (s *UniformSample) Clear() {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count = 0
- s.values = make([]int64, 0, s.reservoirSize)
-}
-
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *UniformSample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *UniformSample) Max() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMax(s.values)
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *UniformSample) Mean() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMean(s.values)
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *UniformSample) Min() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMin(s.values)
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *UniformSample) Percentile(p float64) float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *UniformSample) Percentiles(ps []float64) []float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *UniformSample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return len(s.values)
-}
-
-// Snapshot returns a read-only copy of the sample.
-func (s *UniformSample) Snapshot() Sample {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return &SampleSnapshot{
- count: s.count,
- values: values,
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *UniformSample) StdDev() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleStdDev(s.values)
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *UniformSample) Sum() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleSum(s.values)
-}
-
-// Update samples a new value.
-func (s *UniformSample) Update(v int64) {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count++
- if len(s.values) < s.reservoirSize {
- s.values = append(s.values, v)
- } else {
- r := rand.Int63n(s.count)
- if r < int64(len(s.values)) {
- s.values[int(r)] = v
- }
- }
-}
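Update implements Vitter's Algorithm R: once the reservoir is full, the n-th item replaces a uniformly random slot with probability k/n, which keeps every item seen so far equally likely to be in the sample. The same logic, isolated:

```go
package main

import (
	"fmt"
	"math/rand"
)

// reservoir keeps a uniform sample of up to k values from a stream,
// mirroring UniformSample.Update.
func reservoir(stream []int64, k int) []int64 {
	out := make([]int64, 0, k)
	var n int64
	for _, v := range stream {
		n++
		if len(out) < k {
			out = append(out, v)
			continue
		}
		if r := rand.Int63n(n); r < int64(k) { // replace with probability k/n
			out[r] = v
		}
	}
	return out
}

func main() {
	stream := make([]int64, 1000)
	for i := range stream {
		stream[i] = int64(i)
	}
	fmt.Println(reservoir(stream, 5)) // 5 values, each index equally likely
}
```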
-
-// Values returns a copy of the values in the sample.
-func (s *UniformSample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *UniformSample) Variance() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleVariance(s.values)
-}
-
-// expDecaySample represents an individual sample in a heap.
-type expDecaySample struct {
- k float64
- v int64
-}
-
-func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
- return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
-}
-
-// expDecaySampleHeap is a min-heap of expDecaySamples.
-// The internal implementation is copied from the standard library's container/heap
-type expDecaySampleHeap struct {
- s []expDecaySample
-}
-
-func (h *expDecaySampleHeap) Clear() {
- h.s = h.s[:0]
-}
-
-func (h *expDecaySampleHeap) Push(s expDecaySample) {
- n := len(h.s)
- h.s = h.s[0 : n+1]
- h.s[n] = s
- h.up(n)
-}
-
-func (h *expDecaySampleHeap) Pop() expDecaySample {
- n := len(h.s) - 1
- h.s[0], h.s[n] = h.s[n], h.s[0]
- h.down(0, n)
-
- n = len(h.s)
- s := h.s[n-1]
- h.s = h.s[0 : n-1]
- return s
-}
-
-func (h *expDecaySampleHeap) Size() int {
- return len(h.s)
-}
-
-func (h *expDecaySampleHeap) Values() []expDecaySample {
- return h.s
-}
-
-func (h *expDecaySampleHeap) up(j int) {
- for {
- i := (j - 1) / 2 // parent
- if i == j || !(h.s[j].k < h.s[i].k) {
- break
- }
- h.s[i], h.s[j] = h.s[j], h.s[i]
- j = i
- }
-}
-
-func (h *expDecaySampleHeap) down(i, n int) {
- for {
- j1 := 2*i + 1
- if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
- break
- }
- j := j1 // left child
- if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
- j = j2 // = 2*i + 2 // right child
- }
- if !(h.s[j].k < h.s[i].k) {
- break
- }
- h.s[i], h.s[j] = h.s[j], h.s[i]
- i = j
- }
-}
-
-type int64Slice []int64
-
-func (p int64Slice) Len() int { return len(p) }
-func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/metrics/sample_test.go b/metrics/sample_test.go
deleted file mode 100644
index c9168d3e82..0000000000
--- a/metrics/sample_test.go
+++ /dev/null
@@ -1,365 +0,0 @@
-package metrics
-
-import (
- "math"
- "math/rand"
- "runtime"
- "testing"
- "time"
-)
-
-// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
-// expensive computations like Variance, the cost of copying the Sample, as
-// approximated by a make and copy, is much greater than the cost of the
-// computation for small samples and only slightly less for large samples.
-func BenchmarkCompute1000(b *testing.B) {
- s := make([]int64, 1000)
- for i := 0; i < len(s); i++ {
- s[i] = int64(i)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- SampleVariance(s)
- }
-}
-func BenchmarkCompute1000000(b *testing.B) {
- s := make([]int64, 1000000)
- for i := 0; i < len(s); i++ {
- s[i] = int64(i)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- SampleVariance(s)
- }
-}
-func BenchmarkCopy1000(b *testing.B) {
- s := make([]int64, 1000)
- for i := 0; i < len(s); i++ {
- s[i] = int64(i)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- sCopy := make([]int64, len(s))
- copy(sCopy, s)
- }
-}
-func BenchmarkCopy1000000(b *testing.B) {
- s := make([]int64, 1000000)
- for i := 0; i < len(s); i++ {
- s[i] = int64(i)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- sCopy := make([]int64, len(s))
- copy(sCopy, s)
- }
-}
-
-func BenchmarkExpDecaySample257(b *testing.B) {
- benchmarkSample(b, NewExpDecaySample(257, 0.015))
-}
-
-func BenchmarkExpDecaySample514(b *testing.B) {
- benchmarkSample(b, NewExpDecaySample(514, 0.015))
-}
-
-func BenchmarkExpDecaySample1028(b *testing.B) {
- benchmarkSample(b, NewExpDecaySample(1028, 0.015))
-}
-
-func BenchmarkUniformSample257(b *testing.B) {
- benchmarkSample(b, NewUniformSample(257))
-}
-
-func BenchmarkUniformSample514(b *testing.B) {
- benchmarkSample(b, NewUniformSample(514))
-}
-
-func BenchmarkUniformSample1028(b *testing.B) {
- benchmarkSample(b, NewUniformSample(1028))
-}
-
-func TestExpDecaySample10(t *testing.T) {
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
- for i := 0; i < 10; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); size != 10 {
- t.Errorf("s.Count(): 10 != %v\n", size)
- }
- if size := s.Size(); size != 10 {
- t.Errorf("s.Size(): 10 != %v\n", size)
- }
- if l := len(s.Values()); l != 10 {
- t.Errorf("len(s.Values()): 10 != %v\n", l)
- }
- for _, v := range s.Values() {
- if v > 10 || v < 0 {
- t.Errorf("out of range [0, 10): %v\n", v)
- }
- }
-}
-
-func TestExpDecaySample100(t *testing.T) {
- rand.Seed(1)
- s := NewExpDecaySample(1000, 0.01)
- for i := 0; i < 100; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); size != 100 {
- t.Errorf("s.Count(): 100 != %v\n", size)
- }
- if size := s.Size(); size != 100 {
- t.Errorf("s.Size(): 100 != %v\n", size)
- }
- if l := len(s.Values()); l != 100 {
- t.Errorf("len(s.Values()): 100 != %v\n", l)
- }
- for _, v := range s.Values() {
- if v > 100 || v < 0 {
- t.Errorf("out of range [0, 100): %v\n", v)
- }
- }
-}
-
-func TestExpDecaySample1000(t *testing.T) {
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
- for i := 0; i < 1000; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); size != 1000 {
- t.Errorf("s.Count(): 1000 != %v\n", size)
- }
- if size := s.Size(); size != 100 {
- t.Errorf("s.Size(): 100 != %v\n", size)
- }
- if l := len(s.Values()); l != 100 {
- t.Errorf("len(s.Values()): 100 != %v\n", l)
- }
- for _, v := range s.Values() {
- if v > 1000 || v < 0 {
- t.Errorf("out of range [0, 1000): %v\n", v)
- }
- }
-}
-
-// This test makes sure that the sample's priority is not amplified by using
-// nanosecond duration since start rather than second duration since start.
-// The priority becomes +Inf quickly after starting if this is done,
-// effectively freezing the set of samples until a rescale step happens.
-func TestExpDecaySampleNanosecondRegression(t *testing.T) {
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
- for i := 0; i < 100; i++ {
- s.Update(10)
- }
- time.Sleep(1 * time.Millisecond)
- for i := 0; i < 100; i++ {
- s.Update(20)
- }
- v := s.Values()
- avg := float64(0)
- for i := 0; i < len(v); i++ {
- avg += float64(v[i])
- }
- avg /= float64(len(v))
- if avg > 16 || avg < 14 {
- t.Errorf("out of range [14, 16]: %v\n", avg)
- }
-}
-
-func TestExpDecaySampleRescale(t *testing.T) {
- s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
- s.update(time.Now(), 1)
- s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
- for _, v := range s.values.Values() {
- if v.k == 0.0 {
- t.Fatal("v.k == 0.0")
- }
- }
-}
-
-func TestExpDecaySampleSnapshot(t *testing.T) {
- now := time.Now()
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
- for i := 1; i <= 10000; i++ {
- s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
- }
- snapshot := s.Snapshot()
- s.Update(1)
- testExpDecaySampleStatistics(t, snapshot)
-}
-
-func TestExpDecaySampleStatistics(t *testing.T) {
- now := time.Now()
- rand.Seed(1)
- s := NewExpDecaySample(100, 0.99)
- for i := 1; i <= 10000; i++ {
- s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
- }
- testExpDecaySampleStatistics(t, s)
-}
-
-func TestUniformSample(t *testing.T) {
- rand.Seed(1)
- s := NewUniformSample(100)
- for i := 0; i < 1000; i++ {
- s.Update(int64(i))
- }
- if size := s.Count(); size != 1000 {
- t.Errorf("s.Count(): 1000 != %v\n", size)
- }
- if size := s.Size(); size != 100 {
- t.Errorf("s.Size(): 100 != %v\n", size)
- }
- if l := len(s.Values()); l != 100 {
- t.Errorf("len(s.Values()): 100 != %v\n", l)
- }
- for _, v := range s.Values() {
- if v > 1000 || v < 0 {
- t.Errorf("out of range [0, 100): %v\n", v)
- }
- }
-}
-
-func TestUniformSampleIncludesTail(t *testing.T) {
- rand.Seed(1)
- s := NewUniformSample(100)
- max := 100
- for i := 0; i < max; i++ {
- s.Update(int64(i))
- }
- v := s.Values()
- sum := 0
- exp := (max - 1) * max / 2
- for i := 0; i < len(v); i++ {
- sum += int(v[i])
- }
- if exp != sum {
- t.Errorf("sum: %v != %v\n", exp, sum)
- }
-}
-
-func TestUniformSampleSnapshot(t *testing.T) {
- s := NewUniformSample(100)
- for i := 1; i <= 10000; i++ {
- s.Update(int64(i))
- }
- snapshot := s.Snapshot()
- s.Update(1)
- testUniformSampleStatistics(t, snapshot)
-}
-
-func TestUniformSampleStatistics(t *testing.T) {
- rand.Seed(1)
- s := NewUniformSample(100)
- for i := 1; i <= 10000; i++ {
- s.Update(int64(i))
- }
- testUniformSampleStatistics(t, s)
-}
-
-func benchmarkSample(b *testing.B, s Sample) {
- var memStats runtime.MemStats
- runtime.ReadMemStats(&memStats)
- pauseTotalNs := memStats.PauseTotalNs
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- s.Update(1)
- }
- b.StopTimer()
- runtime.GC()
- runtime.ReadMemStats(&memStats)
- b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
-}
-
-func testExpDecaySampleStatistics(t *testing.T, s Sample) {
- if count := s.Count(); count != 10000 {
- t.Errorf("s.Count(): 10000 != %v\n", count)
- }
- if min := s.Min(); min != 107 {
- t.Errorf("s.Min(): 107 != %v\n", min)
- }
- if max := s.Max(); max != 10000 {
- t.Errorf("s.Max(): 10000 != %v\n", max)
- }
- if mean := s.Mean(); mean != 4965.98 {
- t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
- }
- if stdDev := s.StdDev(); stdDev != 2959.825156930727 {
- t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
- }
- ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
- if ps[0] != 4615 {
- t.Errorf("median: 4615 != %v\n", ps[0])
- }
- if ps[1] != 7672 {
- t.Errorf("75th percentile: 7672 != %v\n", ps[1])
- }
- if ps[2] != 9998.99 {
- t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
- }
-}
-
-func testUniformSampleStatistics(t *testing.T, s Sample) {
- if count := s.Count(); count != 10000 {
- t.Errorf("s.Count(): 10000 != %v\n", count)
- }
- if min := s.Min(); min != 37 {
- t.Errorf("s.Min(): 37 != %v\n", min)
- }
- if max := s.Max(); max != 9989 {
- t.Errorf("s.Max(): 9989 != %v\n", max)
- }
- if mean := s.Mean(); mean != 4748.14 {
- t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
- }
- if stdDev := s.StdDev(); stdDev != 2826.684117548333 {
- t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
- }
- ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
- if ps[0] != 4599 {
- t.Errorf("median: 4599 != %v\n", ps[0])
- }
- if ps[1] != 7380.5 {
- t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
- }
- if math.Abs(9986.429999999998-ps[2]) > epsilonPercentile {
- t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
- }
-}
-
-// TestUniformSampleConcurrentUpdateCount would expose data race problems with
-// concurrent Update and Count calls on Sample when the test is run with the
-// -race flag.
-func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping in short mode")
- }
- s := NewUniformSample(100)
- for i := 0; i < 100; i++ {
- s.Update(int64(i))
- }
- quit := make(chan struct{})
- go func() {
- t := time.NewTicker(10 * time.Millisecond)
- defer t.Stop()
- for {
- select {
- case <-t.C:
- s.Update(rand.Int63())
- case <-quit:
- t.Stop()
- return
- }
- }
- }()
- for i := 0; i < 1000; i++ {
- s.Count()
- time.Sleep(5 * time.Millisecond)
- }
- quit <- struct{}{}
-}
diff --git a/metrics/syslog.go b/metrics/syslog.go
deleted file mode 100644
index 551a2bd0f0..0000000000
--- a/metrics/syslog.go
+++ /dev/null
@@ -1,79 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package metrics
-
-import (
- "fmt"
- "log/syslog"
- "time"
-)
-
-// Output each metric in the given registry to syslog periodically using
-// the given syslogger.
-func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
- for range time.Tick(d) {
- r.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
- case Gauge:
- w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
- case GaugeFloat64:
- w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
- case Healthcheck:
- metric.Check()
- w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- w.Info(fmt.Sprintf(
- "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
- name,
- h.Count(),
- h.Min(),
- h.Max(),
- h.Mean(),
- h.StdDev(),
- ps[0],
- ps[1],
- ps[2],
- ps[3],
- ps[4],
- ))
- case Meter:
- m := metric.Snapshot()
- w.Info(fmt.Sprintf(
- "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
- name,
- m.Count(),
- m.Rate1(),
- m.Rate5(),
- m.Rate15(),
- m.RateMean(),
- ))
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- w.Info(fmt.Sprintf(
- "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
- name,
- t.Count(),
- t.Min(),
- t.Max(),
- t.Mean(),
- t.StdDev(),
- ps[0],
- ps[1],
- ps[2],
- ps[3],
- ps[4],
- t.Rate1(),
- t.Rate5(),
- t.Rate15(),
- t.RateMean(),
- ))
- }
- })
- }
-}
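Before this removal, wiring the reporter took one goroutine. A sketch of typical usage (Unix-only, since log/syslog does not build on Windows; the "quai-metrics" tag and 30s period are illustrative):

```go
package main

import (
	"log/syslog"
	"time"

	"github.com/dominant-strategies/go-quai/metrics"
)

func main() {
	w, err := syslog.New(syslog.LOG_INFO, "quai-metrics")
	if err != nil {
		panic(err)
	}
	// Syslog blocks on its ticker, so it normally runs in its own goroutine.
	metrics.Syslog(metrics.DefaultRegistry, 30*time.Second, w)
}
```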
diff --git a/metrics/timer.go b/metrics/timer.go
deleted file mode 100644
index a63c9dfb6c..0000000000
--- a/metrics/timer.go
+++ /dev/null
@@ -1,326 +0,0 @@
-package metrics
-
-import (
- "sync"
- "time"
-)
-
-// Timers capture the duration and rate of events.
-type Timer interface {
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Rate1() float64
- Rate5() float64
- Rate15() float64
- RateMean() float64
- Snapshot() Timer
- StdDev() float64
- Stop()
- Sum() int64
- Time(func())
- Update(time.Duration)
- UpdateSince(time.Time)
- Variance() float64
-}
-
-// GetOrRegisterTimer returns an existing Timer or constructs and registers a
-// new StandardTimer.
-// Be sure to unregister the timer from the registry once it is of no use to
-// allow for garbage collection.
-func GetOrRegisterTimer(name string, r Registry) Timer {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewTimer).(Timer)
-}
-
-// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
-// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
-func NewCustomTimer(h Histogram, m Meter) Timer {
- if !Enabled {
- return NilTimer{}
- }
- return &StandardTimer{
- histogram: h,
- meter: m,
- }
-}
-
-// NewRegisteredTimer constructs and registers a new StandardTimer.
-// Be sure to unregister the timer from the registry once it is of no use to
-// allow for garbage collection.
-func NewRegisteredTimer(name string, r Registry) Timer {
- c := NewTimer()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewTimer constructs a new StandardTimer using an exponentially-decaying
-// sample with the same reservoir size and alpha as UNIX load averages.
-// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
-func NewTimer() Timer {
- if !Enabled {
- return NilTimer{}
- }
- return &StandardTimer{
- histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
- meter: NewMeter(),
- }
-}
-
-// NilTimer is a no-op Timer.
-type NilTimer struct{}
-
-// Count is a no-op.
-func (NilTimer) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilTimer) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilTimer) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilTimer) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilTimer) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilTimer) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Rate1 is a no-op.
-func (NilTimer) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilTimer) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilTimer) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilTimer) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilTimer) Snapshot() Timer { return NilTimer{} }
-
-// StdDev is a no-op.
-func (NilTimer) StdDev() float64 { return 0.0 }
-
-// Stop is a no-op.
-func (NilTimer) Stop() {}
-
-// Sum is a no-op.
-func (NilTimer) Sum() int64 { return 0 }
-
-// Time is a no-op.
-func (NilTimer) Time(func()) {}
-
-// Update is a no-op.
-func (NilTimer) Update(time.Duration) {}
-
-// UpdateSince is a no-op.
-func (NilTimer) UpdateSince(time.Time) {}
-
-// Variance is a no-op.
-func (NilTimer) Variance() float64 { return 0.0 }
-
-// StandardTimer is the standard implementation of a Timer and uses a Histogram
-// and Meter.
-type StandardTimer struct {
- histogram Histogram
- meter Meter
- mutex sync.Mutex
-}
-
-// Count returns the number of events recorded.
-func (t *StandardTimer) Count() int64 {
- return t.histogram.Count()
-}
-
-// Max returns the maximum value in the sample.
-func (t *StandardTimer) Max() int64 {
- return t.histogram.Max()
-}
-
-// Mean returns the mean of the values in the sample.
-func (t *StandardTimer) Mean() float64 {
- return t.histogram.Mean()
-}
-
-// Min returns the minimum value in the sample.
-func (t *StandardTimer) Min() int64 {
- return t.histogram.Min()
-}
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (t *StandardTimer) Percentile(p float64) float64 {
- return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (t *StandardTimer) Percentiles(ps []float64) []float64 {
- return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (t *StandardTimer) Rate1() float64 {
- return t.meter.Rate1()
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (t *StandardTimer) Rate5() float64 {
- return t.meter.Rate5()
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (t *StandardTimer) Rate15() float64 {
- return t.meter.Rate15()
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (t *StandardTimer) RateMean() float64 {
- return t.meter.RateMean()
-}
-
-// Snapshot returns a read-only copy of the timer.
-func (t *StandardTimer) Snapshot() Timer {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- return &TimerSnapshot{
- histogram: t.histogram.Snapshot().(*HistogramSnapshot),
- meter: t.meter.Snapshot().(*MeterSnapshot),
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (t *StandardTimer) StdDev() float64 {
- return t.histogram.StdDev()
-}
-
-// Stop stops the meter.
-func (t *StandardTimer) Stop() {
- t.meter.Stop()
-}
-
-// Sum returns the sum in the sample.
-func (t *StandardTimer) Sum() int64 {
- return t.histogram.Sum()
-}
-
-// Record the duration of the execution of the given function.
-func (t *StandardTimer) Time(f func()) {
- ts := time.Now()
- f()
- t.Update(time.Since(ts))
-}
-
-// Record the duration of an event.
-func (t *StandardTimer) Update(d time.Duration) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.histogram.Update(int64(d))
- t.meter.Mark(1)
-}
-
-// Record the duration of an event that started at a time and ends now.
-func (t *StandardTimer) UpdateSince(ts time.Time) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.histogram.Update(int64(time.Since(ts)))
- t.meter.Mark(1)
-}
-
-// Variance returns the variance of the values in the sample.
-func (t *StandardTimer) Variance() float64 {
- return t.histogram.Variance()
-}
-
-// TimerSnapshot is a read-only copy of another Timer.
-type TimerSnapshot struct {
- histogram *HistogramSnapshot
- meter *MeterSnapshot
-}
-
-// Count returns the number of events recorded at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
-
-// Max returns the maximum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
-
-// Min returns the minimum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
-
-// Percentile returns an arbitrary percentile of sampled values at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) Percentile(p float64) float64 {
- return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of sampled values at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
- return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
-
-// Snapshot returns the snapshot.
-func (t *TimerSnapshot) Snapshot() Timer { return t }
-
-// StdDev returns the standard deviation of the values at the time the snapshot
-// was taken.
-func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
-
-// Stop is a no-op.
-func (t *TimerSnapshot) Stop() {}
-
-// Sum returns the sum at the time the snapshot was taken.
-func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
-
-// Time panics.
-func (*TimerSnapshot) Time(func()) {
- panic("Time called on a TimerSnapshot")
-}
-
-// Update panics.
-func (*TimerSnapshot) Update(time.Duration) {
- panic("Update called on a TimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*TimerSnapshot) UpdateSince(time.Time) {
- panic("UpdateSince called on a TimerSnapshot")
-}
-
-// Variance returns the variance of the values at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
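For reference, typical call sites of the removed Timer looked like the sketch below (the "db/commit" name is illustrative). Passing nil registers against DefaultRegistry, and Stop releases the underlying meter's ticker so it can be garbage collected:

```go
package main

import (
	"fmt"
	"time"

	"github.com/dominant-strategies/go-quai/metrics"
)

func main() {
	t := metrics.NewRegisteredTimer("db/commit", nil) // nil -> DefaultRegistry
	defer t.Stop()                                    // stop the meter's ticker

	t.Time(func() { time.Sleep(10 * time.Millisecond) })  // time a function
	t.UpdateSince(time.Now().Add(-25 * time.Millisecond)) // record a finished span

	fmt.Println(t.Count(), time.Duration(t.Max()))
}
```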
diff --git a/metrics/timer_test.go b/metrics/timer_test.go
deleted file mode 100644
index 903e8e8d49..0000000000
--- a/metrics/timer_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "math"
- "testing"
- "time"
-)
-
-func BenchmarkTimer(b *testing.B) {
- tm := NewTimer()
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- tm.Update(1)
- }
-}
-
-func TestGetOrRegisterTimer(t *testing.T) {
- r := NewRegistry()
- NewRegisteredTimer("foo", r).Update(47)
- if tm := GetOrRegisterTimer("foo", r); tm.Count() != 1 {
- t.Fatal(tm)
- }
-}
-
-func TestTimerExtremes(t *testing.T) {
- tm := NewTimer()
- tm.Update(math.MaxInt64)
- tm.Update(0)
- if stdDev := tm.StdDev(); stdDev != 4.611686018427388e+18 {
- t.Errorf("tm.StdDev(): 4.611686018427388e+18 != %v\n", stdDev)
- }
-}
-
-func TestTimerStop(t *testing.T) {
- l := len(arbiter.meters)
- tm := NewTimer()
- if l+1 != len(arbiter.meters) {
- t.Errorf("arbiter.meters: %d != %d\n", l+1, len(arbiter.meters))
- }
- tm.Stop()
- if l != len(arbiter.meters) {
- t.Errorf("arbiter.meters: %d != %d\n", l, len(arbiter.meters))
- }
-}
-
-func TestTimerFunc(t *testing.T) {
- var (
- tm = NewTimer()
- testStart = time.Now()
- actualTime time.Duration
- )
- tm.Time(func() {
- time.Sleep(50 * time.Millisecond)
- actualTime = time.Since(testStart)
- })
- var (
- drift = time.Millisecond * 2
- measured = time.Duration(tm.Max())
- ceil = actualTime + drift
- floor = actualTime - drift
- )
- if measured > ceil || measured < floor {
- t.Errorf("tm.Max(): %v > %v || %v > %v\n", measured, ceil, measured, floor)
- }
-}
-
-func TestTimerZero(t *testing.T) {
- tm := NewTimer()
- if count := tm.Count(); count != 0 {
- t.Errorf("tm.Count(): 0 != %v\n", count)
- }
- if min := tm.Min(); min != 0 {
- t.Errorf("tm.Min(): 0 != %v\n", min)
- }
- if max := tm.Max(); max != 0 {
- t.Errorf("tm.Max(): 0 != %v\n", max)
- }
- if mean := tm.Mean(); mean != 0.0 {
- t.Errorf("tm.Mean(): 0.0 != %v\n", mean)
- }
- if stdDev := tm.StdDev(); stdDev != 0.0 {
- t.Errorf("tm.StdDev(): 0.0 != %v\n", stdDev)
- }
- ps := tm.Percentiles([]float64{0.5, 0.75, 0.99})
- if ps[0] != 0.0 {
- t.Errorf("median: 0.0 != %v\n", ps[0])
- }
- if ps[1] != 0.0 {
- t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
- }
- if ps[2] != 0.0 {
- t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
- }
- if rate1 := tm.Rate1(); rate1 != 0.0 {
- t.Errorf("tm.Rate1(): 0.0 != %v\n", rate1)
- }
- if rate5 := tm.Rate5(); rate5 != 0.0 {
- t.Errorf("tm.Rate5(): 0.0 != %v\n", rate5)
- }
- if rate15 := tm.Rate15(); rate15 != 0.0 {
- t.Errorf("tm.Rate15(): 0.0 != %v\n", rate15)
- }
- if rateMean := tm.RateMean(); rateMean != 0.0 {
- t.Errorf("tm.RateMean(): 0.0 != %v\n", rateMean)
- }
-}
-
-func ExampleGetOrRegisterTimer() {
- m := "account.create.latency"
- t := GetOrRegisterTimer(m, nil)
- t.Update(47)
- fmt.Println(t.Max()) // Output: 47
-}
diff --git a/metrics/validate.sh b/metrics/validate.sh
deleted file mode 100755
index c4ae91e642..0000000000
--- a/metrics/validate.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# check there are no formatting issues
-GOFMT_LINES=`gofmt -l . | wc -l | xargs`
-test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
-
-# run the tests for the root package
-go test -race .
diff --git a/metrics/writer.go b/metrics/writer.go
deleted file mode 100644
index 88521a80d9..0000000000
--- a/metrics/writer.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "io"
- "sort"
- "time"
-)
-
-// Write sorts and writes each metric in the given registry periodically to the
-// given io.Writer.
-func Write(r Registry, d time.Duration, w io.Writer) {
- for range time.Tick(d) {
- WriteOnce(r, w)
- }
-}
-
-// WriteOnce sorts and writes metrics in the given registry to the given
-// io.Writer.
-func WriteOnce(r Registry, w io.Writer) {
- var namedMetrics namedMetricSlice
- r.Each(func(name string, i interface{}) {
- namedMetrics = append(namedMetrics, namedMetric{name, i})
- })
-
- sort.Sort(namedMetrics)
- for _, namedMetric := range namedMetrics {
- switch metric := namedMetric.m.(type) {
- case Counter:
- fmt.Fprintf(w, "counter %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", metric.Count())
- case Gauge:
- fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %9d\n", metric.Value())
- case GaugeFloat64:
- fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %f\n", metric.Value())
- case Healthcheck:
- metric.Check()
- fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
- fmt.Fprintf(w, " error: %v\n", metric.Error())
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", h.Count())
- fmt.Fprintf(w, " min: %9d\n", h.Min())
- fmt.Fprintf(w, " max: %9d\n", h.Max())
- fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
- fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
- fmt.Fprintf(w, " median: %12.2f\n", ps[0])
- fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
- fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
- fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
- fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
- case Meter:
- m := metric.Snapshot()
- fmt.Fprintf(w, "meter %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", m.Count())
- fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
- fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
- fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
- fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "timer %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", t.Count())
- fmt.Fprintf(w, " min: %9d\n", t.Min())
- fmt.Fprintf(w, " max: %9d\n", t.Max())
- fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
- fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
- fmt.Fprintf(w, " median: %12.2f\n", ps[0])
- fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
- fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
- fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
- fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
- fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
- fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
- fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
- fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
- }
- }
-}
-
-type namedMetric struct {
- name string
- m interface{}
-}
-
-// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
-type namedMetricSlice []namedMetric
-
-func (nms namedMetricSlice) Len() int { return len(nms) }
-
-func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
-
-func (nms namedMetricSlice) Less(i, j int) bool {
- return nms[i].name < nms[j].name
-}
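WriteOnce paired naturally with a one-shot dump to stdout or stderr when debugging. A minimal sketch of the removed API:

```go
package main

import (
	"os"

	"github.com/dominant-strategies/go-quai/metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.GetOrRegisterCounter("requests", r).Inc(42)
	metrics.WriteOnce(r, os.Stdout)
	// counter requests
	//   count:        42
}
```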
diff --git a/metrics/writer_test.go b/metrics/writer_test.go
deleted file mode 100644
index 1aacc28712..0000000000
--- a/metrics/writer_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package metrics
-
-import (
- "sort"
- "testing"
-)
-
-func TestMetricsSorting(t *testing.T) {
- var namedMetrics = namedMetricSlice{
- {name: "zzz"},
- {name: "bbb"},
- {name: "fff"},
- {name: "ggg"},
- }
-
- sort.Sort(namedMetrics)
- for i, name := range []string{"bbb", "fff", "ggg", "zzz"} {
- if namedMetrics[i].name != name {
- t.Fail()
- }
- }
-}
diff --git a/p2p/metrics.go b/p2p/metrics.go
index 5912f7ed63..9ed8b0f2fb 100644
--- a/p2p/metrics.go
+++ b/p2p/metrics.go
@@ -20,8 +20,6 @@ package p2p
import (
"net"
-
- "github.com/dominant-strategies/go-quai/metrics"
)
const (
@@ -36,11 +34,7 @@ const (
)
var (
- ingressConnectMeter = metrics.NewRegisteredMeter("p2p/serves", nil)
- ingressTrafficMeter = metrics.NewRegisteredMeter(ingressMeterName, nil)
- egressConnectMeter = metrics.NewRegisteredMeter("p2p/dials", nil)
- egressTrafficMeter = metrics.NewRegisteredMeter(egressMeterName, nil)
- activePeerGauge = metrics.NewRegisteredGauge("p2p/peers", nil)
+
)
// meteredConn is a wrapper around a net.Conn that meters both the
@@ -53,17 +47,10 @@ type meteredConn struct {
// connection meter and also increases the metered peer count. If the metrics
// system is disabled, function returns the original connection.
func newMeteredConn(conn net.Conn, ingress bool, addr *net.TCPAddr) net.Conn {
- // Short circuit if metrics are disabled
- if !metrics.Enabled {
- return conn
- }
// Bump the connection counters and wrap the connection
if ingress {
- ingressConnectMeter.Mark(1)
} else {
- egressConnectMeter.Mark(1)
}
- activePeerGauge.Inc(1)
return &meteredConn{Conn: conn}
}
@@ -71,7 +58,6 @@ func newMeteredConn(conn net.Conn, ingress bool, addr *net.TCPAddr) net.Conn {
// and the peer ingress traffic meters along the way.
func (c *meteredConn) Read(b []byte) (n int, err error) {
n, err = c.Conn.Read(b)
- ingressTrafficMeter.Mark(int64(n))
return n, err
}
@@ -79,7 +65,6 @@ func (c *meteredConn) Read(b []byte) (n int, err error) {
// and the peer egress traffic meters along the way.
func (c *meteredConn) Write(b []byte) (n int, err error) {
n, err = c.Conn.Write(b)
- egressTrafficMeter.Mark(int64(n))
return n, err
}
@@ -88,7 +73,6 @@ func (c *meteredConn) Write(b []byte) (n int, err error) {
func (c *meteredConn) Close() error {
err := c.Conn.Close()
if err == nil {
- activePeerGauge.Dec(1)
}
return err
}
diff --git a/p2p/peer.go b/p2p/peer.go
index d2f0f06692..7bb4d179f0 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -28,7 +28,6 @@ import (
"github.com/dominant-strategies/go-quai/common/mclock"
"github.com/dominant-strategies/go-quai/event"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
"github.com/dominant-strategies/go-quai/p2p/enode"
"github.com/dominant-strategies/go-quai/p2p/enr"
"github.com/dominant-strategies/go-quai/rlp"
@@ -342,11 +341,7 @@ func (p *Peer) handle(msg Msg) error {
if err != nil {
return fmt.Errorf("msg code out of range: %v", msg.Code)
}
- if metrics.Enabled {
- m := fmt.Sprintf("%s/%s/%d/%#02x", ingressMeterName, proto.Name, proto.Version, msg.Code-proto.offset)
- metrics.GetOrRegisterMeter(m, nil).Mark(int64(msg.meterSize))
- metrics.GetOrRegisterMeter(m+"/packets", nil).Mark(1)
- }
+
select {
case proto.in <- msg:
return nil
diff --git a/p2p/tracker/tracker.go b/p2p/tracker/tracker.go
index 65c23db493..9aa88734ed 100644
--- a/p2p/tracker/tracker.go
+++ b/p2p/tracker/tracker.go
@@ -23,7 +23,7 @@ import (
"time"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
+ "github.com/dominant-strategies/go-quai/metrics_config"
)
const (
@@ -84,7 +84,7 @@ func New(protocol string, timeout time.Duration) *Tracker {
// Track adds a network request to the tracker to wait for a response to arrive
// or until the request it cancelled or times out.
func (t *Tracker) Track(peer string, version uint, reqCode uint64, resCode uint64, id uint64) {
- if !metrics.Enabled {
+	if !metrics_config.MetricsEnabled() {
return
}
t.lock.Lock()
@@ -111,8 +111,6 @@ func (t *Tracker) Track(peer string, version uint, reqCode uint64, resCode uint6
time: time.Now(),
expire: t.expire.PushBack(id),
}
- g := fmt.Sprintf("%s/%s/%d/%#02x", trackedGaugeName, t.protocol, version, reqCode)
- metrics.GetOrRegisterGauge(g, nil).Inc(1)
// If we've just inserted the first item, start the expiration timer
if t.wake == nil {
@@ -141,12 +139,6 @@ func (t *Tracker) clean() {
// Nope, dead, drop it
t.expire.Remove(head)
delete(t.pending, id)
-
- g := fmt.Sprintf("%s/%s/%d/%#02x", trackedGaugeName, t.protocol, req.version, req.reqCode)
- metrics.GetOrRegisterGauge(g, nil).Dec(1)
-
- m := fmt.Sprintf("%s/%s/%d/%#02x", lostMeterName, t.protocol, req.version, req.reqCode)
- metrics.GetOrRegisterMeter(m, nil).Mark(1)
}
t.schedule()
}
@@ -163,17 +155,12 @@ func (t *Tracker) schedule() {
// Fulfil fills a pending request, if any is available, reporting on various metrics.
func (t *Tracker) Fulfil(peer string, version uint, code uint64, id uint64) {
- if !metrics.Enabled {
- return
- }
t.lock.Lock()
defer t.lock.Unlock()
// If it's a non existing request, track as stale response
req, ok := t.pending[id]
if !ok {
- m := fmt.Sprintf("%s/%s/%d/%#02x", staleMeterName, t.protocol, version, code)
- metrics.GetOrRegisterMeter(m, nil).Mark(1)
return
}
// If the response is funky, it might be some active attack
@@ -192,14 +179,4 @@ func (t *Tracker) Fulfil(peer string, version uint, code uint64, id uint64) {
t.schedule()
}
}
- g := fmt.Sprintf("%s/%s/%d/%#02x", trackedGaugeName, t.protocol, req.version, req.reqCode)
- metrics.GetOrRegisterGauge(g, nil).Dec(1)
-
- h := fmt.Sprintf("%s/%s/%d/%#02x", waitHistName, t.protocol, req.version, req.reqCode)
- sampler := func() metrics.Sample {
- return metrics.ResettingSample(
- metrics.NewExpDecaySample(1028, 0.015),
- )
- }
- metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(req.time).Microseconds())
}
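The net effect on call sites is that the compile-time metrics.Enabled flag becomes a runtime metrics_config.MetricsEnabled() check: Track bails out early when metrics are disabled, so the hot path pays only a boolean test. A toy standalone version of the guard shape (metricsEnabled stands in for metrics_config.MetricsEnabled, whose implementation lives outside this diff):

```go
package main

import "fmt"

// metricsEnabled stands in for metrics_config.MetricsEnabled().
var metricsEnabled = true

func track(id uint64) {
	if !metricsEnabled {
		return // metrics off: skip all bookkeeping
	}
	fmt.Println("tracking request", id)
}

func main() {
	track(1) // tracked
	metricsEnabled = false
	track(2) // no-op
}
```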
diff --git a/p2p/transport.go b/p2p/transport.go
index 32d76c6b03..08c2bf751d 100644
--- a/p2p/transport.go
+++ b/p2p/transport.go
@@ -27,7 +27,8 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/common/bitutil"
- "github.com/dominant-strategies/go-quai/metrics"
+ "github.com/dominant-strategies/go-quai/metrics_config"
+
"github.com/dominant-strategies/go-quai/p2p/rlpx"
"github.com/dominant-strategies/go-quai/rlp"
)
@@ -97,10 +98,7 @@ func (t *rlpxTransport) WriteMsg(msg Msg) error {
// Set metrics.
msg.meterSize = size
- if metrics.Enabled && msg.meterCap.Name != "" { // don't meter non-subprotocol messages
- m := fmt.Sprintf("%s/%s/%d/%#02x", egressMeterName, msg.meterCap.Name, msg.meterCap.Version, msg.meterCode)
- metrics.GetOrRegisterMeter(m, nil).Mark(int64(msg.meterSize))
- metrics.GetOrRegisterMeter(m+"/packets", nil).Mark(1)
+ if metrics_config.MetricsEnabled() && msg.meterCap.Name != "" { // don't meter non-subprotocol messages
}
return nil
}
diff --git a/rpc/handler.go b/rpc/handler.go
index 85443b0810..46cb8e5ca5 100644
--- a/rpc/handler.go
+++ b/rpc/handler.go
@@ -333,20 +333,14 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage
if err != nil {
return msg.errorResponse(&invalidParamsError{err.Error()})
}
- start := time.Now()
answer := h.runMethod(cp.ctx, msg, callb, args)
// Collect the statistics for RPC calls if metrics is enabled.
// We only care about pure rpc call. Filter out subscription.
if callb != h.unsubscribeCb {
- rpcRequestGauge.Inc(1)
if answer.Error != nil {
- failedReqeustGauge.Inc(1)
} else {
- successfulRequestGauge.Inc(1)
}
- rpcServingTimer.UpdateSince(start)
- newRPCServingTimer(msg.Method, answer.Error == nil).UpdateSince(start)
}
return answer
}
diff --git a/rpc/metrics.go b/rpc/metrics.go
deleted file mode 100644
index 09589d4c27..0000000000
--- a/rpc/metrics.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2020 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package rpc
-
-import (
- "fmt"
-
- "github.com/dominant-strategies/go-quai/metrics"
-)
-
-var (
- rpcRequestGauge = metrics.NewRegisteredGauge("rpc/requests", nil)
- successfulRequestGauge = metrics.NewRegisteredGauge("rpc/success", nil)
- failedReqeustGauge = metrics.NewRegisteredGauge("rpc/failure", nil)
- rpcServingTimer = metrics.NewRegisteredTimer("rpc/duration/all", nil)
-)
-
-func newRPCServingTimer(method string, valid bool) metrics.Timer {
- flag := "success"
- if !valid {
- flag = "failure"
- }
- m := fmt.Sprintf("rpc/duration/%s/%s", method, flag)
- return metrics.GetOrRegisterTimer(m, nil)
-}
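The deleted `rpc/metrics.go` encoded method and outcome into the metric name (`rpc/duration/<method>/<success|failure>`), registering one timer per combination. A Prometheus-style backend expresses the same dimensions as labels on a single collector; in this sketch the collector is a hypothetical stand-in, not anything this diff introduces:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// rpcDuration is a hypothetical stand-in for the deleted per-name timers:
// one histogram, with method and outcome carried as labels.
var rpcDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "rpc_duration_seconds",
		Help:    "RPC serving time by method and outcome.",
		Buckets: prometheus.DefBuckets,
	},
	[]string{"method", "outcome"},
)

func init() {
	prometheus.MustRegister(rpcDuration)
}

func observe(method string, failed bool, start time.Time) {
	outcome := "success"
	if failed {
		outcome = "failure"
	}
	rpcDuration.WithLabelValues(method, outcome).Observe(time.Since(start).Seconds())
}

func main() {
	start := time.Now()
	observe("quai_blockNumber", false, start)
}
```

One collector with two labels replaces an unbounded family of name-encoded timers, though the `method` label should stay bounded to known methods to keep cardinality manageable.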
diff --git a/trie/database.go b/trie/database.go
index f23db8be83..72e7400e43 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -30,32 +30,11 @@ import (
"github.com/dominant-strategies/go-quai/core/rawdb"
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
+
"github.com/dominant-strategies/go-quai/rlp"
)
var (
- memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
- memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
- memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
- memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)
-
- memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
- memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
- memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
- memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)
-
- memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
- memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
- memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)
-
- memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
- memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
- memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)
-
- memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
- memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
- memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)
// Database is an intermediate write layer between the trie data structures and
@@ -325,7 +304,6 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
if _, ok := db.dirties[hash]; ok {
return
}
- memcacheDirtyWriteMeter.Mark(int64(size))
// Create the cached entry for this node
entry := &cachedNode{
@@ -373,8 +351,6 @@ func (db *Database) node(hash common.Hash) node {
// Retrieve the node from the clean cache if available
if db.cleans != nil {
if enc := db.cleans.Get(nil, hash[:]); enc != nil {
- memcacheCleanHitMeter.Mark(1)
- memcacheCleanReadMeter.Mark(int64(len(enc)))
return mustDecodeNode(hash[:], enc)
}
}
@@ -384,11 +360,8 @@ func (db *Database) node(hash common.Hash) node {
db.lock.RUnlock()
if dirty != nil {
- memcacheDirtyHitMeter.Mark(1)
- memcacheDirtyReadMeter.Mark(int64(dirty.size))
return dirty.obj(hash)
}
- memcacheDirtyMissMeter.Mark(1)
// Content unavailable in memory, attempt to retrieve from disk
enc, err := db.diskdb.Get(hash[:])
@@ -397,8 +370,6 @@ func (db *Database) node(hash common.Hash) node {
}
if db.cleans != nil {
db.cleans.Set(hash[:], enc)
- memcacheCleanMissMeter.Mark(1)
- memcacheCleanWriteMeter.Mark(int64(len(enc)))
}
return mustDecodeNode(hash[:], enc)
}
@@ -413,8 +384,6 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
// Retrieve the node from the clean cache if available
if db.cleans != nil {
if enc := db.cleans.Get(nil, hash[:]); enc != nil {
- memcacheCleanHitMeter.Mark(1)
- memcacheCleanReadMeter.Mark(int64(len(enc)))
return enc, nil
}
}
@@ -424,19 +393,14 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
db.lock.RUnlock()
if dirty != nil {
- memcacheDirtyHitMeter.Mark(1)
- memcacheDirtyReadMeter.Mark(int64(dirty.size))
return dirty.rlp(), nil
}
- memcacheDirtyMissMeter.Mark(1)
// Content unavailable in memory, attempt to retrieve from disk
enc := rawdb.ReadTrieNode(db.diskdb, hash)
if len(enc) != 0 {
if db.cleans != nil {
db.cleans.Set(hash[:], enc)
- memcacheCleanMissMeter.Mark(1)
- memcacheCleanWriteMeter.Mark(int64(len(enc)))
}
return enc, nil
}
@@ -526,10 +490,6 @@ func (db *Database) Dereference(root common.Hash) {
db.gcsize += storage - db.dirtiesSize
db.gctime += time.Since(start)
- memcacheGCTimeTimer.Update(time.Since(start))
- memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
- memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))
-
log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}
@@ -676,10 +636,6 @@ func (db *Database) Cap(limit common.StorageSize) error {
db.flushsize += storage - db.dirtiesSize
db.flushtime += time.Since(start)
- memcacheFlushTimeTimer.Update(time.Since(start))
- memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
- memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))
-
log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
@@ -734,9 +690,6 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H
if db.preimages != nil {
db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
}
- memcacheCommitTimeTimer.Update(time.Since(start))
- memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
- memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
logger := log.Info
if !report {
@@ -825,7 +778,6 @@ func (c *cleaner) Put(key []byte, rlp []byte) error {
// Move the flushed node into the clean cache to prevent insta-reloads
if c.db.cleans != nil {
c.db.cleans.Set(hash[:], rlp)
- memcacheCleanWriteMeter.Mark(int64(len(rlp)))
}
return nil
}
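Every meter stripped from `trie/database.go` followed one shape: on a cache probe, mark a hit or a miss, plus a byte-volume meter for the payload moved. A condensed sketch of that shape (go-ethereum-style `metrics`; the map stands in for the clean cache):

```go
package main

import (
	"github.com/ethereum/go-ethereum/metrics"
)

var (
	cleanHitMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	cleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	cleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
)

// probeClean wraps a clean-cache lookup with the removed accounting:
// hits mark the hit meter plus the bytes served, misses mark the miss meter.
func probeClean(cache map[string][]byte, key string) ([]byte, bool) {
	if enc, ok := cache[key]; ok {
		cleanHitMeter.Mark(1)
		cleanReadMeter.Mark(int64(len(enc)))
		return enc, true
	}
	cleanMissMeter.Mark(1)
	return nil, false
}

func main() {
	cache := map[string][]byte{"node": {0x01, 0x02}}
	probeClean(cache, "node")    // hit path
	probeClean(cache, "missing") // miss path
}
```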
diff --git a/trie/sync.go b/trie/sync.go
index b93f878d2c..8f2cbae3b1 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -163,8 +163,6 @@ func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, cal
if len(blob) > 0 {
return
}
- // False positive, bump fault meter
- bloomFaultMeter.Mark(1)
}
// Assemble the new sub-trie sync request
req := &request{
@@ -205,8 +203,6 @@ func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
return
}
- // False positive, bump fault meter
- bloomFaultMeter.Mark(1)
}
// Assemble the new sub-trie sync request
req := &request{
@@ -424,8 +420,6 @@ func (s *Sync) children(req *request, object node) ([]*request, error) {
if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
continue
}
- // False positive, bump fault meter
- bloomFaultMeter.Mark(1)
}
// Locally unknown node, schedule for retrieval
requests = append(requests, &request{
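Each removed `bloomFaultMeter.Mark(1)` sat on the path where the bloom answered "maybe" but the database lookup came back empty, i.e. a confirmed false positive. Counting those is what let the filter's advertised error rate be checked against reality; a dependency-free sketch of the pattern:

```go
package main

import (
	"fmt"
)

// faultMeter counts confirmed bloom false positives. In the removed code this
// was a registered meter; a plain counter keeps the sketch self-contained.
var faultMeter int64

// haveNode models the removed pattern: consult the bloom first, and when it
// says "maybe" but the backing store disagrees, bump the fault counter.
func haveNode(bloomMaybe func(key string) bool, read func(key string) []byte, key string) bool {
	if !bloomMaybe(key) {
		return false // definite miss, no DB touch needed
	}
	if blob := read(key); len(blob) > 0 {
		return true
	}
	faultMeter++ // false positive: bloom said maybe, disk said no
	return false
}

func main() {
	bloom := func(string) bool { return true } // always answers "maybe"
	db := func(key string) []byte { return nil }
	haveNode(bloom, db, "deadbeef")
	fmt.Println("bloom faults:", faultMeter)
}
```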
diff --git a/trie/sync_bloom.go b/trie/sync_bloom.go
index 128d4c6d89..939c0f7ddf 100644
--- a/trie/sync_bloom.go
+++ b/trie/sync_bloom.go
@@ -27,17 +27,11 @@ import (
"github.com/dominant-strategies/go-quai/core/rawdb"
"github.com/dominant-strategies/go-quai/ethdb"
"github.com/dominant-strategies/go-quai/log"
- "github.com/dominant-strategies/go-quai/metrics"
+
bloomfilter "github.com/holiman/bloomfilter/v2"
)
var (
- bloomAddMeter = metrics.NewRegisteredMeter("trie/bloom/add", nil)
- bloomLoadMeter = metrics.NewRegisteredMeter("trie/bloom/load", nil)
- bloomTestMeter = metrics.NewRegisteredMeter("trie/bloom/test", nil)
- bloomMissMeter = metrics.NewRegisteredMeter("trie/bloom/miss", nil)
- bloomFaultMeter = metrics.NewRegisteredMeter("trie/bloom/fault", nil)
- bloomErrorGauge = metrics.NewRegisteredGauge("trie/bloom/error", nil)
)
// SyncBloom is a bloom filter used during fast sync to quickly decide if a trie
@@ -100,11 +94,9 @@ func (b *SyncBloom) init(database ethdb.Iteratee) {
key := it.Key()
if len(key) == common.HashLength {
b.bloom.AddHash(binary.BigEndian.Uint64(key))
- bloomLoadMeter.Mark(1)
} else if ok, hash := rawdb.IsCodeKey(key); ok {
// If the database entry is a contract code, add it to the bloom
b.bloom.AddHash(binary.BigEndian.Uint64(hash))
- bloomLoadMeter.Mark(1)
}
// If enough time elapsed since the last iterator swap, restart
if time.Since(swap) > 8*time.Second {
@@ -133,7 +125,6 @@ func (b *SyncBloom) meter() {
select {
case <-tick.C:
// Report the current error ratio. Gauges are integer-only, so scale it up.
- bloomErrorGauge.Update(int64(b.bloom.FalsePosititveProbability() * 100000))
case <-b.closeCh:
return
}
@@ -164,7 +155,6 @@ func (b *SyncBloom) Add(hash []byte) {
return
}
b.bloom.AddHash(binary.BigEndian.Uint64(hash))
- bloomAddMeter.Mark(1)
}
// Contains tests if the bloom filter contains the given hash:
@@ -173,7 +163,6 @@ func (b *SyncBloom) Add(hash []byte) {
//
// While the bloom is being initialized, any query will return true.
func (b *SyncBloom) Contains(hash []byte) bool {
- bloomTestMeter.Mark(1)
if atomic.LoadUint32(&b.inited) == 0 {
// We didn't load all the trie nodes from the previous run of Quai yet. As
// such, we can't say for sure if a hash is not present for anything. Until
@@ -183,7 +172,6 @@ func (b *SyncBloom) Contains(hash []byte) bool {
// Bloom initialized, check the real one and report any successful misses
maybe := b.bloom.ContainsHash(binary.BigEndian.Uint64(hash))
if !maybe {
- bloomMissMeter.Mark(1)
}
return maybe
}
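The dropped gauge update worked around those integer-only gauges by scaling the filter's false-positive probability by 100000, so a probability of 0.0153 reports as 1530. A sketch against the same `holiman/bloomfilter/v2` the file already imports (the filter sizing is illustrative, and the misspelled method name is the library's real API):

```go
package main

import (
	"fmt"

	bloomfilter "github.com/holiman/bloomfilter/v2"
)

func main() {
	// 8M bits (1 MiB) with 4 hash functions, a stand-in for the sync bloom.
	bloom, err := bloomfilter.New(8*1024*1024, 4)
	if err != nil {
		panic(err)
	}
	for i := uint64(0); i < 100000; i++ {
		bloom.AddHash(i)
	}
	// The removed gauge carried int64, so the float probability is scaled up
	// before reporting; the method-name typo is the library's, not ours.
	scaled := int64(bloom.FalsePosititveProbability() * 100000)
	fmt.Println("scaled error ratio:", scaled)
}
```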