Update logging config to allow for more control over loggers #1071

Merged · 7 commits · Mar 21, 2024 · Changes from all commits
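This PR promotes a number of autopilot log lines from Debug to Info so they are visible at the default level, alongside config changes that expose more control over the loggers. The calls touched in the diff (Debugf, Debugw, Infof, Infow) follow the API of zap's SugaredLogger. As a rough sketch only, and not code from this PR, the snippet below shows how a configurable minimum level can be wired into such a logger; the `newLogger` helper and the `"info"` default are hypothetical:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// newLogger is a hypothetical helper: it builds a zap SugaredLogger whose
// minimum level comes from configuration instead of being hard-coded.
func newLogger(level string) (*zap.SugaredLogger, error) {
	lvl, err := zapcore.ParseLevel(level) // "debug", "info", "warn", ...
	if err != nil {
		return nil, err
	}
	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(lvl)
	logger, err := cfg.Build()
	if err != nil {
		return nil, err
	}
	return logger.Sugar(), nil
}

func main() {
	log, err := newLogger("info")
	if err != nil {
		panic(err)
	}
	defer func() { _ = log.Sync() }()

	// At level "info" the Debug line is suppressed and the Info line is
	// emitted, which is the practical effect of the promotions below.
	log.Debugw("account refills loop launched") // dropped
	log.Infow("pruning disabled")               // printed
}
```

With the level left at "info", the messages changed in the files below now appear without having to lower the configured level to debug.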
4 changes: 2 additions & 2 deletions autopilot/autopilot.go
@@ -318,7 +318,7 @@ func (ap *Autopilot) Run() error {
// launch account refills after successful contract maintenance.
if maintenanceSuccess {
launchAccountRefillsOnce.Do(func() {
-ap.logger.Debug("account refills loop launched")
+ap.logger.Info("account refills loop launched")
go ap.a.refillWorkersAccountsLoop(ap.shutdownCtx)
})
}
@@ -330,7 +330,7 @@ func (ap *Autopilot) Run() error {
if ap.state.cfg.Contracts.Prune {
ap.c.tryPerformPruning(ap.workers)
} else {
-ap.logger.Debug("pruning disabled")
+ap.logger.Info("pruning disabled")
}
})

64 changes: 32 additions & 32 deletions autopilot/contractor.go
@@ -212,7 +212,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) (
for _, c := range currentSet {
isInCurrentSet[c.ID] = struct{}{}
}
c.logger.Debugf("contract set '%s' holds %d contracts", state.cfg.Contracts.Set, len(currentSet))
c.logger.Infof("contract set '%s' holds %d contracts", state.cfg.Contracts.Set, len(currentSet))

// fetch all contracts from the worker.
start := time.Now()
@@ -224,7 +224,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) (
c.logger.Error(resp.Error)
}
contracts := resp.Contracts
c.logger.Debugf("fetched %d contracts from the worker, took %v", len(resp.Contracts), time.Since(start))
c.logger.Infof("fetched %d contracts from the worker, took %v", len(resp.Contracts), time.Since(start))

// run revision broadcast
c.runRevisionBroadcast(ctx, w, contracts, isInCurrentSet)
@@ -317,7 +317,7 @@ func (c *contractor) performContractMaintenance(ctx context.Context, w Worker) (

// archive contracts
if len(toArchive) > 0 {
c.logger.Debugf("archiving %d contracts: %+v", len(toArchive), toArchive)
c.logger.Infof("archiving %d contracts: %+v", len(toArchive), toArchive)
if err := c.ap.bus.ArchiveContracts(ctx, toArchive); err != nil {
c.logger.Errorf("failed to archive contracts, err: %v", err) // continue
}
@@ -487,7 +487,7 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string,
Time: now,
})
setRemovals[contract.ID] = removals
c.logger.Debugf("contract %v was removed from the contract set, size: %v, reason: %v", contract.ID, contractData[contract.ID], reason)
c.logger.Infof("contract %v was removed from the contract set, size: %v, reason: %v", contract.ID, contractData[contract.ID], reason)
}
}
for _, contract := range newSet {
@@ -505,20 +505,20 @@ func (c *contractor) computeContractSetChanged(ctx context.Context, name string,
Time: now,
})
setAdditions[contract.ID] = additions
c.logger.Debugf("contract %v was added to the contract set, size: %v", contract.ID, contractData[contract.ID])
c.logger.Infof("contract %v was added to the contract set, size: %v", contract.ID, contractData[contract.ID])
}
}

// log renewed contracts that did not make it into the contract set
for _, fcid := range renewed {
_, exists := inNewSet[fcid.to.ID]
if !exists {
c.logger.Debugf("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to.ID])
c.logger.Infof("contract %v was renewed but did not make it into the contract set, size: %v", fcid, contractData[fcid.to.ID])
}
}

// log a warning if the contract set does not contain enough contracts
-logFn := c.logger.Debugw
+logFn := c.logger.Infow
if len(newSet) < int(c.ap.State().rs.TotalShards) {
logFn = c.logger.Warnw
}
@@ -626,7 +626,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error {
for _, txn := range pending {
for _, mTxnID := range c.maintenanceTxnIDs {
if mTxnID == txn.ID() {
l.Debugf("wallet maintenance skipped, pending transaction found with id %v", mTxnID)
l.Infof("wallet maintenance skipped, pending transaction found with id %v", mTxnID)
return nil
}
}
@@ -640,7 +640,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error {
return err
}
if uint64(len(available)) >= uint64(wantedNumOutputs) {
l.Debugf("no wallet maintenance needed, plenty of outputs available (%v>=%v)", len(available), uint64(wantedNumOutputs))
l.Infof("no wallet maintenance needed, plenty of outputs available (%v>=%v)", len(available), uint64(wantedNumOutputs))
return nil
}
wantedNumOutputs -= len(available)
@@ -654,7 +654,7 @@ func (c *contractor) performWalletMaintenance(ctx context.Context) error {
return fmt.Errorf("failed to redistribute wallet into %d outputs of amount %v, balance %v, err %v", wantedNumOutputs, amount, balance, err)
}

l.Debugf("wallet maintenance succeeded, txns %v", ids)
l.Infof("wallet maintenance succeeded, txns %v", ids)
c.maintenanceTxnIDs = ids
return nil
}
@@ -663,7 +663,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts
if c.ap.isStopped() {
return
}
c.logger.Debug("running contract checks")
c.logger.Info("running contract checks")

// convenience variables
state := c.ap.State()
@@ -685,7 +685,7 @@ func (c *contractor) runContractChecks(ctx context.Context, w Worker, contracts

var notfound int
defer func() {
-c.logger.Debugw(
+c.logger.Infow(
"contracts checks completed",
"contracts", len(contracts),
"notfound", notfound,
@@ -853,15 +853,15 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid
state := c.ap.State()
shouldFilter := !state.cfg.Hosts.AllowRedundantIPs

-c.logger.Debugw(
+c.logger.Infow(
"run contract formations",
"usedHosts", len(usedHosts),
"required", state.cfg.Contracts.Amount,
"missing", missing,
"budget", budget,
)
defer func() {
-c.logger.Debugw(
+c.logger.Infow(
"contract formations completed",
"formed", len(formed),
"budget", budget,
@@ -873,7 +873,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid
selected := candidates.randSelectByScore(wanted)

// print warning if we couldn't find enough hosts were found
c.logger.Debugf("looking for %d candidate hosts", wanted)
c.logger.Infof("looking for %d candidate hosts", wanted)
if len(selected) < wanted {
msg := "no candidate hosts found"
if len(selected) > 0 {
@@ -882,7 +882,7 @@ func (c *contractor) runContractFormations(ctx context.Context, w Worker, candid
if len(candidates) >= wanted {
c.logger.Warnw(msg, unusableHosts.keysAndValues()...)
} else {
-c.logger.Debugw(msg, unusableHosts.keysAndValues()...)
+c.logger.Infow(msg, unusableHosts.keysAndValues()...)
}
}

@@ -1030,14 +1030,14 @@ func (c *contractor) runRevisionBroadcast(ctx context.Context, w Worker, allCont
}

func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew []contractInfo, budget *types.Currency, limit int) (renewals []renewal, toKeep []api.ContractMetadata) {
-c.logger.Debugw(
+c.logger.Infow(
"run contracts renewals",
"torenew", len(toRenew),
"limit", limit,
"budget", budget,
)
defer func() {
-c.logger.Debugw(
+c.logger.Infow(
"contracts renewals completed",
"renewals", len(renewals),
"tokeep", len(toKeep),
@@ -1088,13 +1088,13 @@ func (c *contractor) runContractRenewals(ctx context.Context, w Worker, toRenew
}

func (c *contractor) runContractRefreshes(ctx context.Context, w Worker, toRefresh []contractInfo, budget *types.Currency) (refreshed []renewal, _ error) {
-c.logger.Debugw(
+c.logger.Infow(
"run contracts refreshes",
"torefresh", len(toRefresh),
"budget", budget,
)
defer func() {
-c.logger.Debugw(
+c.logger.Infow(
"contracts refreshes completed",
"refreshed", len(refreshed),
"budget", budget,
@@ -1152,7 +1152,7 @@ func (c *contractor) refreshFundingEstimate(cfg api.AutopilotConfig, ci contract
if refreshAmountCapped.Cmp(minimum) < 0 {
refreshAmountCapped = minimum
}
c.logger.Debugw("refresh estimate",
c.logger.Infow("refresh estimate",
"fcid", ci.contract.ID,
"refreshAmount", refreshAmount,
"refreshAmountCapped", refreshAmountCapped)
@@ -1228,7 +1228,7 @@ func (c *contractor) renewFundingEstimate(ctx context.Context, ci contractInfo,
}

if renewing {
c.logger.Debugw("renew estimate",
c.logger.Infow("renew estimate",
"fcid", ci.contract.ID,
"dataStored", dataStored,
"storageCost", storageCost.String(),
@@ -1327,7 +1327,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo
unused = append(unused, h)
}

c.logger.Debugw(fmt.Sprintf("selected %d (potentially) usable hosts for scoring out of %d", len(unused), len(hosts)),
c.logger.Infow(fmt.Sprintf("selected %d (potentially) usable hosts for scoring out of %d", len(unused), len(hosts)),
"excluded", excluded,
"notcompletedscan", notcompletedscan,
"used", len(usedHosts))
@@ -1360,7 +1360,7 @@ func (c *contractor) candidateHosts(ctx context.Context, hosts []hostdb.HostInfo
unusable++
}

c.logger.Debugw(fmt.Sprintf("scored %d unused hosts out of %v, took %v", len(candidates), len(unused), time.Since(start)),
c.logger.Infow(fmt.Sprintf("scored %d unused hosts out of %v, took %v", len(candidates), len(unused), time.Since(start)),
"zeroscore", zeros,
"unusable", unusable,
"used", len(usedHosts))
@@ -1397,14 +1397,14 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf

// check our budget
if budget.Cmp(renterFunds) < 0 {
c.logger.Debugw("insufficient budget", "budget", budget, "needed", renterFunds)
c.logger.Infow("insufficient budget", "budget", budget, "needed", renterFunds)
return api.ContractMetadata{}, false, errors.New("insufficient budget")
}

// sanity check the endheight is not the same on renewals
endHeight := endHeight(cfg, state.period)
if endHeight <= rev.EndHeight() {
c.logger.Debugw("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", state.period, "bh", cs.BlockHeight)
c.logger.Infow("invalid renewal endheight", "oldEndheight", rev.EndHeight(), "newEndHeight", endHeight, "period", state.period, "bh", cs.BlockHeight)
return api.ContractMetadata{}, false, fmt.Errorf("renewal endheight should surpass the current contract endheight, %v <= %v", endHeight, rev.EndHeight())
}

Expand Down Expand Up @@ -1440,7 +1440,7 @@ func (c *contractor) renewContract(ctx context.Context, w Worker, ci contractInf
}

newCollateral := resp.Contract.Revision.MissedHostPayout().Sub(resp.ContractPrice)
-c.logger.Debugw(
+c.logger.Infow(
"renewal succeeded",
"fcid", renewedContract.ID,
"renewedFrom", fcid,
@@ -1493,7 +1493,7 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI
resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, state.address, renterFunds, minNewCollateral, expectedStorage, settings.WindowSize)
if err != nil {
if strings.Contains(err.Error(), "new collateral is too low") {
c.logger.Debugw("refresh failed: contract wouldn't have enough collateral after refresh",
c.logger.Infow("refresh failed: contract wouldn't have enough collateral after refresh",
"hk", hk,
"fcid", fcid,
"unallocatedCollateral", unallocatedCollateral.String(),
@@ -1520,7 +1520,7 @@ func (c *contractor) refreshContract(ctx context.Context, w Worker, ci contractI

// add to renewed set
newCollateral := resp.Contract.Revision.MissedHostPayout().Sub(resp.ContractPrice)
c.logger.Debugw("refresh succeeded",
c.logger.Infow("refresh succeeded",
"fcid", refreshedContract.ID,
"renewedFrom", contract.ID,
"renterFunds", renterFunds.String(),
@@ -1538,7 +1538,7 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos
// fetch host settings
scan, err := w.RHPScan(ctx, hk, host.NetAddress, 0)
if err != nil {
-c.logger.Debugw(err.Error(), "hk", hk)
+c.logger.Infow(err.Error(), "hk", hk)
return api.ContractMetadata{}, true, err
}

Expand All @@ -1552,7 +1552,7 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos
txnFee := state.fee.Mul64(estimatedFileContractTransactionSetSize)
renterFunds := initialContractFunding(scan.Settings, txnFee, minInitialContractFunds, maxInitialContractFunds)
if budget.Cmp(renterFunds) < 0 {
c.logger.Debugw("insufficient budget", "budget", budget, "needed", renterFunds)
c.logger.Infow("insufficient budget", "budget", budget, "needed", renterFunds)
return api.ContractMetadata{}, false, errors.New("insufficient budget")
}

@@ -1583,7 +1583,7 @@ func (c *contractor) formContract(ctx context.Context, w Worker, host hostdb.Hos
return api.ContractMetadata{}, true, err
}

c.logger.Debugw("formation succeeded",
c.logger.Infow("formation succeeded",
"hk", hk,
"fcid", formedContract.ID,
"renterFunds", renterFunds.String(),
2 changes: 1 addition & 1 deletion autopilot/ipfilter.go
@@ -140,7 +140,7 @@ func (r *ipResolver) lookup(hostIP string) ([]string, error) {
// check the cache if it's an i/o timeout or server misbehaving error
if utils.IsErr(err, errIOTimeout) || utils.IsErr(err, errServerMisbehaving) {
if entry, found := r.cache[hostIP]; found && time.Since(entry.created) < ipCacheEntryValidity {
-r.logger.Debugf("using cached IP addresses for %v, err: %v", hostIP, err)
+r.logger.Infof("using cached IP addresses for %v, err: %v", hostIP, err)
return entry.subnets, nil
}
}
6 changes: 3 additions & 3 deletions autopilot/migrator.go
@@ -213,15 +213,15 @@ OUTER:
m.logger.Errorf("failed to recompute cached health before migration: %v", err)
return
}
m.logger.Debugf("recomputed slab health in %v", time.Since(start))
m.logger.Infof("recomputed slab health in %v", time.Since(start))

// fetch slabs for migration
toMigrateNew, err := b.SlabsForMigration(m.ap.shutdownCtx, m.healthCutoff, set, migratorBatchSize)
if err != nil {
m.logger.Errorf("failed to fetch slabs for migration, err: %v", err)
return
}
m.logger.Debugf("%d potential slabs fetched for migration", len(toMigrateNew))
m.logger.Infof("%d potential slabs fetched for migration", len(toMigrateNew))

// merge toMigrateNew with toMigrate
// NOTE: when merging, we remove all slabs from toMigrate that don't
@@ -256,7 +256,7 @@ OUTER:
migrateNewMap = nil // free map

// log the updated list of slabs to migrate
m.logger.Debugf("%d slabs to migrate", len(toMigrate))
m.logger.Infof("%d slabs to migrate", len(toMigrate))

// register an alert to notify users about ongoing migrations.
if len(toMigrate) > 0 {
10 changes: 5 additions & 5 deletions autopilot/scanner.go
@@ -215,7 +215,7 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo
minRecentScanFailures := hostCfg.MinRecentScanFailures

if !interrupted && maxDowntime > 0 {
s.logger.Debugf("removing hosts that have been offline for more than %v and have failed at least %d scans", maxDowntime, minRecentScanFailures)
s.logger.Infof("removing hosts that have been offline for more than %v and have failed at least %d scans", maxDowntime, minRecentScanFailures)
removed, err := s.bus.RemoveOfflineHosts(ctx, minRecentScanFailures, maxDowntime)
if err != nil {
s.logger.Errorf("error occurred while removing offline hosts, err: %v", err)
@@ -226,7 +226,7 @@ func (s *scanner) tryPerformHostScan(ctx context.Context, w scanWorker, force bo

s.mu.Lock()
s.scanning = false
s.logger.Debugf("%s finished after %v", st, time.Since(s.scanningLastStart))
s.logger.Infof("%s finished after %v", st, time.Since(s.scanningLastStart))
s.mu.Unlock()
}(scanType)
return
@@ -241,12 +241,12 @@ func (s *scanner) tryUpdateTimeout() {

updated := s.tracker.timeout()
if updated < s.timeoutMinTimeout {
s.logger.Debugf("updated timeout is lower than min timeout, %v<%v", updated, s.timeoutMinTimeout)
s.logger.Infof("updated timeout is lower than min timeout, %v<%v", updated, s.timeoutMinTimeout)
updated = s.timeoutMinTimeout
}

if s.timeout != updated {
s.logger.Debugf("updated timeout %v->%v", s.timeout, updated)
s.logger.Infof("updated timeout %v->%v", s.timeout, updated)
s.timeout = updated
}
s.timeoutLastUpdate = time.Now()
@@ -281,7 +281,7 @@ func (s *scanner) launchHostScans() chan scanReq {
exhausted = true
}

s.logger.Debugf("scanning %d hosts in range %d-%d", len(hosts), offset, offset+int(s.scanBatchSize))
s.logger.Infof("scanning %d hosts in range %d-%d", len(hosts), offset, offset+int(s.scanBatchSize))
offset += int(s.scanBatchSize)

// add batch to scan queue