
Move some logs to debug level
This should reduce the pressure on the file system and RAM without impacting
our ability to troubleshoot common issues.
t-bast committed Aug 29, 2023
1 parent 8d42052 commit 3174b43
Showing 13 changed files with 33 additions and 33 deletions.
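
Why demoting these statements helps: eclair logs through akka and logback, and once the effective level is INFO, a debug call is rejected by the level check before any message string is built or handed to the appenders, so it costs neither allocations nor disk writes. Below is a minimal sketch of that gating — the INFO level is an assumption about the deployed configuration, and NoisyActor / LogLevelDemo are hypothetical names, not code from this commit:

import akka.actor.{Actor, ActorSystem, Props}
import akka.event.Logging

// Hypothetical actor, not from the commit: with the effective level at
// INFO (assumed), the debug call is dropped before the template is
// formatted, so it adds no file-system or RAM pressure.
class NoisyActor extends Actor {
  private val log = Logging(context.system, this)

  override def receive: Receive = {
    case msg: String =>
      // The level check runs first: with debug disabled, the template is
      // never formatted and nothing reaches the appenders.
      log.debug("received message={}", msg)
      // Info-level statements still land in the log file as before.
      log.info("processed one message")
  }
}

object LogLevelDemo extends App {
  val system = ActorSystem("log-level-demo")
  system.actorOf(Props[NoisyActor]()) ! "hello"
  system.terminate()
}

Note that only placeholder-style calls get this for free; a few hunks below also migrate interpolated messages to placeholders for the same reason.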
@@ -339,7 +339,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
if (fees.feeBase != normal.channelUpdate.feeBaseMsat ||
fees.feeProportionalMillionths != normal.channelUpdate.feeProportionalMillionths ||
nodeParams.channelConf.expiryDelta != normal.channelUpdate.cltvExpiryDelta) {
log.info("refreshing channel_update due to configuration changes")
log.debug("refreshing channel_update due to configuration changes")
self ! CMD_UPDATE_RELAY_FEE(ActorRef.noSender, fees.feeBase, fees.feeProportionalMillionths, Some(nodeParams.channelConf.expiryDelta))
}
// we need to periodically re-send channel updates, otherwise channel will be considered stale and get pruned by network
@@ -804,7 +804,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with

case Event(c: CMD_UPDATE_RELAY_FEE, d: DATA_NORMAL) =>
val channelUpdate1 = Announcements.makeChannelUpdate(nodeParams.chainHash, nodeParams.privateKey, remoteNodeId, scidForChannelUpdate(d), c.cltvExpiryDelta_opt.getOrElse(d.channelUpdate.cltvExpiryDelta), d.channelUpdate.htlcMinimumMsat, c.feeBase, c.feeProportionalMillionths, d.commitments.params.maxHtlcAmount, isPrivate = !d.commitments.announceChannel, enable = Helpers.aboveReserve(d.commitments))
log.info(s"updating relay fees: prev={} next={}", d.channelUpdate.toStringShort, channelUpdate1.toStringShort)
log.debug(s"updating relay fees: prev={} next={}", d.channelUpdate.toStringShort, channelUpdate1.toStringShort)
val replyTo = if (c.replyTo == ActorRef.noSender) sender() else c.replyTo
replyTo ! RES_SUCCESS(c, d.channelId)
// we use goto() instead of stay() because we want to fire transitions
@@ -2496,7 +2496,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with
// we will only emit a new channel_update with the disable flag set if someone tries to use that channel
if (d.channelUpdate.channelFlags.isEnabled) {
// if the channel isn't disabled we generate a new channel_update
log.info("updating channel_update announcement (reason=disabled)")
log.debug("updating channel_update announcement (reason=disabled)")
val channelUpdate1 = Announcements.makeChannelUpdate(nodeParams.chainHash, nodeParams.privateKey, remoteNodeId, scidForChannelUpdate(d), d.channelUpdate.cltvExpiryDelta, d.channelUpdate.htlcMinimumMsat, d.channelUpdate.feeBaseMsat, d.channelUpdate.feeProportionalMillionths, d.commitments.params.maxHtlcAmount, isPrivate = !d.commitments.announceChannel, enable = false)
// then we update the state and replay the request
self forward c
@@ -2511,7 +2511,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with

private def handleUpdateRelayFeeDisconnected(c: CMD_UPDATE_RELAY_FEE, d: DATA_NORMAL) = {
val channelUpdate1 = Announcements.makeChannelUpdate(nodeParams.chainHash, nodeParams.privateKey, remoteNodeId, scidForChannelUpdate(d), c.cltvExpiryDelta_opt.getOrElse(d.channelUpdate.cltvExpiryDelta), d.channelUpdate.htlcMinimumMsat, c.feeBase, c.feeProportionalMillionths, d.commitments.params.maxHtlcAmount, isPrivate = !d.commitments.announceChannel, enable = false)
log.info(s"updating relay fees: prev={} next={}", d.channelUpdate.toStringShort, channelUpdate1.toStringShort)
log.debug(s"updating relay fees: prev={} next={}", d.channelUpdate.toStringShort, channelUpdate1.toStringShort)
val replyTo = if (c.replyTo == ActorRef.noSender) sender() else c.replyTo
replyTo ! RES_SUCCESS(c, d.channelId)
// We're in OFFLINE state, by using stay() instead of goto() we skip the transition handler and won't broadcast the
@@ -156,7 +156,7 @@ trait ErrorHandlers extends CommonHandlers {
private def publishIfNeeded(txs: Iterable[PublishTx], irrevocablySpent: Map[OutPoint, Transaction]): Unit = {
val (skip, process) = txs.partition(publishTx => Closing.inputAlreadySpent(publishTx.input, irrevocablySpent))
process.foreach { publishTx => txPublisher ! publishTx }
-skip.foreach(publishTx => log.info("no need to republish tx spending {}:{}, it has already been confirmed", publishTx.input.txid, publishTx.input.index))
+skip.foreach(publishTx => log.debug("no need to republish tx spending {}:{}, it has already been confirmed", publishTx.input.txid, publishTx.input.index))
}

/**
@@ -165,7 +165,7 @@ trait ErrorHandlers extends CommonHandlers {
private def watchConfirmedIfNeeded(txs: Iterable[Transaction], irrevocablySpent: Map[OutPoint, Transaction]): Unit = {
val (skip, process) = txs.partition(Closing.inputsAlreadySpent(_, irrevocablySpent))
process.foreach(tx => blockchain ! WatchTxConfirmed(self, tx.txid, nodeParams.channelConf.minDepthBlocks))
-skip.foreach(tx => log.info(s"no need to watch txid=${tx.txid}, it has already been confirmed"))
+skip.foreach(tx => log.debug(s"no need to watch txid=${tx.txid}, it has already been confirmed"))
}

/**
@@ -180,7 +180,7 @@ trait ErrorHandlers extends CommonHandlers {
}
val (skip, process) = outputs.partition(irrevocablySpent.contains)
process.foreach(output => blockchain ! WatchOutputSpent(self, parentTx.txid, output.index.toInt, Set.empty))
-skip.foreach(output => log.info(s"no need to watch output=${output.txid}:${output.index}, it has already been spent by txid=${irrevocablySpent.get(output).map(_.txid)}"))
+skip.foreach(output => log.debug(s"no need to watch output=${output.txid}:${output.index}, it has already been spent by txid=${irrevocablySpent.get(output).map(_.txid)}"))
}

def spendLocalCurrent(d: ChannelDataWithCommitments) = {
@@ -78,7 +78,7 @@ trait SingleFundingHandlers extends CommonFundingHandlers {
case Some(fundingTx) =>
// if we are funder, we never give up
// we cannot correctly set the fee, but it was correctly set when we initially published the transaction
log.info(s"republishing the funding tx...")
log.debug(s"republishing the funding tx...")
txPublisher ! PublishFinalTx(fundingTx, fundingTx.txIn.head.outPoint, "funding", 0 sat, None)
// we also check if the funding tx has been double-spent
checkDoubleSpent(fundingTx)
@@ -153,7 +153,7 @@ private class MempoolTxMonitor(nodeParams: NodeParams,
}
Behaviors.same
} else if (confirmations < nodeParams.channelConf.minDepthBlocks) {
log.info("txid={} has {} confirmations, waiting to reach min depth", cmd.tx.txid, confirmations)
log.debug("txid={} has {} confirmations, waiting to reach min depth", cmd.tx.txid, confirmations)
cmd.replyTo ! TxRecentlyConfirmed(cmd.tx.txid, confirmations)
Behaviors.same
} else {
@@ -250,7 +250,7 @@ private class ReplaceableTxFunder(nodeParams: NodeParams,
case htlcTx: HtlcWithWitnessData =>
val htlcFeerate = cmd.commitment.localCommit.spec.htlcTxFeerate(cmd.commitment.params.commitmentFormat)
if (targetFeerate <= htlcFeerate) {
log.info("publishing {} without adding inputs: txid={}", cmd.desc, htlcTx.txInfo.tx.txid)
log.debug("publishing {} without adding inputs: txid={}", cmd.desc, htlcTx.txInfo.tx.txid)
sign(txWithWitnessData, htlcFeerate, htlcTx.txInfo.amountIn)
} else {
addWalletInputs(htlcTx, targetFeerate)
@@ -106,7 +106,7 @@ class PeerConnection(keyPair: KeyPair, conf: PeerConnection.Conf, switchboard: A
case Event(InitializeConnection(peer, chainHash, localFeatures, doSync), d: BeforeInitData) =>
d.transport ! TransportHandler.Listener(self)
Metrics.PeerConnectionsConnecting.withTag(Tags.ConnectionState, Tags.ConnectionStates.Initializing).increment()
log.info(s"using features=$localFeatures")
log.debug(s"using features=$localFeatures")
val localInit = d.pendingAuth.address match {
case remoteAddress if !d.pendingAuth.outgoing && conf.sendRemoteAddressInit && NodeAddress.isPublicIPAddress(remoteAddress) => protocol.Init(localFeatures, TlvStream(InitTlv.Networks(chainHash :: Nil), InitTlv.RemoteAddress(remoteAddress)))
case _ => protocol.Init(localFeatures, TlvStream(InitTlv.Networks(chainHash :: Nil)))
@@ -161,7 +161,7 @@ class PeerConnection(keyPair: KeyPair, conf: PeerConnection.Conf, switchboard: A

// we will delay all rebroadcasts with this value in order to prevent herd effects (each peer has a different delay)
val rebroadcastDelay = Random.nextInt(conf.maxRebroadcastDelay.toSeconds.toInt).seconds
log.info(s"rebroadcast will be delayed by $rebroadcastDelay")
log.debug(s"rebroadcast will be delayed by $rebroadcastDelay")
context.system.eventStream.subscribe(self, classOf[Rebroadcast])

goto(CONNECTED) using ConnectedData(d.chainHash, d.remoteNodeId, d.transport, d.peer, d.localInit, remoteInit, rebroadcastDelay, isPersistent = d.isPersistent)
@@ -146,7 +146,7 @@ class ReconnectionTask(nodeParams: NodeParams, remoteNodeId: PublicKey) extends
lazy val mediator = DistributedPubSub(context.system).mediator

private def connect(address: NodeAddress, origin: ActorRef, isPersistent: Boolean): Unit = {
log.info(s"connecting to $address")
log.debug(s"connecting to $address")
val req = ClientSpawner.ConnectionRequest(remoteNodeId, address, origin, isPersistent)
if (context.system.hasExtension(Cluster)) {
mediator ! Send(path = "/user/client-spawner", msg = req, localAffinity = false)
@@ -118,16 +118,16 @@ class ChannelRelay private(nodeParams: NodeParams,
Behaviors.receiveMessagePartial {
case DoRelay =>
if (previousFailures.isEmpty) {
context.log.info(s"relaying htlc #${r.add.id} from channelId={} to requestedShortChannelId={} nextNode={}", r.add.channelId, r.payload.outgoingChannelId, nextNodeId_opt.getOrElse(""))
context.log.info("relaying htlc #{} from channelId={} to requestedShortChannelId={} nextNode={}", r.add.id, r.add.channelId, r.payload.outgoingChannelId, nextNodeId_opt.getOrElse(""))
}
context.log.debug("attempting relay previousAttempts={}", previousFailures.size)
handleRelay(previousFailures) match {
case RelayFailure(cmdFail) =>
Metrics.recordPaymentRelayFailed(Tags.FailureType(cmdFail), Tags.RelayType.Channel)
context.log.info(s"rejecting htlc reason=${cmdFail.reason}")
context.log.info("rejecting htlc reason={}", cmdFail.reason)
safeSendAndStop(r.add.channelId, cmdFail)
case RelaySuccess(selectedChannelId, cmdAdd) =>
context.log.info(s"forwarding htlc to channelId=$selectedChannelId")
context.log.info("forwarding htlc #{} from channelId={} to channelId={}", r.add.id, r.add.channelId, selectedChannelId)
register ! Register.Forward(forwardFailureAdapter, selectedChannelId, cmdAdd)
waitForAddResponse(selectedChannelId, previousFailures)
}
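
Besides the level changes, this ChannelRelay hunk swaps Scala string interpolation for the logger's {} placeholders. The difference matters on a hot path: s"..." builds the message eagerly on every call, while a placeholder template is only formatted once the logger has confirmed the level is enabled. A sketch of the two styles, assuming akka's LoggingAdapter; LogStyleExample and relayExample are hypothetical names, not repo code:

import akka.event.LoggingAdapter

// Hypothetical helper, not from the repo: contrasts the two styles.
object LogStyleExample {
  def relayExample(log: LoggingAdapter, htlcId: Long, channelId: String): Unit = {
    // Eager: the interpolated string is allocated on every call, even
    // when INFO is disabled at the logger.
    log.info(s"relaying htlc #$htlcId from channelId=$channelId")

    // Lazy: the template is only formatted after the level check passes,
    // so a disabled level costs little more than a branch.
    log.info("relaying htlc #{} from channelId={}", htlcId, channelId)
  }
}

The same reasoning explains the Sync.scala change further down that keeps log.info but drops a stray s prefix from an already-placeholder-style message: the interpolation added allocation without performing any substitution.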
@@ -204,8 +204,8 @@ object RouteCalculation {
val routesToFind = if (r.routeParams.randomize) DEFAULT_ROUTES_COUNT else 1

log.info(s"finding routes ${r.source}->$targetNodeId with assistedChannels={} ignoreNodes={} ignoreChannels={} excludedChannels={}", extraEdges.map(_.desc.shortChannelId).mkString(","), r.ignore.nodes.map(_.value).mkString(","), r.ignore.channels.mkString(","), d.excludedChannels.mkString(","))
log.info("finding routes with params={}, multiPart={}", r.routeParams, r.allowMultiPart)
log.info("local channels to target node: {}", d.graphWithBalances.graph.getEdgesBetween(r.source, targetNodeId).map(e => s"${e.desc.shortChannelId} (${e.balance_opt}/${e.capacity})").mkString(", "))
log.debug("finding routes with params={}, multiPart={}", r.routeParams, r.allowMultiPart)
log.debug("local channels to target node: {}", d.graphWithBalances.graph.getEdgesBetween(r.source, targetNodeId).map(e => s"${e.desc.shortChannelId} (${e.balance_opt}/${e.capacity})").mkString(", "))
val tags = TagSet.Empty.withTag(Tags.MultiPart, r.allowMultiPart).withTag(Tags.Amount, Tags.amountBucket(amountToSend))
KamonExt.time(Metrics.FindRouteDuration.withTags(tags.withTag(Tags.NumberOfRoutes, routesToFind.toLong))) {
val result = if (r.allowMultiPart) {
@@ -102,7 +102,7 @@ class Router(val nodeParams: NodeParams, watcher: typed.ActorRef[ZmqWatcher.Comm
val nodeAnn = Announcements.makeNodeAnnouncement(nodeParams.privateKey, nodeParams.alias, nodeParams.color, nodeParams.publicAddresses, nodeParams.features.nodeAnnouncementFeatures())
self ! nodeAnn

log.info(s"initialization completed, ready to process messages")
log.debug("initialization completed, ready to process messages")
Try(initialized.map(_.success(Done)))
val data = Data(
nodes.map(n => n.nodeId -> n).toMap, channels, pruned,
@@ -122,12 +122,12 @@ class Router(val nodeParams: NodeParams, watcher: typed.ActorRef[ZmqWatcher.Comm
case Event(SyncProgress(progress), d: Data) =>
Metrics.SyncProgress.withoutTags().update(100 * progress)
if (progress == 1.0 && d.channels.nonEmpty) {
log.info("initial routing sync done")
log.debug("initial routing sync done")
}
stay()

case Event(GetRoutingState, d: Data) =>
log.info(s"getting valid announcements for ${sender()}")
log.debug(s"getting valid announcements for ${sender()}")
sender() ! RoutingState(d.channels.values, d.nodes.values)
stay()

8 changes: 4 additions & 4 deletions eclair-core/src/main/scala/fr/acinq/eclair/router/Sync.scala
@@ -51,7 +51,7 @@ object Sync {
if (s.replacePrevious || !d.sync.contains(s.remoteNodeId)) {
// ask for everything
val query = QueryChannelRange(s.chainHash, firstBlock = BlockHeight(0), numberOfBlocks = Int.MaxValue.toLong, TlvStream(s.flags_opt.toSet))
log.info("sending query_channel_range={}", query)
log.debug("sending query_channel_range={}", query)
s.to ! query

// we also set a pass-all filter for now (we can update it later) for the future gossip messages, by setting
@@ -94,7 +94,7 @@ object Sync {

d.sync.get(origin.nodeId) match {
case None =>
log.info("received unsolicited reply_channel_range with {} channels", r.shortChannelIds.array.size)
log.debug("received unsolicited reply_channel_range with {} channels", r.shortChannelIds.array.size)
d // we didn't request a sync from this node, ignore
case Some(currentSync) if currentSync.remainingQueries.isEmpty && r.shortChannelIds.array.isEmpty =>
// NB: this case deals with peers who don't return any sync data. We're currently not correctly detecting the end
@@ -127,7 +127,7 @@ object Sync {
val u1 = u + (if (QueryShortChannelIdsTlv.QueryFlagType.includeUpdate1(flag)) 1 else 0) + (if (QueryShortChannelIdsTlv.QueryFlagType.includeUpdate2(flag)) 1 else 0)
(c1, u1)
}
log.info(s"received reply_channel_range with {} channels, we're missing {} channel announcements and {} updates, format={}", r.shortChannelIds.array.size, channelCount, updatesCount, r.shortChannelIds.encoding)
log.info("received reply_channel_range with {} channels, we're missing {} channel announcements and {} updates, format={}", r.shortChannelIds.array.size, channelCount, updatesCount, r.shortChannelIds.encoding)
Metrics.ReplyChannelRange.NewChannelAnnouncements.withoutTags().record(channelCount)
Metrics.ReplyChannelRange.NewChannelUpdates.withoutTags().record(updatesCount)

@@ -186,7 +186,7 @@ object Sync {
Metrics.QueryShortChannelIds.Nodes.withoutTags().record(nodeCount)
Metrics.QueryShortChannelIds.ChannelAnnouncements.withoutTags().record(channelCount)
Metrics.QueryShortChannelIds.ChannelUpdates.withoutTags().record(updateCount)
log.info("received query_short_channel_ids with {} items, sent back {} channels and {} updates and {} nodes", q.shortChannelIds.array.size, channelCount, updateCount, nodeCount)
log.debug("received query_short_channel_ids with {} items, sent back {} channels and {} updates and {} nodes", q.shortChannelIds.array.size, channelCount, updateCount, nodeCount)
origin.peerConnection ! ReplyShortChannelIdsEnd(q.chainHash, 1)
}
