Support async ChannelMonitorUpdates to closed chans at startup
One of the largest gaps in our async persistence functionality has
been preimage (claim) updates to closed channels. Here we finally
implement support for them (for updates which are generated during
startup).

Thanks to all the work we've built up over the many previous commits,
this is a fairly straightforward patch, removing the
immediate-completion logic from `claim_mpp_part` and adding the
required in-flight tracking logic to
`apply_post_close_monitor_update`.
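
As a rough sketch of the in-flight tracking pattern involved
(simplified, and using hypothetical stand-in types -- `FundingOutpoint`,
`MonitorUpdate`, and this cut-down `PeerState` are not the real LDK
definitions):

    use std::collections::HashMap;

    // Hypothetical stand-ins for the real types, for illustration only.
    #[derive(Clone, Copy, PartialEq, Eq, Hash)]
    struct FundingOutpoint(u64);
    struct MonitorUpdate { update_id: u64 }

    #[derive(Default)]
    struct PeerState {
        // Updates handed to the (possibly async) persister which have not
        // yet completed, keyed by the channel's funding outpoint.
        in_flight_monitor_updates: HashMap<FundingOutpoint, Vec<MonitorUpdate>>,
    }

    impl PeerState {
        // Record an update as in-flight before persisting it, rather than
        // assuming it completed immediately.
        fn track_in_flight(&mut self, funding: FundingOutpoint, update: MonitorUpdate) {
            self.in_flight_monitor_updates
                .entry(funding)
                .or_insert_with(Vec::new)
                .push(update);
        }

        // When the persister reports completion, drop updates up to
        // `completed_id`. Once nothing remains in flight, any blocked
        // completion actions (e.g. a `PaymentForwarded` event) can be
        // released.
        fn mark_completed(&mut self, funding: FundingOutpoint, completed_id: u64) -> bool {
            match self.in_flight_monitor_updates.get_mut(&funding) {
                Some(updates) => {
                    updates.retain(|upd| upd.update_id > completed_id);
                    updates.is_empty()
                },
                None => true,
            }
        }
    }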

As in the during-runtime case handled in the previous commit, we
sadly can't use the `handle_new_monitor_update` macro wholesale, as
it also handles `Channel` resumption, which we don't do here.
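
Schematically, the distinction looks something like this (an
illustrative sketch only -- the enum and helper functions below are
invented for exposition, not the macro's actual internals):

    // Illustrative only: what "completing" a monitor update implies in
    // each case.
    enum UpdateTarget {
        // A live channel: completion must also resume the `Channel` state
        // machine (releasing held messages and resuming normal operation).
        LiveChannel,
        // A closed channel: there is no `Channel` left to resume, so
        // completion only releases queued completion actions.
        ClosedChannel,
    }

    fn on_monitor_update_completed(target: UpdateTarget) {
        match target {
            UpdateTarget::LiveChannel => {
                // The full handle_new_monitor_update! path.
                release_in_flight_update();
                resume_channel();
            },
            UpdateTarget::ClosedChannel => {
                // The POST_CHANNEL_CLOSE path used here: bookkeeping only.
                release_in_flight_update();
                run_completion_actions();
            },
        }
    }

    fn release_in_flight_update() { /* drop from the in-flight update set */ }
    fn resume_channel() { /* only meaningful while a `Channel` exists */ }
    fn run_completion_actions() { /* e.g. surface `PaymentForwarded` */ }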
TheBlueMatt committed Nov 18, 2024
1 parent 9e590cf commit ce9e5a7
Showing 2 changed files with 44 additions and 38 deletions.
37 changes: 21 additions & 16 deletions lightning/src/ln/chanmon_update_fail_tests.rs
@@ -3294,10 +3294,6 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
if !close_chans_before_reload {
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
} else {
// While we forwarded the payment a while ago, we don't want to process events too early or
// we'll run background tasks we wanted to test individually.
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, !close_only_a);
}

mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
@@ -3308,24 +3304,33 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
// Make sure the B<->C channel is still alive and well by sending a payment over it.
let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
reconnect_args.pending_responding_commitment_signed.1 = true;
if !close_chans_before_reload {
// TODO: If the A<->B channel was closed before we reloaded, the `ChannelManager`
// will consider the forwarded payment complete and allow the B<->C
// `ChannelMonitorUpdate` to complete, wiping the payment preimage. This should not
// be allowed, and needs fixing.
reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
}
// The B<->C `ChannelMonitorUpdate` shouldn't be allowed to complete, which is the
// equivalent of the responding `commitment_signed` being a duplicate for node B, thus we
// need to set the `pending_responding_commitment_signed_dup` flag.
reconnect_args.pending_responding_commitment_signed_dup_monitor.1 = true;
reconnect_args.pending_raa.1 = true;

reconnect_nodes(reconnect_args);

// Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
// `PaymentForwarded` event will finally be released.
let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id);
expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false);
if !close_chans_before_reload {
// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C
// channel will fly, removing the payment preimage from it.
check_added_monitors(&nodes[1], 1);

// If the A<->B channel was closed before we reload, we'll replay the claim against it on
// reload, causing the `PaymentForwarded` event to get replayed.
let evs = nodes[1].node.get_and_clear_pending_events();
assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
for ev in evs {
if let Event::PaymentForwarded { .. } = ev { }
else {
panic!();
}
}

// Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C channel
// will fly, removing the payment preimage from it.
check_added_monitors(&nodes[1], 1);
assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
send_payment(&nodes[1], &[&nodes[2]], 100_000);
}
45 changes: 23 additions & 22 deletions lightning/src/ln/channelmanager.rs
@@ -3941,11 +3941,10 @@ where
}

/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
#[must_use]
fn apply_post_close_monitor_update(
&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
monitor_update: ChannelMonitorUpdate,
) -> ChannelMonitorUpdateStatus {
) {
// Note that there may be some post-close updates which need to be well-ordered with
// respect to the `update_id`, so we hold the `peer_state` lock here.
let per_peer_state = self.per_peer_state.read().unwrap();
@@ -3956,16 +3955,21 @@ where
match peer_state.channel_by_id.entry(channel_id) {
hash_map::Entry::Occupied(mut chan_phase) => {
if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
let in_flight = handle_new_monitor_update!(self, funding_txo,
handle_new_monitor_update!(self, funding_txo,
monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
return if in_flight { ChannelMonitorUpdateStatus::InProgress } else { ChannelMonitorUpdateStatus::Completed };
return;
} else {
debug_assert!(false, "We shouldn't have an update for a non-funded channel");
}
},
hash_map::Entry::Vacant(_) => {},
}
self.chain_monitor.update_channel(funding_txo, &monitor_update)
let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);

handle_new_monitor_update!(
self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
logger, channel_id, POST_CHANNEL_CLOSE
);
}

/// When a channel is removed, two things need to happen:
@@ -3994,7 +3998,7 @@ where
}
if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
debug_assert!(false, "This should have been handled in `locked_close_channel`");
let _ = self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
}
if self.background_events_processed_since_startup.load(Ordering::Acquire) {
// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
@@ -6348,9 +6352,7 @@ where
let _ = self.chain_monitor.update_channel(funding_txo, &update);
},
BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
// The monitor update will be replayed on startup if it doesn't complete, so no
// use bothering to care about the monitor update completing.
let _ = self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
},
BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
let per_peer_state = self.per_peer_state.read().unwrap();
@@ -7296,32 +7298,31 @@ where
let payment_hash = payment_preimage.into();
let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));

if !during_init {
if let Some(action) = action_opt {
log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
chan_id, action);
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
}
if let Some(action) = action_opt {
log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
chan_id, action);
peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
}

if !during_init {
handle_new_monitor_update!(self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state, logger, chan_id, POST_CHANNEL_CLOSE);
} else {
// If we're running during init we cannot update a monitor directly - they probably
// haven't actually been loaded yet. Instead, push the monitor update as a background
// event.
// TODO: Track this update as pending and only complete the completion action when it
// finishes.

let in_flight_updates = peer_state.in_flight_monitor_updates
.entry(prev_hop.funding_txo)
.or_insert_with(Vec::new);
in_flight_updates.push(preimage_update.clone());

let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
counterparty_node_id,
funding_txo: prev_hop.funding_txo,
channel_id: prev_hop.channel_id,
update: preimage_update,
};
self.pending_background_events.lock().unwrap().push(event);

mem::drop(peer_state);
mem::drop(per_peer_state);

self.handle_monitor_update_completion_actions(action_opt);
}
}
