@@ -8688,141 +8688,166 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
 					&self.logger,
 				);
 			},
-			HTLCSource::PreviousHopData(hop_data) => {
-				let prev_channel_id = hop_data.channel_id;
-				let prev_user_channel_id = hop_data.user_channel_id;
-				let prev_node_id = hop_data.counterparty_node_id;
-				let completed_blocker =
-					RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
-
-				// Obtain hold time, if available.
-				let hold_time = hold_time_since(send_timestamp).unwrap_or(0);
-
-				// If attribution data was received from downstream, we shift it and get it ready for adding our hold
-				// time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA
-				// to record the hold time like we do for failed HTLCs.
-				let attribution_data = process_fulfill_attribution_data(
-					attribution_data,
-					&hop_data.incoming_packet_shared_secret,
-					hold_time,
-				);
+			HTLCSource::PreviousHopData(hop_data) => self.claim_funds_from_previous_hop_internal(
+				payment_preimage,
+				forwarded_htlc_value_msat,
+				skimmed_fee_msat,
+				from_onchain,
+				startup_replay,
+				next_channel_counterparty_node_id,
+				next_channel_outpoint,
+				next_channel_id,
+				next_user_channel_id,
+				hop_data,
+				attribution_data,
+				send_timestamp,
+			),
+			HTLCSource::TrampolineForward { .. } => todo!(),
+		}
+	}

-				#[cfg(test)]
-				let claiming_chan_funding_outpoint = hop_data.outpoint;
-				self.claim_funds_from_hop(
-					hop_data,
-					payment_preimage,
-					None,
-					Some(attribution_data),
-					|htlc_claim_value_msat, definitely_duplicate| {
-						let chan_to_release = Some(EventUnblockedChannel {
-							counterparty_node_id: next_channel_counterparty_node_id,
-							funding_txo: next_channel_outpoint,
-							channel_id: next_channel_id,
-							blocking_action: completed_blocker,
-						});
+	fn claim_funds_from_previous_hop_internal(
+		&self, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>,
+		skimmed_fee_msat: Option<u64>, from_onchain: bool, startup_replay: bool,
+		next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint,
+		next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
+		hop_data: HTLCPreviousHopData, attribution_data: Option<AttributionData>,
+		send_timestamp: Option<Duration>,
+	) {
+		let prev_channel_id = hop_data.channel_id;
+		let prev_user_channel_id = hop_data.user_channel_id;
+		let prev_node_id = hop_data.counterparty_node_id;
+		let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
+
+		// Obtain hold time, if available.
+		let hold_time = hold_time_since(send_timestamp).unwrap_or(0);
+
+		// If attribution data was received from downstream, we shift it and get it ready for adding our hold
+		// time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA
+		// to record the hold time like we do for failed HTLCs.
+		let attribution_data = process_fulfill_attribution_data(
+			attribution_data,
+			&hop_data.incoming_packet_shared_secret,
+			hold_time,
+		);

-						if definitely_duplicate && startup_replay {
-							// On startup we may get redundant claims which are related to
-							// monitor updates still in flight. In that case, we shouldn't
-							// immediately free, but instead let that monitor update complete
-							// in the background.
-							#[cfg(test)]
-							{
-								let per_peer_state = self.per_peer_state.deadlocking_read();
-								// The channel we'd unblock should already be closed, or...
-								let channel_closed = per_peer_state
-									.get(&next_channel_counterparty_node_id)
-									.map(|lck| lck.deadlocking_lock())
-									.map(|peer| !peer.channel_by_id.contains_key(&next_channel_id))
-									.unwrap_or(true);
-								let background_events =
-									self.pending_background_events.lock().unwrap();
-								// there should be a `BackgroundEvent` pending...
-								let matching_bg_event =
-									background_events.iter().any(|ev| {
-										match ev {
-											// to apply a monitor update that blocked the claiming channel,
-											BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
-												funding_txo, update, ..
-											} => {
-												if *funding_txo == claiming_chan_funding_outpoint {
-													assert!(update.updates.iter().any(|upd|
-														if let ChannelMonitorUpdateStep::PaymentPreimage {
+		#[cfg(test)]
+		let claiming_chan_funding_outpoint = hop_data.outpoint;
+		self.claim_funds_from_hop(
+			hop_data,
+			payment_preimage,
+			None,
+			Some(attribution_data),
+			|htlc_claim_value_msat, definitely_duplicate| {
+				let chan_to_release = Some(EventUnblockedChannel {
+					counterparty_node_id: next_channel_counterparty_node_id,
+					funding_txo: next_channel_outpoint,
+					channel_id: next_channel_id,
+					blocking_action: completed_blocker,
+				});
+
+				if definitely_duplicate && startup_replay {
+					// On startup we may get redundant claims which are related to
+					// monitor updates still in flight. In that case, we shouldn't
+					// immediately free, but instead let that monitor update complete
+					// in the background.
+					#[cfg(test)]
+					{
+						let per_peer_state = self.per_peer_state.deadlocking_read();
+						// The channel we'd unblock should already be closed, or...
+						let channel_closed = per_peer_state
+							.get(&next_channel_counterparty_node_id)
+							.map(|lck| lck.deadlocking_lock())
+							.map(|peer| !peer.channel_by_id.contains_key(&next_channel_id))
+							.unwrap_or(true);
+						let background_events = self.pending_background_events.lock().unwrap();
+						// there should be a `BackgroundEvent` pending...
+						let matching_bg_event =
+							background_events.iter().any(|ev| {
+								match ev {
+									// to apply a monitor update that blocked the claiming channel,
+									BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
+										funding_txo,
+										update,
+										..
+									} => {
+										if *funding_txo == claiming_chan_funding_outpoint {
+											assert!(
+												update.updates.iter().any(|upd| {
+													if let ChannelMonitorUpdateStep::PaymentPreimage {
 														payment_preimage: update_preimage, ..
 													} = upd {
 														payment_preimage == *update_preimage
 													} else { false }
-													), "{:?}", update);
-													true
-												} else { false }
-											},
-											// or the monitor update has completed and will unblock
-											// immediately once we get going.
-											BackgroundEvent::MonitorUpdatesComplete {
-												channel_id, ..
-											} =>
-												*channel_id == prev_channel_id,
+												}),
+												"{:?}",
+												update
+											);
+											true
+										} else {
+											false
 										}
-									});
-								assert!(
-									channel_closed || matching_bg_event,
-									"{:?}",
-									*background_events
-								);
-							}
-							(None, None)
-						} else if definitely_duplicate {
-							if let Some(other_chan) = chan_to_release {
-								(Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
-									downstream_counterparty_node_id: other_chan.counterparty_node_id,
-									downstream_channel_id: other_chan.channel_id,
-									blocking_action: other_chan.blocking_action,
-								}), None)
+									},
+									// or the monitor update has completed and will unblock
+									// immediately once we get going.
+									BackgroundEvent::MonitorUpdatesComplete {
+										channel_id, ..
+									} => *channel_id == prev_channel_id,
+								}
+							});
+						assert!(channel_closed || matching_bg_event, "{:?}", *background_events);
+					}
+					(None, None)
+				} else if definitely_duplicate {
+					if let Some(other_chan) = chan_to_release {
+						(
+							Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
+								downstream_counterparty_node_id: other_chan.counterparty_node_id,
+								downstream_channel_id: other_chan.channel_id,
+								blocking_action: other_chan.blocking_action,
+							}),
+							None,
+						)
+					} else {
+						(None, None)
+					}
+				} else {
+					let total_fee_earned_msat =
+						if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
+							if let Some(claimed_htlc_value) = htlc_claim_value_msat {
+								Some(claimed_htlc_value - forwarded_htlc_value)
 							} else {
-								( None, None)
+								None
 							}
 						} else {
-							let total_fee_earned_msat =
-								if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
-									if let Some(claimed_htlc_value) = htlc_claim_value_msat {
-										Some(claimed_htlc_value - forwarded_htlc_value)
-									} else {
-										None
-									}
-								} else {
-									None
-								};
-							debug_assert!(
-								skimmed_fee_msat <= total_fee_earned_msat,
-								"skimmed_fee_msat must always be included in total_fee_earned_msat"
-							);
-							(
-								Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
-									event: events::Event::PaymentForwarded {
-										prev_channel_id: Some(prev_channel_id),
-										next_channel_id: Some(next_channel_id),
-										prev_user_channel_id,
-										next_user_channel_id,
-										prev_node_id,
-										next_node_id: Some(next_channel_counterparty_node_id),
-										total_fee_earned_msat,
-										skimmed_fee_msat,
-										claim_from_onchain_tx: from_onchain,
-										outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
-									},
-									downstream_counterparty_and_funding_outpoint: chan_to_release,
-								}),
-								None,
-							)
-						}
-					},
-				);
+							None
+						};
+					debug_assert!(
+						skimmed_fee_msat <= total_fee_earned_msat,
+						"skimmed_fee_msat must always be included in total_fee_earned_msat"
+					);
+					(
+						Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
+							event: events::Event::PaymentForwarded {
+								prev_channel_id: Some(prev_channel_id),
+								next_channel_id: Some(next_channel_id),
+								prev_user_channel_id,
+								next_user_channel_id,
+								prev_node_id,
+								next_node_id: Some(next_channel_counterparty_node_id),
+								total_fee_earned_msat,
+								skimmed_fee_msat,
+								claim_from_onchain_tx: from_onchain,
+								outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+							},
+							downstream_counterparty_and_funding_outpoint: chan_to_release,
+						}),
+						None,
+					)
+				}
 			},
-			HTLCSource::TrampolineForward { .. } => todo!(),
-		}
+		)
 	}
-
 	/// Gets the node_id held by this ChannelManager
 	pub fn get_our_node_id(&self) -> PublicKey {
 		self.our_network_pubkey