@@ -11758,6 +11758,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/
1175811758
1175911759 if !new_intercept_events.is_empty() {
1176011760 let mut events = self.pending_events.lock().unwrap();
11761+ // It's possible we processed this intercept forward, generated an event, then re-processed
11762+ // it here after restart, in which case the intercept event should not be pushed
11763+ // redundantly.
11764+ new_intercept_events.retain(|ev| !events.contains(ev));
1176111765 events.append(&mut new_intercept_events);
1176211766 }
1176311767 }
@@ -17494,9 +17498,9 @@ where
1749417498
1749517499 const MAX_ALLOC_SIZE: usize = 1024 * 64;
1749617500 let forward_htlcs_count: u64 = Readable::read(reader)?;
17497- // This map is read but may no longer be used because we'll attempt to rebuild the set of HTLC
17498- // forwards from the `Channel{Monitor}`s instead, as a step towards removing the requirement of
17499- // regularly persisting the `ChannelManager` .
17501+ // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of
17502+ // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from
17503+ // `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` usage below.
1750017504 let mut forward_htlcs_legacy: HashMap<u64, Vec<HTLCForwardInfo>> =
1750117505 hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
1750217506 for _ in 0..forward_htlcs_count {
@@ -17597,9 +17601,9 @@ where
1759717601 };
1759817602 }
1759917603
17600- // Some maps are read but may no longer be used because we attempt to rebuild the pending HTLC
17601- // set from the `Channel{Monitor}`s instead, as a step towards removing the requirement of
17602- // regularly persisting the `ChannelManager` .
17604+ // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of
17605+ // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from
17606+ // `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` below.
1760317607 let mut pending_intercepted_htlcs_legacy: Option<HashMap<InterceptId, PendingAddHTLCInfo>> =
1760417608 None;
1760517609 let mut decode_update_add_htlcs_legacy: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> =
@@ -17940,6 +17944,25 @@ where
1794017944 pending_background_events.push(new_event);
1794117945 }
1794217946
17947+ // In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and
17948+ // persist that state, relying on it being up-to-date on restart. Newer versions are moving
17949+ // towards reducing this reliance on regular persistence of the `ChannelManager`, and instead
17950+ // reconstruct HTLC/payment state based on `Channel{Monitor}` data if
17951+ // `reconstruct_manager_from_monitors` is set below. Currently it is only set (randomly) in
17952+ // tests, so that both the new and the legacy codepaths get test coverage.
17953+ #[cfg(not(test))]
17954+ let reconstruct_manager_from_monitors = false;
17955+ #[cfg(test)]
17956+ let reconstruct_manager_from_monitors = {
17957+ use core::hash::{BuildHasher, Hasher};
17958+ let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
17959+ if rand_val % 2 == 0 {
17960+ true
17961+ } else {
17962+ false
17963+ }
17964+ };
17965+
1794317966 // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we
1794417967 // should ensure we try them again on the inbound edge. We put them here and do so after we
1794517968 // have a fully-constructed `ChannelManager` at the end.
@@ -17964,18 +17987,20 @@ where
1796417987 let mut peer_state_lock = peer_state_mtx.lock().unwrap();
1796517988 let peer_state = &mut *peer_state_lock;
1796617989 is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
17967- if let Some(chan) = peer_state.channel_by_id.get(channel_id) {
17968- if let Some(funded_chan) = chan.as_funded() {
17969- let inbound_committed_update_adds =
17970- funded_chan.get_inbound_committed_update_adds();
17971- if !inbound_committed_update_adds.is_empty() {
17972- // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized
17973- // `Channel`, as part of removing the requirement to regularly persist the
17974- // `ChannelManager`.
17975- decode_update_add_htlcs.insert(
17976- funded_chan.context.outbound_scid_alias(),
17977- inbound_committed_update_adds,
17978- );
17990+ if reconstruct_manager_from_monitors {
17991+ if let Some(chan) = peer_state.channel_by_id.get(channel_id) {
17992+ if let Some(funded_chan) = chan.as_funded() {
17993+ let inbound_committed_update_adds =
17994+ funded_chan.get_inbound_committed_update_adds();
17995+ if !inbound_committed_update_adds.is_empty() {
17996+ // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized
17997+ // `Channel`, as part of removing the requirement to regularly persist the
17998+ // `ChannelManager`.
17999+ decode_update_add_htlcs.insert(
18000+ funded_chan.context.outbound_scid_alias(),
18001+ inbound_committed_update_adds,
18002+ );
18003+ }
1797918004 }
1798018005 }
1798118006 }
@@ -18030,17 +18055,20 @@ where
1803018055 info.prev_funding_outpoint == prev_hop_data.outpoint
1803118056 && info.prev_htlc_id == prev_hop_data.htlc_id
1803218057 };
18033- // We always add all inbound committed HTLCs to `decode_update_add_htlcs` in the above
18034- // loop, but we need to prune from those added HTLCs if they were already forwarded to
18035- // the outbound edge. Otherwise, we'll double-forward.
18036- dedup_decode_update_add_htlcs(
18037- &mut decode_update_add_htlcs,
18038- &prev_hop_data,
18039- "HTLC was forwarded to the closed channel",
18040- &args.logger,
18041- );
18058+ // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed
18059+ // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from
18060+ // those added HTLCs if they were already forwarded to the outbound edge. Otherwise,
18061+ // we'll double-forward.
18062+ if reconstruct_manager_from_monitors {
18063+ dedup_decode_update_add_htlcs(
18064+ &mut decode_update_add_htlcs,
18065+ &prev_hop_data,
18066+ "HTLC was forwarded to the closed channel",
18067+ &args.logger,
18068+ );
18069+ }
1804218070
18043- if !is_channel_closed {
18071+ if !is_channel_closed || reconstruct_manager_from_monitors {
1804418072 continue;
1804518073 }
1804618074 // The ChannelMonitor is now responsible for this HTLC's
@@ -18549,99 +18577,55 @@ where
1854918577 }
1855018578 }
1855118579
18552- // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`.
18553- // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs.
18554- for (src, _, _, _, _, _) in failed_htlcs.iter() {
18555- if let HTLCSource::PreviousHopData(prev_hop_data) = src {
18556- dedup_decode_update_add_htlcs(
18557- &mut decode_update_add_htlcs,
18558- prev_hop_data,
18559- "HTLC was failed backwards during manager read",
18560- &args.logger,
18561- );
18562- }
18563- }
18564-
18565- // See above comment on `failed_htlcs`.
18566- for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) {
18567- for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) {
18568- dedup_decode_update_add_htlcs(
18569- &mut decode_update_add_htlcs,
18570- prev_hop_data,
18571- "HTLC was already decoded and marked as a claimable payment",
18572- &args.logger,
18573- );
18574- }
18575- }
18576-
18577- // Remove HTLCs from `forward_htlcs` if they are also present in `decode_update_add_htlcs`.
18578- //
18579- // In the future, the full set of pending HTLCs will be pulled from `Channel{Monitor}` data and
18580- // placed in `ChannelManager::decode_update_add_htlcs` on read, to be handled on the next call
18581- // to `process_pending_htlc_forwards`. This is part of a larger effort to remove the requirement
18582- // of regularly persisting the `ChannelManager`. The new pipeline is supported for HTLC forwards
18583- // received on LDK 0.3+ but not <= 0.2, so prune non-legacy HTLCs from `forward_htlcs`.
18584- forward_htlcs_legacy.retain(|scid, pending_fwds| {
18585- for fwd in pending_fwds {
18586- let (prev_scid, prev_htlc_id) = match fwd {
18587- HTLCForwardInfo::AddHTLC(htlc) => {
18588- (htlc.prev_outbound_scid_alias, htlc.prev_htlc_id)
18589- },
18590- HTLCForwardInfo::FailHTLC { htlc_id, .. }
18591- | HTLCForwardInfo::FailMalformedHTLC { htlc_id, .. } => (*scid, *htlc_id),
18592- };
18593- if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) {
18594- if pending_update_adds
18595- .iter()
18596- .any(|update_add| update_add.htlc_id == prev_htlc_id)
18597- {
18598- return false;
18599- }
18580+ if reconstruct_manager_from_monitors {
18581+ // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`.
18582+ // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs.
18583+ for (src, _, _, _, _, _) in failed_htlcs.iter() {
18584+ if let HTLCSource::PreviousHopData(prev_hop_data) = src {
18585+ dedup_decode_update_add_htlcs(
18586+ &mut decode_update_add_htlcs,
18587+ prev_hop_data,
18588+ "HTLC was failed backwards during manager read",
18589+ &args.logger,
18590+ );
1860018591 }
1860118592 }
18602- true
18603- });
18604- // Remove intercepted HTLC forwards if they are also present in `decode_update_add_htlcs`. See
18605- // the above comment.
18606- pending_intercepted_htlcs_legacy.retain(|id, fwd| {
18607- let prev_scid = fwd.prev_outbound_scid_alias;
18608- if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) {
18609- if pending_update_adds
18610- .iter()
18611- .any(|update_add| update_add.htlc_id == fwd.prev_htlc_id)
18612- {
18613- pending_events_read.retain(
18614- |(ev, _)| !matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id),
18593+
18594+ // See above comment on `failed_htlcs`.
18595+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) {
18596+ for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) {
18597+ dedup_decode_update_add_htlcs(
18598+ &mut decode_update_add_htlcs,
18599+ prev_hop_data,
18600+ "HTLC was already decoded and marked as a claimable payment",
18601+ &args.logger,
1861518602 );
18616- return false;
1861718603 }
1861818604 }
18605+ }
18606+
18607+ let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) =
18608+ if reconstruct_manager_from_monitors {
18609+ (decode_update_add_htlcs, new_hash_map(), new_hash_map())
18610+ } else {
18611+ (
18612+ decode_update_add_htlcs_legacy,
18613+ forward_htlcs_legacy,
18614+ pending_intercepted_htlcs_legacy,
18615+ )
18616+ };
18617+
18618+ // If we have a pending intercept HTLC present but no corresponding event, add that now rather
18619+ // than relying on the user having persisted the event prior to shutdown.
18620+ for (id, fwd) in pending_intercepted_htlcs.iter() {
1861918621 if !pending_events_read.iter().any(
1862018622 |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id),
1862118623 ) {
18622- match create_htlc_intercepted_event(*id, & fwd) {
18624+ match create_htlc_intercepted_event(*id, fwd) {
1862318625 Ok(ev) => pending_events_read.push_back((ev, None)),
1862418626 Err(()) => debug_assert!(false),
1862518627 }
1862618628 }
18627- true
18628- });
18629- // Add legacy update_adds that were received on LDK <= 0.2 that are not present in the
18630- // `decode_update_add_htlcs` map that was rebuilt from `Channel{Monitor}` data, see above
18631- // comment.
18632- for (scid, legacy_update_adds) in decode_update_add_htlcs_legacy.drain() {
18633- match decode_update_add_htlcs.entry(scid) {
18634- hash_map::Entry::Occupied(mut update_adds) => {
18635- for legacy_update_add in legacy_update_adds {
18636- if !update_adds.get().contains(&legacy_update_add) {
18637- update_adds.get_mut().push(legacy_update_add);
18638- }
18639- }
18640- },
18641- hash_map::Entry::Vacant(entry) => {
18642- entry.insert(legacy_update_adds);
18643- },
18644- }
1864518629 }
1864618630
1864718631 let best_block = BestBlock::new(best_block_hash, best_block_height);
@@ -18670,9 +18654,9 @@ where
1867018654
1867118655 inbound_payment_key: expanded_inbound_key,
1867218656 pending_outbound_payments: pending_outbounds,
18673- pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy ),
18657+ pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs ),
1867418658
18675- forward_htlcs: Mutex::new(forward_htlcs_legacy ),
18659+ forward_htlcs: Mutex::new(forward_htlcs ),
1867618660 decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
1867718661 claimable_payments: Mutex::new(ClaimablePayments {
1867818662 claimable_payments,
@@ -19008,12 +18992,11 @@ where
1900818992mod tests {
1900918993 use crate::events::{ClosureReason, Event, HTLCHandlingFailureType};
1901018994 use crate::ln::channelmanager::{
19011- create_recv_pending_htlc_info, inbound_payment, HTLCForwardInfo, InterceptId, PaymentId,
18995+ create_recv_pending_htlc_info, inbound_payment, InterceptId, PaymentId,
1901218996 RecipientOnionFields,
1901318997 };
1901418998 use crate::ln::functional_test_utils::*;
1901518999 use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent};
19016- use crate::ln::onion_utils::AttributionData;
1901719000 use crate::ln::onion_utils::{self, LocalHTLCFailureReason};
1901819001 use crate::ln::outbound_payment::Retry;
1901919002 use crate::ln::types::ChannelId;
@@ -19023,7 +19006,6 @@ mod tests {
1902319006 use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
1902419007 use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
1902519008 use crate::util::errors::APIError;
19026- use crate::util::ser::Writeable;
1902719009 use crate::util::test_utils;
1902819010 use bitcoin::secp256k1::ecdh::SharedSecret;
1902919011 use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
@@ -20081,66 +20063,6 @@ mod tests {
2008120063 check_spends!(txn[0], funding_tx);
2008220064 }
2008320065 }
20084-
20085- #[test]
20086- #[rustfmt::skip]
20087- fn test_malformed_forward_htlcs_ser() {
20088- // Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly.
20089- let chanmon_cfg = create_chanmon_cfgs(1);
20090- let node_cfg = create_node_cfgs(1, &chanmon_cfg);
20091- let persister;
20092- let chain_monitor;
20093- let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
20094- let deserialized_chanmgr;
20095- let mut nodes = create_network(1, &node_cfg, &chanmgrs);
20096-
20097- let dummy_failed_htlc = |htlc_id| {
20098- HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } }
20099- };
20100- let dummy_malformed_htlc = |htlc_id| {
20101- HTLCForwardInfo::FailMalformedHTLC {
20102- htlc_id,
20103- failure_code: LocalHTLCFailureReason::InvalidOnionPayload.failure_code(),
20104- sha256_of_onion: [0; 32],
20105- }
20106- };
20107-
20108- let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
20109- if htlc_id % 2 == 0 {
20110- dummy_failed_htlc(htlc_id)
20111- } else {
20112- dummy_malformed_htlc(htlc_id)
20113- }
20114- }).collect();
20115-
20116- let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
20117- if htlc_id % 2 == 1 {
20118- dummy_failed_htlc(htlc_id)
20119- } else {
20120- dummy_malformed_htlc(htlc_id)
20121- }
20122- }).collect();
20123-
20124-
20125- let (scid_1, scid_2) = (42, 43);
20126- let mut forward_htlcs = new_hash_map();
20127- forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
20128- forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
20129-
20130- let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
20131- *chanmgr_fwd_htlcs = forward_htlcs.clone();
20132- core::mem::drop(chanmgr_fwd_htlcs);
20133-
20134- reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
20135-
20136- let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
20137- for scid in [scid_1, scid_2].iter() {
20138- let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
20139- assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
20140- }
20141- assert!(deserialized_fwd_htlcs.is_empty());
20142- core::mem::drop(deserialized_fwd_htlcs);
20143- }
2014420066}
2014520067
2014620068#[cfg(ldk_bench)]
0 commit comments