Commit 8245128

Merge pull request #1859 from TheBlueMatt/2022-11-rm-redundant-holding-cell-wipe
Wait to free the holding cell during channel_reestablish handling
2 parents 32fdeb7 + e82cfa7

5 files changed (+104, −215 lines)


lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 6 additions & 24 deletions
@@ -324,7 +324,6 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
 check_added_monitors!(nodes[0], 1);
 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 }

 (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
@@ -633,7 +632,6 @@ fn test_monitor_update_fail_cs() {
 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 check_added_monitors!(nodes[1], 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

@@ -664,7 +662,6 @@ fn test_monitor_update_fail_cs() {
 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 check_added_monitors!(nodes[0], 1);
 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 },
@@ -726,7 +723,6 @@ fn test_monitor_update_fail_no_rebroadcast() {
 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 check_added_monitors!(nodes[1], 1);
@@ -784,12 +780,11 @@ fn test_monitor_update_raa_while_paused() {
 nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 check_added_monitors!(nodes[0], 1);
+assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());

 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
 assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
 check_added_monitors!(nodes[0], 1);

 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -875,7 +870,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 check_added_monitors!(nodes[1], 1);
@@ -911,8 +905,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
 check_added_monitors!(nodes[1], 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
-assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 (Some(payment_preimage_4), Some(payment_hash_4))
 } else { (None, None) };

@@ -1136,7 +1128,7 @@ fn test_monitor_update_fail_reestablish() {
 get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
 .contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected

-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
+nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
 check_added_monitors!(nodes[1], 1);

 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -1228,12 +1220,11 @@ fn raa_no_response_awaiting_raa_state() {
 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 check_added_monitors!(nodes[1], 1);
+assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
 check_added_monitors!(nodes[1], 1);

 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -1331,7 +1322,6 @@ fn claim_while_disconnected_monitor_update_fail() {

 nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
 let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 check_added_monitors!(nodes[1], 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

@@ -1348,7 +1338,6 @@ fn claim_while_disconnected_monitor_update_fail() {
 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
 check_added_monitors!(nodes[1], 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
 // until we've channel_monitor_update'd and updated for the new commitment transaction.

@@ -1440,7 +1429,6 @@ fn monitor_failed_no_reestablish_response() {
 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 check_added_monitors!(nodes[1], 1);

 // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
@@ -1540,7 +1528,6 @@ fn first_message_on_recv_ordering() {
 // to the next message also tests resetting the delivery order.
 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 check_added_monitors!(nodes[1], 1);

 // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
@@ -1550,7 +1537,6 @@ fn first_message_on_recv_ordering() {
 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 check_added_monitors!(nodes[1], 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);

 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -1599,7 +1585,7 @@ fn test_monitor_update_fail_claim() {
 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 nodes[1].node.claim_funds(payment_preimage_1);
 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 check_added_monitors!(nodes[1], 1);

 // Note that at this point there is a pending commitment transaction update for A being held by
@@ -1730,7 +1716,6 @@ fn test_monitor_update_on_pending_forwards() {
 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
 check_added_monitors!(nodes[1], 1);
 assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);

 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1791,9 +1776,7 @@ fn monitor_update_claim_fail_no_response() {
 expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 check_added_monitors!(nodes[1], 1);

-let events = nodes[1].node.get_and_clear_pending_msg_events();
-assert_eq!(events.len(), 0);
-nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

 chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -1841,9 +1824,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:

 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
-assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 check_added_monitors!(nodes[0], 1);
+assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();

lightning/src/ln/channel.rs

Lines changed: 9 additions & 50 deletions
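For context: the first hunk below removes the mon_update and holding_cell_failed_htlcs fields from ReestablishResponses, so channel_reestablish handling only reports which messages to resend and no longer frees the holding cell itself (the updated test_monitor_update_fail_reestablish above frees it later by polling pending message events). A minimal, self-contained sketch of the resulting struct shape, using hypothetical stand-in types rather than the real lightning::ln::msgs types (channel_ready's exact type is not shown in this diff and is assumed here):

#![allow(dead_code)]

// Stand-ins for the lightning::ln::msgs message structs (sketch only).
struct ChannelReady;
struct RevokeAndACK;
struct CommitmentUpdate;
struct AnnouncementSignatures;
struct Shutdown;
enum RAACommitmentOrder { CommitmentFirst, RevokeAndACKFirst }

// Post-commit shape: only the messages to (re)send remain; mon_update and
// holding_cell_failed_htlcs are gone because freeing the holding cell now
// happens outside of channel_reestablish handling.
struct ReestablishResponses {
    channel_ready: Option<ChannelReady>, // assumed type, not visible in this diff
    raa: Option<RevokeAndACK>,
    commitment_update: Option<CommitmentUpdate>,
    order: RAACommitmentOrder,
    announcement_sigs: Option<AnnouncementSignatures>,
    shutdown_msg: Option<Shutdown>,
}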
@@ -439,8 +439,6 @@ pub(super) struct ReestablishResponses {
 pub raa: Option<msgs::RevokeAndACK>,
 pub commitment_update: Option<msgs::CommitmentUpdate>,
 pub order: RAACommitmentOrder,
-pub mon_update: Option<ChannelMonitorUpdate>,
-pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
 pub shutdown_msg: Option<msgs::Shutdown>,
 }
@@ -3964,9 +3962,8 @@ impl<Signer: Sign> Channel<Signer> {
 // Short circuit the whole handler as there is nothing we can resend them
 return Ok(ReestablishResponses {
 channel_ready: None,
-raa: None, commitment_update: None, mon_update: None,
+raa: None, commitment_update: None,
 order: RAACommitmentOrder::CommitmentFirst,
-holding_cell_failed_htlcs: Vec::new(),
 shutdown_msg, announcement_sigs,
 });
 }
@@ -3979,9 +3976,8 @@ impl<Signer: Sign> Channel<Signer> {
 next_per_commitment_point,
 short_channel_id_alias: Some(self.outbound_scid_alias),
 }),
-raa: None, commitment_update: None, mon_update: None,
+raa: None, commitment_update: None,
 order: RAACommitmentOrder::CommitmentFirst,
-holding_cell_failed_htlcs: Vec::new(),
 shutdown_msg, announcement_sigs,
 });
 }
@@ -4024,46 +4020,12 @@ impl<Signer: Sign> Channel<Signer> {
 log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
 }

-if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-// We're up-to-date and not waiting on a remote revoke (if we are our
-// channel_reestablish should result in them sending a revoke_and_ack), but we may
-// have received some updates while we were disconnected. Free the holding cell
-// now!
-match self.free_holding_cell_htlcs(logger) {
-Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
-Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
-panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
-Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
-Ok(ReestablishResponses {
-channel_ready, shutdown_msg, announcement_sigs,
-raa: required_revoke,
-commitment_update: Some(commitment_update),
-order: self.resend_order.clone(),
-mon_update: Some(monitor_update),
-holding_cell_failed_htlcs,
-})
-},
-Ok((None, holding_cell_failed_htlcs)) => {
-Ok(ReestablishResponses {
-channel_ready, shutdown_msg, announcement_sigs,
-raa: required_revoke,
-commitment_update: None,
-order: self.resend_order.clone(),
-mon_update: None,
-holding_cell_failed_htlcs,
-})
-},
-}
-} else {
-Ok(ReestablishResponses {
-channel_ready, shutdown_msg, announcement_sigs,
-raa: required_revoke,
-commitment_update: None,
-order: self.resend_order.clone(),
-mon_update: None,
-holding_cell_failed_htlcs: Vec::new(),
-})
-}
+Ok(ReestablishResponses {
+channel_ready, shutdown_msg, announcement_sigs,
+raa: required_revoke,
+commitment_update: None,
+order: self.resend_order.clone(),
+})
 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
 if required_revoke.is_some() {
 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
@@ -4075,18 +4037,15 @@ impl<Signer: Sign> Channel<Signer> {
 self.monitor_pending_commitment_signed = true;
 Ok(ReestablishResponses {
 channel_ready, shutdown_msg, announcement_sigs,
-commitment_update: None, raa: None, mon_update: None,
+commitment_update: None, raa: None,
 order: self.resend_order.clone(),
-holding_cell_failed_htlcs: Vec::new(),
 })
 } else {
 Ok(ReestablishResponses {
 channel_ready, shutdown_msg, announcement_sigs,
 raa: required_revoke,
 commitment_update: Some(self.get_last_commitment_update(logger)),
 order: self.resend_order.clone(),
-mon_update: None,
-holding_cell_failed_htlcs: Vec::new(),
 })
 }
 } else {
