Commit cf55b97
Persist ChannelMonitors after new blocks are connected
This resolves several user complaints (and issues in the sample node) where startup is substantially delayed as we're always waiting for the chain data to sync.

Further, in an upcoming PR, we'll be reloading pending payments from ChannelMonitors on restart, at which point we'll need the change here which avoids handling events until after the user has confirmed the `ChannelMonitor` has been persisted to disk. It will avoid a race where we:
 * send a payment/HTLC (persisting the monitor to disk with the HTLC pending),
 * force-close the channel, removing the channel entry from the ChannelManager entirely,
 * persist the ChannelManager,
 * connect a block which contains a fulfill of the HTLC, generating a claim event,
 * handle the claim event while the `ChannelMonitor` is being persisted,
 * persist the ChannelManager (before the `ChannelMonitor` is persisted fully),
 * restart, reloading the HTLC as a pending payment in the ChannelManager, which now has no references to it except from the ChannelMonitor which still has the pending HTLC,
 * replay the block connection, generating a duplicate PaymentSent event.
1 parent 6d54db4 commit cf55b97

File tree

7 files changed: +148 -44 lines changed

fuzz/src/utils/test_persister.rs (+1 -1)

@@ -14,7 +14,7 @@ impl chainmonitor::Persist<EnforcingSigner> for TestPersister {
 		self.update_ret.lock().unwrap().clone()
 	}
 
-	fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &channelmonitor::ChannelMonitorUpdate, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+	fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<EnforcingSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
 		self.update_ret.lock().unwrap().clone()
 	}
 }
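
The signature change above is the heart of this commit's API change: `Persist::update_persisted_channel` now takes `&Option<ChannelMonitorUpdate>`, and a `None` update means the call came from block/transaction connection rather than from a `ChannelManager` update, so the full monitor must be written. A minimal sketch of how a downstream, delta-based implementation might branch on the two cases (the `DeltaPersister` type and its storage helpers are hypothetical, not LDK APIs; the trait signatures are as of this commit):

use lightning::chain;
use lightning::chain::chainmonitor::{MonitorUpdateId, Persist};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;

struct DeltaPersister;

impl DeltaPersister {
	// Hypothetical storage helpers, stubbed out for the sketch.
	fn append_delta(&self, _txo: &OutPoint, _update: &ChannelMonitorUpdate) -> Result<(), ()> { Ok(()) }
	fn write_full_monitor<S: Sign>(&self, _txo: &OutPoint, _monitor: &ChannelMonitor<S>) -> Result<(), ()> { Ok(()) }
}

impl<ChannelSigner: Sign> Persist<ChannelSigner> for DeltaPersister {
	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
		self.write_full_monitor(&funding_txo, monitor)
			.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
	}

	fn update_persisted_channel(&self, funding_txo: OutPoint, update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
		match update {
			// A ChannelManager-originated update: a delta-based store can
			// persist just the update and re-apply it on reload.
			Some(update) => self.append_delta(&funding_txo, update),
			// None: the call came from block/transaction connection during
			// chain sync; there is no delta, so write the full monitor.
			None => self.write_full_monitor(&funding_txo, monitor),
		}.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
	}
}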

lightning-persister/src/lib.rs (+6 -1)

@@ -159,13 +159,18 @@ impl FilesystemPersister {
 }
 
 impl<ChannelSigner: Sign> chainmonitor::Persist<ChannelSigner> for FilesystemPersister {
+	// TODO: We really need a way for the persister to inform the user that it's time to crash/shut
+	// down once these start returning failure.
+	// A PermanentFailure implies we need to shut down since we're force-closing channels without
+	// even broadcasting!
+
 	fn persist_new_channel(&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
 		let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
 		util::write_to_file(self.path_to_monitor_data(), filename, monitor)
 			.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)
 	}
 
-	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &ChannelMonitorUpdate, monitor: &ChannelMonitor<ChannelSigner>, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: &Option<ChannelMonitorUpdate>, monitor: &ChannelMonitor<ChannelSigner>, _update_id: chainmonitor::MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
 		let filename = format!("{}_{}", funding_txo.txid.to_hex(), funding_txo.index);
 		util::write_to_file(self.path_to_monitor_data(), filename, monitor)
 			.map_err(|_| chain::ChannelMonitorUpdateErr::PermanentFailure)

lightning/src/chain/chainmonitor.rs (+95 -25)

@@ -32,6 +32,7 @@ use chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, MonitorUpdated, TransactionOutputs};
 use chain::transaction::{OutPoint, TransactionData};
 use chain::keysinterface::Sign;
+use util::atomic_counter::AtomicCounter;
 use util::logger::Logger;
 use util::events;
 use util::events::EventHandler;
@@ -40,10 +41,12 @@ use ln::channelmanager::ChannelDetails;
 use prelude::*;
 use sync::{RwLock, RwLockReadGuard, Mutex};
 use core::ops::Deref;
+use core::sync::atomic::{AtomicBool, Ordering};
 
 #[derive(Clone, Copy, Hash, PartialEq, Eq)]
 pub(crate) enum MonitorUpdate {
 	MonitorUpdateId(u64),
+	SyncPersistId(u64),
 }
 
 /// An opaque identifier describing a specific [`Persist`] method call.
@@ -90,6 +93,9 @@ pub trait Persist<ChannelSigner: Sign> {
 	/// updated monitor itself to disk/backups. See the `Persist` trait documentation for more
 	/// details.
 	///
+	/// During blockchain synchronization operations, this may be called with no
+	/// [`ChannelMonitorUpdate`], in which case the full [`ChannelMonitor`] needs to be persisted.
+	///
 	/// If an implementer chooses to persist the updates only, they need to make
 	/// sure that all the updates are applied to the `ChannelMonitors` *before*
 	/// the set of channel monitors is given to the `ChannelManager`
@@ -107,7 +113,7 @@ pub trait Persist<ChannelSigner: Sign> {
 	/// [`ChannelMonitorUpdateErr`] for requirements when returning errors.
 	///
 	/// [`Writeable::write`]: crate::util::ser::Writeable::write
-	fn update_persisted_channel(&self, id: OutPoint, update: &ChannelMonitorUpdate, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
+	fn update_persisted_channel(&self, id: OutPoint, update: &Option<ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> Result<(), ChannelMonitorUpdateErr>;
 }
 
 struct MonitorHolder<ChannelSigner: Sign> {
@@ -118,7 +124,24 @@ struct MonitorHolder<ChannelSigner: Sign> {
 	/// update_persisted_channel, the user returns a TemporaryFailure, and then calls
 	/// channel_monitor_updated immediately, racing our insertion of the pending update into the
 	/// contained Vec.
+	///
+	/// Beyond the synchronization of updates themselves, we cannot handle user events until after
+	/// any chain updates have been stored on disk. Thus, we scan this list when returning updates
+	/// to the ChannelManager, refusing to return any updates for a ChannelMonitor which is still
+	/// being persisted fully to disk after a chain update.
+	///
+	/// This avoids the possibility of handling, e.g. an on-chain claim, generating a claim monitor
+	/// event, resulting in the relevant ChannelManager generating a PaymentSent event and dropping
+	/// the pending payment entry, and then reloading before the monitor is persisted, resulting in
+	/// the ChannelManager re-adding the same payment entry, before the same block is replayed,
+	/// resulting in a duplicate PaymentSent event.
 	pending_monitor_updates: Mutex<Vec<MonitorUpdateId>>,
+	/// When the user returns a PermanentFailure error from an update_persisted_channel call during
+	/// block processing, we inform the ChannelManager that the channel should be closed
+	/// asynchronously. In order to ensure no further changes happen before the ChannelManager has
+	/// processed the closure event, we set this to true and return PermanentFailure for any other
+	/// chain::Watch events.
+	channel_closed: AtomicBool,
 }
 
 /// A read-only reference to a current ChannelMonitor.
@@ -154,6 +177,7 @@ pub struct ChainMonitor<ChannelSigner: Sign, C: Deref, T: Deref, F: Deref, L: De
 	P::Target: Persist<ChannelSigner>,
 {
 	monitors: RwLock<HashMap<OutPoint, MonitorHolder<ChannelSigner>>>,
+	sync_persistence_id: AtomicCounter,
 	chain_source: Option<C>,
 	broadcaster: T,
 	logger: L,
@@ -183,26 +207,50 @@ where C::Target: chain::Filter,
 		FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs>
 	{
 		let mut dependent_txdata = Vec::new();
-		let monitors = self.monitors.read().unwrap();
-		for monitor_state in monitors.values() {
-			let mut txn_outputs = process(&monitor_state.monitor, txdata);
+		{
+			let monitors = self.monitors.write().unwrap();
+			for (funding_outpoint, monitor_state) in monitors.iter() {
+				let monitor = &monitor_state.monitor;
+				let mut txn_outputs;
+				{
+					txn_outputs = process(monitor, txdata);
+					let update_id = MonitorUpdateId {
+						contents: MonitorUpdate::SyncPersistId(self.sync_persistence_id.get_increment()),
+					};
+					let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
+
+					log_trace!(self.logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
+					match self.persister.update_persisted_channel(*funding_outpoint, &None, monitor, update_id) {
+						Ok(()) =>
+							log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
+						Err(ChannelMonitorUpdateErr::PermanentFailure) => {
+							monitor_state.channel_closed.store(true, Ordering::Release);
+							self.user_provided_events.lock().unwrap().push(MonitorEvent::UpdateFailed(*funding_outpoint));
+						},
+						Err(ChannelMonitorUpdateErr::TemporaryFailure) => {
+							log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor));
+							pending_monitor_updates.push(update_id);
+						},
+					}
+				}
 
-			// Register any new outputs with the chain source for filtering, storing any dependent
-			// transactions from within the block that previously had not been included in txdata.
-			if let Some(ref chain_source) = self.chain_source {
-				let block_hash = header.block_hash();
-				for (txid, mut outputs) in txn_outputs.drain(..) {
-					for (idx, output) in outputs.drain(..) {
-						// Register any new outputs with the chain source for filtering and recurse
-						// if it indicates that there are dependent transactions within the block
-						// that had not been previously included in txdata.
-						let output = WatchedOutput {
-							block_hash: Some(block_hash),
-							outpoint: OutPoint { txid, index: idx as u16 },
-							script_pubkey: output.script_pubkey,
-						};
-						if let Some(tx) = chain_source.register_output(output) {
-							dependent_txdata.push(tx);
+				// Register any new outputs with the chain source for filtering, storing any dependent
+				// transactions from within the block that previously had not been included in txdata.
+				if let Some(ref chain_source) = self.chain_source {
+					let block_hash = header.block_hash();
+					for (txid, mut outputs) in txn_outputs.drain(..) {
+						for (idx, output) in outputs.drain(..) {
+							// Register any new outputs with the chain source for filtering and recurse
+							// if it indicates that there are dependent transactions within the block
+							// that had not been previously included in txdata.
+							let output = WatchedOutput {
+								block_hash: Some(block_hash),
+								outpoint: OutPoint { txid, index: idx as u16 },
+								script_pubkey: output.script_pubkey,
+							};
+							if let Some(tx) = chain_source.register_output(output) {
+								dependent_txdata.push(tx);
+							}
 						}
 					}
 				}
@@ -228,6 +276,7 @@ where C::Target: chain::Filter,
 	pub fn new(chain_source: Option<C>, broadcaster: T, logger: L, feeest: F, persister: P) -> Self {
 		Self {
 			monitors: RwLock::new(HashMap::new()),
+			sync_persistence_id: AtomicCounter::new(),
 			chain_source,
 			broadcaster,
 			logger,
@@ -300,7 +349,7 @@ where C::Target: chain::Filter,
 		pending_monitor_updates.retain(|update_id| *update_id != completed_update_id);
 
 		match completed_update_id {
-			MonitorUpdateId { .. } => {
+			MonitorUpdateId { contents: MonitorUpdate::MonitorUpdateId(_) } => {
 				let monitor_update_pending_updates = pending_monitor_updates.iter().filter(|update_id|
 					if let MonitorUpdate::MonitorUpdateId(_) = update_id.contents { true } else { false }).count();
 				if monitor_update_pending_updates != 0 {
@@ -312,7 +361,12 @@ where C::Target: chain::Filter,
 					funding_txo,
 					monitor_update_id: monitor_data.monitor.get_latest_update_id(),
 				}));
-			}
+			},
+			MonitorUpdateId { contents: MonitorUpdate::SyncPersistId(_) } => {
+				// We've already done everything we need to; the next time release_monitor_events
+				// is called, any events for this ChannelMonitor will be returned if there are no
+				// more SyncPersistId events left.
+			},
 		}
 	}
 
@@ -458,7 +512,11 @@ where C::Target: chain::Filter,
 				monitor.load_outputs_to_watch(chain_source);
 			}
 		}
-		entry.insert(MonitorHolder { monitor, pending_monitor_updates: Mutex::new(pending_monitor_updates) });
+		entry.insert(MonitorHolder {
+			monitor,
+			pending_monitor_updates: Mutex::new(pending_monitor_updates),
+			channel_closed: AtomicBool::new(false),
+		});
 		update_res
 	}
 
@@ -492,7 +550,7 @@ where C::Target: chain::Filter,
 				contents: MonitorUpdate::MonitorUpdateId(update.update_id),
 			};
 			let mut pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap();
-			let persist_res = self.persister.update_persisted_channel(funding_txo, &update, monitor, update_id);
+			let persist_res = self.persister.update_persisted_channel(funding_txo, &Some(update), monitor, update_id);
 			if let Err(e) = persist_res {
 				if e == ChannelMonitorUpdateErr::TemporaryFailure {
 					pending_monitor_updates.push(update_id);
@@ -501,6 +559,8 @@ where C::Target: chain::Filter,
 			}
 			if update_res.is_err() {
 				Err(ChannelMonitorUpdateErr::PermanentFailure)
+			} else if monitor_state.channel_closed.load(Ordering::Acquire) {
+				Err(ChannelMonitorUpdateErr::PermanentFailure)
 			} else {
 				persist_res
 			}
@@ -511,7 +571,17 @@ where C::Target: chain::Filter,
 	fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
 		let mut pending_monitor_events = self.user_provided_events.lock().unwrap().split_off(0);
 		for monitor_state in self.monitors.read().unwrap().values() {
-			pending_monitor_events.append(&mut monitor_state.monitor.get_and_clear_pending_monitor_events());
+			let pending_monitor_update_count = monitor_state.pending_monitor_updates.lock().unwrap()
+				.iter().filter(|update_id|
+					if let MonitorUpdate::SyncPersistId(_) = update_id.contents { true } else { false })
+				.count();
+			if pending_monitor_update_count > 0 {
+				log_info!(self.logger, "A Channel Monitor sync is still in progress, refusing to provide monitor events!");
+			} else if monitor_state.channel_closed.load(Ordering::Acquire) {
+				log_info!(self.logger, "A Channel Monitor sync failed, refusing to provide monitor events!");
+			} else {
+				pending_monitor_events.append(&mut monitor_state.monitor.get_and_clear_pending_monitor_events());
+			}
 		}
 		pending_monitor_events
 	}
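
The effect of this `release_pending_monitor_events` change is easiest to see in isolation. Below is a self-contained model of the gating (the types and names are ours, not LDK's): events gathered during block connection are withheld while any full-monitor sync persist is in flight, and withheld entirely once persistence has permanently failed, since the separately queued `UpdateFailed` event will close the channel.

use std::sync::Mutex;
use std::sync::atomic::{AtomicBool, Ordering};

// Simplified stand-in for one monitor's state inside the ChainMonitor.
struct MonitorModel {
	pending_sync_persists: Mutex<usize>,
	persist_failed: AtomicBool,
	events: Mutex<Vec<String>>,
}

impl MonitorModel {
	// Mirrors the gating in release_pending_monitor_events above.
	fn release_events(&self) -> Vec<String> {
		if *self.pending_sync_persists.lock().unwrap() > 0 {
			// A chain-sync persist is still in flight: hand out nothing, so a
			// crash-and-reload can never observe handled events for monitor
			// state that never reached disk.
			Vec::new()
		} else if self.persist_failed.load(Ordering::Acquire) {
			// Persistence permanently failed: the channel is being closed via
			// a separate UpdateFailed event; release nothing further here.
			Vec::new()
		} else {
			self.events.lock().unwrap().split_off(0)
		}
	}
}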

lightning/src/chain/channelmonitor.rs (+19 -1)

@@ -157,12 +157,20 @@ pub enum MonitorEvent {
 	///
 	/// [`ChannelMonitorUpdateErr::TemporaryFailure`]: super::ChannelMonitorUpdateErr::TemporaryFailure
 	UpdateCompleted(MonitorUpdated),
+
+	/// Indicates a [`ChannelMonitor`] update has failed. See
+	/// [`ChannelMonitorUpdateErr::PermanentFailure`] for more information on how this is used.
+	///
+	/// [`ChannelMonitorUpdateErr::PermanentFailure`]: super::ChannelMonitorUpdateErr::PermanentFailure
+	UpdateFailed(OutPoint),
 }
 impl_writeable_tlv_based_enum_upgradable!(MonitorEvent, ;
 	(0, HTLCEvent),
-	// Note that UpdateCompleted is currently never serialized to disk as it is generated only in ChainMonitor
+	// Note that UpdateCompleted and UpdateFailed are currently never serialized to disk as they
+	// are generated only in ChainMonitor
 	(1, UpdateCompleted),
 	(2, CommitmentTxConfirmed),
+	(3, UpdateFailed),
 );
 
 /// Simple structure sent back by `chain::Watch` when an HTLC from a forward channel is detected on
@@ -656,7 +664,17 @@ pub(crate) struct ChannelMonitorImpl<Signer: Sign> {
 
 	payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
 
+	// Note that MonitorEvents MUST NOT be generated during update processing, only generated
+	// during chain data processing. This prevents a race in ChainMonitor::update_channel (and
+	// presumably user implementations thereof as well) where we update the in-memory channel
+	// object, then before the persistence finishes (as it's all under a read-lock), we return
+	// pending events to the user or to the relevant ChannelManager. Then, on reload, we'll have
+	// the pre-event state here, but have processed the event in the ChannelManager.
+	// Note that because the `event_lock` in `ChainMonitor` is only taken in
+	// block/transaction-connected events and *not* during block/transaction-disconnected events,
+	// we further MUST NOT generate events during block/transaction-disconnection.
 	pending_monitor_events: Vec<MonitorEvent>,
+
 	pending_events: Vec<Event>,
 
 	// Used to track on-chain events (i.e., transactions part of channels confirmed on chain) on
lightning/src/chain/mod.rs (+4 -0)

@@ -281,6 +281,10 @@ pub trait Watch<ChannelSigner: Sign> {
 
 	/// Returns any monitor events since the last call. Subsequent calls must only return new
 	/// events.
+	///
+	/// Note that after any block- or transaction-connection calls to a [`ChannelMonitor`], no
+	/// further events may be returned here until the [`ChannelMonitor`] has been fully persisted
+	/// to disk.
 	fn release_pending_monitor_events(&self) -> Vec<MonitorEvent>;
 }
 
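For implementors of `chain::Watch` that do not use `ChainMonitor`, the doc addition above is a contract to uphold. A skeleton of what honoring it might look like, using the trait's method set as shown in this commit (the `in_flight_sync_persists` counter is our own bookkeeping, and all storage details are elided):

use lightning::chain::{self, Watch};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;
use std::sync::atomic::{AtomicUsize, Ordering};

struct MyWatch {
	// Incremented when a block-connection persist starts, decremented once the
	// backing store confirms it completed.
	in_flight_sync_persists: AtomicUsize,
	// Real monitor storage elided.
}

impl<ChannelSigner: Sign> Watch<ChannelSigner> for MyWatch {
	fn watch_channel(&self, _funding_txo: OutPoint, _monitor: ChannelMonitor<ChannelSigner>) -> Result<(), chain::ChannelMonitorUpdateErr> {
		unimplemented!("store and persist the new monitor")
	}
	fn update_channel(&self, _funding_txo: OutPoint, _update: ChannelMonitorUpdate) -> Result<(), chain::ChannelMonitorUpdateErr> {
		unimplemented!("apply and persist the update")
	}
	fn release_pending_monitor_events(&self) -> Vec<MonitorEvent> {
		if self.in_flight_sync_persists.load(Ordering::Acquire) > 0 {
			// Per the new docs: return nothing until monitors touched by
			// block/transaction connection are fully persisted.
			return Vec::new();
		}
		Vec::new() // ...otherwise drain and return the queued events.
	}
}
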
lightning/src/ln/channelmanager.rs (+22 -15)

@@ -4087,7 +4087,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 						self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
 					}
 				},
-				MonitorEvent::CommitmentTxConfirmed(funding_outpoint) => {
+				MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
+				MonitorEvent::UpdateFailed(funding_outpoint) => {
 					let mut channel_lock = self.channel_state.lock().unwrap();
 					let channel_state = &mut *channel_lock;
 					let by_id = &mut channel_state.by_id;
@@ -4103,7 +4104,12 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
 							msg: update
 						});
 					}
-					self.issue_channel_close_events(&chan, ClosureReason::CommitmentTxConfirmed);
+					let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
+						ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
+					} else {
+						ClosureReason::CommitmentTxConfirmed
+					};
+					self.issue_channel_close_events(&chan, reason);
 					pending_msg_events.push(events::MessageSendEvent::HandleError {
 						node_id: chan.get_counterparty_node_id(),
 						action: msgs::ErrorAction::SendErrorMessage {
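
On the user side, a persistence failure during chain sync now surfaces as a channel closure with a `ProcessingError` reason. A hedged sketch of telling the two closure paths apart in an event handler (assuming `Event::ChannelClosed` carries `channel_id` and `reason` fields, as it did in this era of the crate):

use lightning::util::events::{ClosureReason, Event};

fn handle_event(event: &Event) {
	if let Event::ChannelClosed { channel_id, reason } = event {
		match reason {
			// Emitted via the new MonitorEvent::UpdateFailed path above.
			ClosureReason::ProcessingError { err } =>
				eprintln!("channel {:?} force-closed by us: {}", channel_id, err),
			ClosureReason::CommitmentTxConfirmed =>
				println!("channel {:?} closed by an on-chain commitment tx", channel_id),
			other => println!("channel {:?} closed: {:?}", channel_id, other),
		}
	}
}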
@@ -5437,20 +5443,21 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable f
 ///
 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
 /// is:
-/// 1) Deserialize all stored ChannelMonitors.
-/// 2) Deserialize the ChannelManager by filling in this struct and calling:
-///    <(BlockHash, ChannelManager)>::read(reader, args)
-///    This may result in closing some Channels if the ChannelMonitor is newer than the stored
-///    ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcasted.
-/// 3) If you are not fetching full blocks, register all relevant ChannelMonitor outpoints the same
-///    way you would handle a `chain::Filter` call using ChannelMonitor::get_outputs_to_watch() and
-///    ChannelMonitor::get_funding_txo().
-/// 4) Reconnect blocks on your ChannelMonitors.
-/// 5) Disconnect/connect blocks on the ChannelManager.
-/// 6) Move the ChannelMonitors into your local chain::Watch.
+/// 1) Deserialize all stored [`ChannelMonitor`]s.
+/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling:
+///    `<(BlockHash, ChannelManager)>::read(reader, args)`
+///    This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored
+///    [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted.
+/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
+///    same way you would handle a [`chain::Filter`] call using
+///    [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
+/// 4) Reconnect blocks on your [`ChannelMonitor`]s.
+/// 5) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
+/// 6) Disconnect/connect blocks on the [`ChannelManager`].
+/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`].
 ///
-/// Note that the ordering of #4-6 is not of importance, however all three must occur before you
-/// call any other methods on the newly-deserialized ChannelManager.
+/// Note that the ordering of #4-7 is not of importance, however all four must occur before you
+/// call any other methods on the newly-deserialized [`ChannelManager`].
 ///
 /// Note that because some channels may be closed during deserialization, it is critical that you
 /// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
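
The new step 5 is the behavioural change to carry into startup code: monitors must be persisted again after block reconnection, before anything may generate events. A compilable skeleton of the seven steps (every type and helper below is a stand-in for user infrastructure, not an LDK API; only the ordering mirrors the docs above):

// Stand-ins so the ordering skeleton compiles; real code would use
// ChannelMonitor, ChannelManagerReadArgs, ChainMonitor, etc.
struct StoredMonitors;
struct Manager;

fn read_monitors_from_disk() -> StoredMonitors { StoredMonitors }
fn read_manager(_monitors: &mut StoredMonitors) -> Manager { Manager }

fn resume_node() {
	// 1) Deserialize all stored ChannelMonitors.
	let mut monitors = read_monitors_from_disk();
	// 2) Deserialize the ChannelManager, handing it mutable references to the
	//    monitors; channels may be force-closed here if monitors are newer.
	let manager = read_manager(&mut monitors);
	// 3) If not fetching full blocks, register each monitor's outputs-to-watch
	//    and funding txo with your chain::Filter.
	// 4) Reconnect blocks missed while offline on each ChannelMonitor.
	// 5) Re-persist every ChannelMonitor (the step added by this commit), so
	//    the post-sync state is on disk before any events are handled.
	// 6) Disconnect/connect blocks on the ChannelManager.
	// 7) Hand each ChannelMonitor to your chain::Watch (e.g.
	//    ChainMonitor::watch_channel).
	let _ = manager;
}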

lightning/src/util/test_utils.rs (+1 -1)

@@ -188,7 +188,7 @@ impl<Signer: keysinterface::Sign> chainmonitor::Persist<Signer> for TestPersiste
 		ret
 	}
 
-	fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &channelmonitor::ChannelMonitorUpdate, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
+	fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: &Option<channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> Result<(), chain::ChannelMonitorUpdateErr> {
 		let ret = self.update_ret.lock().unwrap().clone();
 		if let Some(next_ret) = self.next_update_ret.lock().unwrap().take() {
 			*self.update_ret.lock().unwrap() = next_ret;
