diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml new file mode 100644 index 00000000000..73723fe34c3 --- /dev/null +++ b/.github/workflows/semver.yml @@ -0,0 +1,57 @@ +name: SemVer checks +on: + push: + branches-ignore: + - master + pull_request: + branches-ignore: + - master + +jobs: + semver-checks: + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v4 + - name: Install Rust stable toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable + rustup override set stable + - name: Check SemVer with default features + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + feature-group: default-features + - name: Check SemVer *without* default features + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + feature-group: only-explicit-features + - name: Check lightning-background-processor SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + package: lightning-background-processor + feature-group: only-explicit-features + features: futures + - name: Check lightning-block-sync SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + package: lightning-block-sync + feature-group: only-explicit-features + features: rpc-client,rest-client + - name: Check lightning-transaction-sync electrum SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + manifest-path: lightning-transaction-sync/Cargo.toml + feature-group: only-explicit-features + features: electrum + - name: Check lightning-transaction-sync esplora-blocking SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + manifest-path: lightning-transaction-sync/Cargo.toml + feature-group: only-explicit-features + features: esplora-blocking + - name: Check lightning-transaction-sync esplora-async SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + manifest-path: lightning-transaction-sync/Cargo.toml + feature-group: 
only-explicit-features + features: esplora-async diff --git a/CHANGELOG.md b/CHANGELOG.md index be12be58d1e..db55e7904bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,40 @@ +# 0.1.2 - Apr 02, 2025 - "Foolishly Edgy Cases" + +## API Updates + * `lightning-invoice` is now re-exported as `lightning::bolt11_invoice` + (#3671). + +## Performance Improvements + * `rapid-gossip-sync` graph parsing is substantially faster, resolving a + regression in 0.1 (#3581). + * `NetworkGraph` loading is now substantially faster and does fewer + allocations, resulting in a 20% further improvement in `rapid-gossip-sync` + loading when initializing from scratch (#3581). + * `ChannelMonitor`s for closed channels are no longer always re-persisted + immediately after startup, reducing on-startup I/O burden (#3619). + +## Bug Fixes + * BOLT 11 invoices longer than 1023 bytes long (and up to 7089 bytes) now + properly parse (#3665). + * In some cases, when using synchronous persistence with higher latency than + the latency to communicate with peers, when receiving an MPP payment with + multiple parts received over the same channel, a channel could hang and not + make progress, eventually leading to a force-closure due to timed-out HTLCs. + This has now been fixed (#3680). + * Some rare cases with multi-hop BOLT 11 route hints or multiple redundant + blinded paths could have led to the router creating invalid `Route`s were + fixed (#3586). + * Corrected the decay logic in `ProbabilisticScorer`'s historical buckets + model. Note that by default historical buckets are only decayed if no new + datapoints have been added for a channel for two weeks (#3562). + * `{Channel,Onion}MessageHandler::peer_disconnected` will now be called if a + different message handler refused connection by returning an `Err` from its + `peer_connected` method (#3580). 
+ * If the counterparty broadcasts a revoked state with pending HTLCs, those + will now be claimed with other outputs which we consider to not be + vulnerable to pinning attacks if they are not yet claimable by our + counterparty, potentially reducing our exposure to pinning attacks (#3564). + # 0.1.1 - Jan 28, 2025 - "Onchain Matters" ## API Updates diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index f4987569fda..198e1c4b13b 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -132,3 +132,5 @@ RUSTFLAGS="--cfg=taproot" cargo test --verbose --color always -p lightning RUSTFLAGS="--cfg=splicing" cargo test --verbose --color always -p lightning [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean RUSTFLAGS="--cfg=async_payments" cargo test --verbose --color always -p lightning +[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean +RUSTFLAGS="--cfg=lsps1_service" cargo test --verbose --color always -p lightning-liquidity diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 893e1c58ebd..2e3b3e17f56 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -2371,8 +2371,8 @@ mod tests { 42, 53, features, - $nodes[0].node.get_our_node_id(), - $nodes[1].node.get_our_node_id(), + $nodes[0].node.get_our_node_id().into(), + $nodes[1].node.get_our_node_id().into(), ) .expect("Failed to update channel from partial announcement"); let original_graph_description = $nodes[0].network_graph.to_string(); diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml index f4629c1ad5c..c45784e00c7 100644 --- a/lightning-invoice/Cargo.toml +++ b/lightning-invoice/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lightning-invoice" description = "Data structures to parse and serialize BOLT11 lightning invoices" -version = "0.33.1" +version = "0.33.2" authors = ["Sebastian Geisler "] documentation = "https://docs.rs/lightning-invoice/" license = "MIT OR Apache-2.0" diff --git 
a/lightning-invoice/src/de.rs b/lightning-invoice/src/de.rs index ee071d6349a..85a0924ce22 100644 --- a/lightning-invoice/src/de.rs +++ b/lightning-invoice/src/de.rs @@ -9,9 +9,10 @@ use core::str::FromStr; use std::error; use bech32::primitives::decode::{CheckedHrpstring, CheckedHrpstringError}; -use bech32::{Bech32, Fe32, Fe32IterExt}; +use bech32::{Fe32, Fe32IterExt}; use crate::prelude::*; +use crate::Bolt11Bech32; use bitcoin::hashes::sha256; use bitcoin::hashes::Hash; use bitcoin::{PubkeyHash, ScriptHash, WitnessVersion}; @@ -377,7 +378,7 @@ impl FromStr for SignedRawBolt11Invoice { type Err = Bolt11ParseError; fn from_str(s: &str) -> Result { - let parsed = CheckedHrpstring::new::(s)?; + let parsed = CheckedHrpstring::new::(s)?; let hrp = parsed.hrp(); // Access original non-packed 32 byte values (as Fe32s) // Note: the type argument is needed due to the API peculiarities, but it's not used @@ -1175,4 +1176,244 @@ mod test { ) ) } + + // Test some long invoice test vectors successfully roundtrip. Generated + // from Lexe proptest: . 
+ #[test] + fn test_deser_long_test_vectors() { + use crate::Bolt11Invoice; + + #[track_caller] + fn parse_ok(invoice_str: &str) { + let invoice = Bolt11Invoice::from_str(invoice_str).unwrap(); + let invoice_str2 = invoice.to_string(); + if invoice_str != invoice_str2 { + panic!( + "Invoice does not roundtrip: invoice_str != invoice_str2\n\ + invoice_str: {invoice_str}\n\ + invoice_str2: {invoice_str2}\n\ + \n\ + {invoice:?}" + ); + } + } + + // 1024 B shrunk invoice just above previous limit of 1023 B from Lexe proptest + parse_ok( + "lnbc10000000000000000010p1qqqqqqqdtuxpqkzq8sjzqgps4pvyczqq8sjzqgpuysszq0pyyqsrp2zs0sjz\ + qgps4pxrcfpqyqc2slpyyqsqsv9gwz59s5zqpqyps5rc9qsrs2pqxz5ysyzcfqgysyzs0sjzqgqq8sjzqgps4p\ + xqqzps4pqpssqgzpxps5ruysszqrps4pg8p2zgpsc2snpuysszqzqsgqvys0pyyqsrcfpqyqvycv9gfqqrcfpq\ + yq7zggpq8q5zqyruysszqwpgyqxpsjqsgq7zggpqps7zggpq8sjzqgqgqq7zggpqpq7zggpq8q5zqqpuysszq0\ + pyyqsqs0pyyqspsnqgzpqpqlpyyqsqszpuysszqyzvzpvysrqq8sjzqgqvrp7zggpqpqxpsspp5mf45hs3cgph\ + h0074r5qmr74y82r26ac4pzdg4nd9mdmsvz6ffqpssp5vr4yra4pcv74h9hk3d0233nqu4gktpuykjamrafrdp\ + uedqugzh3q9q2sqqqqqysgqcqrpqqxq8pqqqqqqnp4qgvcxpme2q5lng36j9gruwlrtk2f86s3c5xmk87yhvyu\ + wdeh025q5r9yqwnqegv9hj9nzkhyxaeyq92wcrnqp36pyrc2qzrvswj5g96ey2dn6qqqqqqqqqqqqqqqqqqqqq\ + qqqqqqqqp9a5vs0t4z56p64xyma8s84yvdx7uhqj0gvrr424fea2wpztq2fwqqqqqqqqqqqqqqqqqqqqqqqqqq\ + qqqqmy9qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\ + qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpcnsxc32du9n7amlypuhclzqrt6lkegq\ + 0v3r7nczjv9tv30z7phq80r3dm7pvgykl7gwuenmem93h5xwdwac6ngsmzqc34khrg3qjgsq6qk6lc" + ); + // 1517 B mainnet invoice from Lexe proptest + parse_ok( + "lnbc8735500635020489010p1av5kfs8deupvyk4u5ynj03hmalhhhml0fxc2jlrv9z4lg6s4hnhkz69malhhe\ + t3x9yqpsxru4a3kwar2qtu2q2ughx367q600s5x7c7tln4k0fu78skxqevaqm8sayhuur377zgf3uf94n57xzh\ + dw99u42hwc089djn5xj723w7zageflsnzdmyte89tecf2ac7xhg4y3u9f4xpuv2hwxjlsarp0e24fu8tme6rgv\ + 
0tqj08z9f4u30rw59k8emhtvs7wye0xfw6x5q5tju2p208rvtkunzwtwghtp22tlnh62gxwhfkxp4cnz7ts3rx\ + vlzszhv9y00h77lpdvcjyhjtmalh5dn5e8n5w8cqle0vunzduu4nza9y0734qhxday9hzywl0aa0vhzy0qmphc\ + 64d4hduj08dv2krpgqtc2v83gptk34reelxyc7wsgnze890c6nrv6p0cmepatc269eayzjjkqk30n52rfl5dg7\ + wztl96f7wc2tzx34q909xuajnyt4u4lnk87lwal7z0etdz5tmece0v3u796jfp68nccn05ty54ncfelts3v8g0\ + sn6v6hsu87zat4r03368ersu87252dd0nswymxzc2pyxl8yy844hspuyj47w0px4u4leefq568sk0rr9th4ql9\ + f9ykawrczkz5hp22nstg3lrlsa6u2q2ull3kzce2sh0h77sjv0zszhzy4hfh6u0pwux5l3gpthsn72mfu47sw9\ + zw3hzk7srznp27z0etdp0725me00sn72mgkf0fteehruk0lg6swh34z52puaekzmjlmalhhe6m8ug7z3c8g8zh\ + jjspp5zj0sm85g5ufng9w7s6p4ucdk80tyvz64sg54v0cy4vgnr37f78sqsp5l6azu2hv6we30er90jrslqpvd\ + trnrphhesca2wg5q83k52rsu2cq9q2sqqqqqysgqcqr8h2np4qw0ha2k282hm8jh5rcfq0hsp2zhddtlc5vs23\ + uphyv0lv3k8sqsfgfp4qyrk86tx5xg2aa7et4cdzhnvl5s4nd33ugytt7gamk9tugn9yransr9yq08gpwsn8t2\ + tq4ducjfhrcz707av0ss20urjh8vldrpmehqxa0stkesvuq82txyqzfhej7qccswy7k5wvcppk63c6zpjytfda\ + ccadacjtn52lpe6s85rjfqlxzp6frq33xshaz2nr9xjkhd3jj8qg39nmfzvpgmayakqmy9rseakwgcudug7hs4\ + 5wh430ywh7qhj3khczh8gle4cn93ymgfwa7rrvcw9lywyyz58k4p40a3nu9svthaf0qeg8f2ay4tw9p48p70qm\ + ayu3ejl2q8pj9e2l22h7775tl44hs6ke4sdfgcr6aj8wra4r2v9sj6xa5chd5ctpfg8chtrer3kkp0e6af88lk\ + rfxcklf2hyslv2hr0xl5lwrm5y5uttxn4ndfz8789znf78nspa3xy68" + ); + // 1804 B regtest invoice from Lexe proptest + parse_ok( + "lnbcrt17124979001314909880p1y6lkcwgd76tfnxksfk2atyy4tzw4nyg6jrx3282s2ygvcxyj64gevhxsjk\ + 2ymhzv3e0p5h5u3kfey92jt9ge44gsfnwycxynm2g3unw3ntt9qh25texe98jcfhxvcxuezxw9tngwrndpy9s4\ + p4x9eyze2tfe9rxm68tp5yj5jfduen2nny8prhsm6edegn2stww4n4gwp4vfjkvdthd43524n9fa8h262vwesk\ + g66nw3vnyafn29zhsvfeg9mxummtfp35uumzfqmhy3jwgdh55mt5xpvhgmjn25uku5e5g939wmmnvdfygnrdgd\ + h56uzcx4a92vfhgdcky3z9gfnrsvp4f4f55j68vak9yufhvdm8x5zrgc6955jvf429zumv89nh2a35wae5yntg\ + v985jumpxehyv7t92pjrwufs89yh23f5ddy5s568wgchve3cg9ek5nzewgcrzjz0dftxg3nvf4hngje52ac4zm\ + 
esxpvk6sfef4hkuetvd4vk6n29wftrw5rvg4yy2vjjwyexc5mnvfd8xknndpqkkenx0q642j35298hwve3dyc5\ + 25jrd3295sm9v9jrqup3wpykg7zd239ns7jgtqu95jz0deaxksjh2fu56n6n2f5x6mm8wa89qjfef385sam2x9\ + mxcs20gfpnq460d3axzknnf3e4sw2kvf25wjjxddpyg52dw4vx7nn2w9cyu5t8vfnyxjtpg33kssjp24ch536p\ + d938snmtx345x6r4x93kvv2tff855um3tfekxjted4kxys2kve5hvu6g89z4ynmjgfhnw7tv892rymejgvey77\ + rcfqe9xjr92d85636fvajxyajndfa92k2nxycx5jtjx4zxsm2y2dyn2up50f5ku3nrfdk4g5npxehkzjjv8y69\ + gveev4z56denddaxy7tfwe8xx42zgf6kzmnxxpk826ze2s6xk6jrwearw6ejvd8rsvj2fpg525jtd5pp5j2tlt\ + 28m4kakjr84w6ce4fd8e7awy6ncyswcyut760rdnem30ptssp5p5u3xgxxtr6aev8y2w9m30wcw3kyn7fgm8wm\ + f8qw8wzrqt34zcvq9q2sqqqqqysgqcqypmw9xq8lllllllnp4qt36twam2ca08m3s7vnhre3c0j89589wyw4vd\ + k7fln0lryxzkdcrur28qwqq3hnyt84vsasuldd2786eysdf4dyuggwsmvw2atftf7spkmpa9dd3efq5tenpqm2\ + v7vcz2a4s0s7jnqpjn0srysnstnw5y5z9taxn0ue37aqgufxcdsj6f8a2m4pm9udppdzc4shsdqzzx0u0rm4xl\ + js0dqz3c5zqyvglda7nsqvqfztmlyup7vyuadzav4zyuqwx90ev6nmk53nkhkt0sev9e745wxqtdvrqzgqkaka\ + zen7e2qmsdauk665g3llg5qtl79t3xulrhjnducehdn72gpmkjvtth7kh6ejpl9dv0qcsxv2jvzzvg0hzdmk3y\ + jsmydqksdk3h78kc63qnr265h8vyeslqexszppfm7y287t3gxvhw0ulg2wp0rsw3tevz03z50kpy77zdz9snxm\ + kkwxd76xvj4qvj2f89rrnuvdvzw947ay0kydc077pkec2jet9qwp2tud98s24u65uz07eaxk5jk3e4nggn2caa\ + ek2p5pkrc6mm6mxjm2ezpdu8p5jstg6tgvnttgac3ygt5ys04t4udujzlshpl7e4f3ff03xe6v24cp6aq4wa" + ); + // 1870 B testnet invoice from Lexe proptest + parse_ok( + "lntb5826417333454665580p1c5rwh5edlhf33hvkj5vav5z3t02a5hxvj3vfv5kuny2f3yzj6zwf9hx3nn2fk\ + 9gepc2a3ywvj6dax5v3jy2d5nxmp3gaxhycjkv38hx4z4d4vyznrp2p24xa6t2pg4w4rrxfens6tcxdhxvvfhx\ + a8xvvpkgat8xnpe2p44juz9g43hyur00989gvfhwd2kj72wfum4g4mgx5m5cs2rg9d9vnn6xe89ydnnvfpyy52\ + s2dxx2er4x4xxwstdd5cxwdrjw3nkxnnv2uexxnrxw4t56sjswfn52s2xv4t8xmjtwpn8xm6sfeh4q526dyu8x\ + 3r9gceyw6fhd934qjttvdk57az5w368zdrhwfjxxu35xcmrsmmpd4g8wwtev4tkzutdd32k56mxveuy6c6v2em\ + yv7zkfp39zjpjgd8hx7n4xph5kceswf6xxmnyfcuxca20fp24z7ncvfhyu5jf2exhw36nwf68s7rh2a6yzjf4d\ + 
gukcenfxpchqsjn2pt5x334tf98wsm6dvcrvvfcwapxvk2cdvmk2npcfe68zue3w4f9xc6s2fvrw6nrg3fkskt\ + e2ftxyc20ffckcd692964sdzjwdp4yvrfdfm9q72pxp3kwat5f4j9xee5da8rss60w92857tgwych55f5w3n8z\ + mzexpy4jwredejrqm6txf3nxm64ffh8x460dp9yjazhw4yx6dm5xerysnn5wa455k3h2d89ss2fd9axwjp3f4r\ + 9qdmfd4fx6stx2eg9sezrv369w7nvvfvhj4nnwaz5z3ny8qcxcdnvwd64jc2nx9uy2e2gxdrnx6r3w9ykxatxx\ + g6kk6rv2ekr2emwx5ehy362d3x82dzvddfxs5rcg4vn27npf564qdtg2anycc6523jnwe3e0p65unrpvccrs5m\ + 2fuexgmnj23ay5e34v4xk5jnrwpg4xemfwqe5vjjjw9qk76zsd9yrzu6xdpv5v5ntdejxg6jtv3kx65t6gdhrg\ + vj3fe34sj2vv3h5kegpp57hjf5kv6clw97y2e063yuz0psrz9a6l49v836dflum00rh8qtn8qsp5gd29qycuze\ + 08xls8l32zjaaf2uqv78v97lg9ss0c699huw980h2q9q2sqqqqqysgqcqr8ulnp4q26hcfwr7qxz7lwwlr2kjc\ + rws7m2u5j36mm0kxa45uxy6zvsqt2zzfppjdkrm2rlgadt9dq3d6jkv4r2cugmf2kamr28qwuleyzzyyly8a6t\ + u70eldahx7hzxx5x9gms7vjjr577ps8n4qyds5nern39j0v7czkch2letnt46895jupxgehf208xgxz8d6j8gu\ + 3h2qqtsk9nr9nuquhkqjxw40h2ucpldrawmktxzxdgtkt9a3p95g98nywved8s8laj2a0c98rq5zzdnzddz6nd\ + w0lvr6u0av9m7859844cgz9vpeq05gw79zqae2s7jzeq66wydyueqtp56qc67g7krv6lj5aahxtmq4y208q5qy\ + z38cnwl9ma6m5f4nhzqaj0tjxpfrk4nr5arv9d20lvxvddvffhzygmyuvwd959uhdcgcgjejchqt2qncuwpqqk\ + 5vws7dflw8x6esrfwhz7h3jwmhevf445k76nme926sr8drsdveqg7l7t7lnjvhaludqnwk4l2pmevkjf9pla92\ + 4p77v76r7x8jzyy7h59hmk0lgzfsk6c8dpj37hssj7jt4q7jzvy8hq25l3pag37axxanjqnq56c47gpgy6frsy\ + c0str9w2aahz4h6t7axaka4cwvhwg49r6qgj8kwz2mt6vcje25l9ekvmgq5spqtn" + ); + } + + // Generate a valid invoice of `MAX_LENGTH` bytes and ensure that it roundtrips. 
+ #[test] + fn test_serde_long_invoice() { + use crate::TaggedField::*; + use crate::{ + Bolt11Invoice, Bolt11InvoiceFeatures, Bolt11InvoiceSignature, Currency, + PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp, RawTaggedField, Sha256, + SignedRawBolt11Invoice, + }; + use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; + use bitcoin::secp256k1::PublicKey; + use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; + + // Generate an `UnknownSemantics` field with a given length. + fn unknown_semantics_field(len: usize) -> Vec { + assert!(len <= 1023); + let mut field = Vec::with_capacity(len + 3); + // Big-endian encoded length prefix + field.push(Fe32::Q); + field.push(Fe32::try_from((len >> 5) as u8).unwrap()); + field.push(Fe32::try_from((len & 0x1f) as u8).unwrap()); + // Data + field.extend(std::iter::repeat(Fe32::P).take(len)); + field + } + + // Invoice fields + let payment_hash = sha256::Hash::from_str( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(); + let description = std::iter::repeat("A").take(639).collect::(); + let fallback_addr = crate::Fallback::SegWitProgram { + version: bitcoin::WitnessVersion::V0, + program: vec![0; 32], + }; + let payee_pk = PublicKey::from_slice(&[ + 0x03, 0x24, 0x65, 0x3e, 0xac, 0x43, 0x44, 0x88, 0x00, 0x2c, 0xc0, 0x6b, 0xbf, 0xb7, + 0xf1, 0x0f, 0xe1, 0x89, 0x91, 0xe3, 0x5f, 0x9f, 0xe4, 0x30, 0x2d, 0xbe, 0xa6, 0xd2, + 0x35, 0x3d, 0xc0, 0xab, 0x1c, + ]) + .unwrap(); + let route_hints = std::iter::repeat(RouteHintHop { + src_node_id: payee_pk, + short_channel_id: 0x0102030405060708, + fees: RoutingFees { base_msat: 1, proportional_millionths: 20 }, + cltv_expiry_delta: 3, + htlc_minimum_msat: None, + htlc_maximum_msat: None, + }) + .take(12) + .collect::>(); + + // Build raw invoice + let raw_invoice = RawBolt11Invoice { + hrp: RawHrp { + currency: Currency::Bitcoin, + raw_amount: Some(10000000000000000010), + si_prefix: Some(crate::SiPrefix::Pico), + }, + 
data: RawDataPart { + timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), + tagged_fields: vec![ + PaymentHash(Sha256(payment_hash)).into(), + Description(crate::Description::new(description).unwrap()).into(), + PayeePubKey(crate::PayeePubKey(payee_pk)).into(), + ExpiryTime(crate::ExpiryTime(std::time::Duration::from_secs(u64::MAX))).into(), + MinFinalCltvExpiryDelta(crate::MinFinalCltvExpiryDelta(u64::MAX)).into(), + Fallback(fallback_addr).into(), + PrivateRoute(crate::PrivateRoute(RouteHint(route_hints))).into(), + PaymentSecret(crate::PaymentSecret([17; 32])).into(), + PaymentMetadata(vec![0x69; 639]).into(), + Features(Bolt11InvoiceFeatures::from_le_bytes(vec![0xaa; 639])).into(), + // This invoice is 4458 B w/o unknown semantics fields. + // Need to add some non-standard fields to reach 7089 B limit. + RawTaggedField::UnknownSemantics(unknown_semantics_field(1023)), + RawTaggedField::UnknownSemantics(unknown_semantics_field(1023)), + RawTaggedField::UnknownSemantics(unknown_semantics_field(576)), + ], + }, + }; + + // Build signed invoice + let hash = [ + 0x75, 0x99, 0xe1, 0x51, 0x7f, 0xa1, 0x0e, 0xb5, 0xc0, 0x79, 0xb4, 0x6e, 0x8e, 0x62, + 0x0c, 0x4f, 0xb0, 0x72, 0x71, 0xd2, 0x81, 0xa1, 0x92, 0x65, 0x9c, 0x90, 0x89, 0x69, + 0xe1, 0xf3, 0xd6, 0x59, + ]; + let signature = &[ + 0x6c, 0xbe, 0xbe, 0xfe, 0xd3, 0xfb, 0x07, 0x68, 0xb5, 0x79, 0x98, 0x82, 0x29, 0xab, + 0x0e, 0xcc, 0x8d, 0x3a, 0x81, 0xee, 0xee, 0x07, 0xb3, 0x5d, 0x64, 0xca, 0xb4, 0x12, + 0x33, 0x99, 0x33, 0x2a, 0x31, 0xc2, 0x2c, 0x2b, 0x62, 0x96, 0x4e, 0x37, 0xd7, 0x96, + 0x50, 0x5e, 0xdb, 0xe9, 0xa9, 0x5b, 0x0b, 0x3b, 0x87, 0x22, 0x89, 0xed, 0x95, 0xf1, + 0xf1, 0xdf, 0x2d, 0xb6, 0xbd, 0xf5, 0x0a, 0x20, + ]; + let signature = Bolt11InvoiceSignature( + RecoverableSignature::from_compact(signature, RecoveryId::from_i32(1).unwrap()) + .unwrap(), + ); + let signed_invoice = SignedRawBolt11Invoice { raw_invoice, hash, signature }; + + // Ensure serialized invoice roundtrips + let invoice 
= Bolt11Invoice::from_signed(signed_invoice).unwrap(); + let invoice_str = invoice.to_string(); + assert_eq!(invoice_str.len(), crate::MAX_LENGTH); + assert_eq!(invoice, Bolt11Invoice::from_str(&invoice_str).unwrap()); + } + + // Test that invoices above the maximum length fail to parse with the expected error. + #[test] + fn test_deser_too_long_fails() { + use crate::{Bolt11Invoice, ParseOrSemanticError, MAX_LENGTH}; + use bech32::primitives::decode::{CheckedHrpstringError, ChecksumError}; + + fn parse_is_code_length_err(s: &str) -> bool { + // Need matches! b/c ChecksumError::CodeLength(_) is marked non-exhaustive + matches!( + Bolt11Invoice::from_str(s), + Err(ParseOrSemanticError::ParseError(Bolt11ParseError::Bech32Error( + CheckedHrpstringError::Checksum(ChecksumError::CodeLength(_)) + ))), + ) + } + + let mut too_long = String::from("lnbc1"); + too_long.push_str( + String::from_utf8(vec![b'x'; (MAX_LENGTH + 1) - too_long.len()]).unwrap().as_str(), + ); + assert!(parse_is_code_length_err(&too_long)); + assert!(!parse_is_code_length_err(&too_long[..too_long.len() - 1])); + } } diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index ed018535105..9a99d05929b 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -31,7 +31,7 @@ extern crate serde; use std::time::SystemTime; use bech32::primitives::decode::CheckedHrpstringError; -use bech32::Fe32; +use bech32::{Checksum, Fe32}; use bitcoin::hashes::{sha256, Hash}; use bitcoin::{Address, Network, PubkeyHash, ScriptHash, WitnessProgram, WitnessVersion}; use lightning_types::features::Bolt11InvoiceFeatures; @@ -149,6 +149,35 @@ pub const DEFAULT_EXPIRY_TIME: u64 = 3600; /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md pub const DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA: u64 = 18; +/// lightning-invoice will reject BOLT11 invoices that are longer than 7089 bytes. 
+/// +/// ### Rationale +/// +/// This value matches LND's implementation, which was chosen to be "the max number +/// of bytes that can fit in a QR code". LND's rationale is technically incorrect +/// as QR codes actually have a max capacity of 7089 _numeric_ characters and only +/// support up to 4296 all-uppercase alphanumeric characters. However, ecosystem-wide +/// consistency is more important. +pub const MAX_LENGTH: usize = 7089; + +/// The [`bech32::Bech32`] checksum algorithm, with extended max length suitable +/// for BOLT11 invoices. +/// +/// This is not exported to bindings users as it generally shouldn't be used directly publicly +/// anyway. +pub enum Bolt11Bech32 {} + +impl Checksum for Bolt11Bech32 { + /// Extend the max length from the 1023 bytes default. + const CODE_LENGTH: usize = MAX_LENGTH; + + // Inherit the other fields from `bech32::Bech32`. + type MidstateRepr = ::MidstateRepr; + const CHECKSUM_LENGTH: usize = bech32::Bech32::CHECKSUM_LENGTH; + const GENERATOR_SH: [Self::MidstateRepr; 5] = bech32::Bech32::GENERATOR_SH; + const TARGET_RESIDUE: Self::MidstateRepr = bech32::Bech32::TARGET_RESIDUE; +} + /// Builder for [`Bolt11Invoice`]s. It's the most convenient and advised way to use this library. It /// ensures that only a semantically and syntactically correct invoice can be built using it. 
/// diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index ed229b8b69a..f6bebca3d15 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -38,6 +38,7 @@ lightning-background-processor = { version = "0.1.0", path = "../lightning-backg proptest = "1.0.0" tokio = { version = "1.35", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } +parking_lot = { version = "0.12", default-features = false } [lints.rust.unexpected_cfgs] level = "forbid" diff --git a/lightning-liquidity/README.md b/lightning-liquidity/README.md index dd74ba830c2..163f4151341 100644 --- a/lightning-liquidity/README.md +++ b/lightning-liquidity/README.md @@ -1,15 +1,15 @@ # lightning-liquidity -The goal of this crate is to provide types and primitives to integrate a spec-compliant LSP with an LDK-based node. To this end, this crate provides client-side as well as service-side logic to implement the [LSP specifications]. +The goal of this crate is to provide types and primitives to integrate a spec-compliant LSP with an LDK-based node. To this end, this crate provides client-side as well as service-side logic to implement the LSPS specifications. **Note**: Service-side support is currently considered "beta", i.e., not fully ready for production use. Currently the following specifications are supported: -- [LSPS0] defines the transport protocol with the LSP over which the other protocols communicate. -- [LSPS1] allows to order Lightning channels from an LSP. This is useful when the client needs +- [bLIP-50 / LSPS0] defines the transport protocol with the LSP over which the other protocols communicate. +- [bLIP-51 / LSPS1] allows to order Lightning channels from an LSP. This is useful when the client needs inbound Lightning liquidity for which they are willing and able to pay in bitcoin. 
-- [LSPS2] allows to generate a special invoice for which, when paid, an LSP +- [bLIP-52 / LSPS2] allows to generate a special invoice for which, when paid, an LSP will open a "just-in-time" channel. This is useful for the initial on-boarding of clients as the channel opening fees are deducted from the incoming payment, i.e., no funds are required client-side to initiate this @@ -19,7 +19,6 @@ To get started, you'll want to setup a `LiquidityManager` and configure it to be `LiquidityManager` uses an eventing system to notify the user about important updates to the protocol flow. To this end, you will need to handle events emitted via one of the event handling methods provided by `LiquidityManager`, e.g., `LiquidityManager::next_event`. -[LSP specifications]: https://github.com/BitcoinAndLightningLayerSpecs/lsp -[LSPS0]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0 -[LSPS1]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS1 -[LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS2 +[bLIP-50 / LSPS0]: https://github.com/lightning/blips/blob/master/blip-0050.md +[bLIP-51 / LSPS1]: https://github.com/lightning/blips/blob/master/blip-0051.md +[bLIP-52 / LSPS2]: https://github.com/lightning/blips/blob/master/blip-0052.md diff --git a/lightning-liquidity/src/events.rs b/lightning-liquidity/src/events.rs index 3db772deec8..46308c7446c 100644 --- a/lightning-liquidity/src/events.rs +++ b/lightning-liquidity/src/events.rs @@ -18,9 +18,11 @@ use crate::lsps0; use crate::lsps1; use crate::lsps2; -use crate::prelude::{Vec, VecDeque}; use crate::sync::{Arc, Mutex}; +use alloc::collections::VecDeque; +use alloc::vec::Vec; + use core::future::Future; use core::task::{Poll, Waker}; @@ -28,7 +30,7 @@ use core::task::{Poll, Waker}; pub const MAX_EVENT_QUEUE_SIZE: usize = 1000; pub(crate) struct EventQueue { - queue: Arc>>, + queue: Arc>>, waker: Arc>>, #[cfg(feature = "std")] condvar: 
crate::sync::Condvar, @@ -47,11 +49,11 @@ impl EventQueue { Self { queue, waker } } - pub fn enqueue(&self, event: Event) { + pub fn enqueue>(&self, event: E) { { let mut queue = self.queue.lock().unwrap(); if queue.len() < MAX_EVENT_QUEUE_SIZE { - queue.push_back(event); + queue.push_back(event.into()); } else { return; } @@ -64,19 +66,21 @@ impl EventQueue { self.condvar.notify_one(); } - pub fn next_event(&self) -> Option { + pub fn next_event(&self) -> Option { self.queue.lock().unwrap().pop_front() } - pub async fn next_event_async(&self) -> Event { + pub async fn next_event_async(&self) -> LiquidityEvent { EventFuture { event_queue: Arc::clone(&self.queue), waker: Arc::clone(&self.waker) }.await } #[cfg(feature = "std")] - pub fn wait_next_event(&self) -> Event { + pub fn wait_next_event(&self) -> LiquidityEvent { let mut queue = self .condvar - .wait_while(self.queue.lock().unwrap(), |queue: &mut VecDeque| queue.is_empty()) + .wait_while(self.queue.lock().unwrap(), |queue: &mut VecDeque| { + queue.is_empty() + }) .unwrap(); let event = queue.pop_front().expect("non-empty queue"); @@ -95,14 +99,14 @@ impl EventQueue { event } - pub fn get_and_clear_pending_events(&self) -> Vec { + pub fn get_and_clear_pending_events(&self) -> Vec { self.queue.lock().unwrap().split_off(0).into() } } /// An event which you should probably take some action in response to. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum Event { +pub enum LiquidityEvent { /// An LSPS0 client event. LSPS0Client(lsps0::event::LSPS0ClientEvent), /// An LSPS1 (Channel Request) client event. 
@@ -116,13 +120,44 @@ pub enum Event { LSPS2Service(lsps2::event::LSPS2ServiceEvent), } +impl From for LiquidityEvent { + fn from(event: lsps0::event::LSPS0ClientEvent) -> Self { + Self::LSPS0Client(event) + } +} + +impl From for LiquidityEvent { + fn from(event: lsps1::event::LSPS1ClientEvent) -> Self { + Self::LSPS1Client(event) + } +} + +#[cfg(lsps1_service)] +impl From for LiquidityEvent { + fn from(event: lsps1::event::LSPS1ServiceEvent) -> Self { + Self::LSPS1Service(event) + } +} + +impl From for LiquidityEvent { + fn from(event: lsps2::event::LSPS2ClientEvent) -> Self { + Self::LSPS2Client(event) + } +} + +impl From for LiquidityEvent { + fn from(event: lsps2::event::LSPS2ServiceEvent) -> Self { + Self::LSPS2Service(event) + } +} + struct EventFuture { - event_queue: Arc>>, + event_queue: Arc>>, waker: Arc>>, } impl Future for EventFuture { - type Output = Event; + type Output = LiquidityEvent; fn poll( self: core::pin::Pin<&mut Self>, cx: &mut core::task::Context<'_>, @@ -154,7 +189,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap()); - let expected_event = Event::LSPS0Client(LSPS0ClientEvent::ListProtocolsResponse { + let expected_event = LiquidityEvent::LSPS0Client(LSPS0ClientEvent::ListProtocolsResponse { counterparty_node_id, protocols: Vec::new(), }); diff --git a/lightning-liquidity/src/lib.rs b/lightning-liquidity/src/lib.rs index 520c2009811..909590eac96 100644 --- a/lightning-liquidity/src/lib.rs +++ b/lightning-liquidity/src/lib.rs @@ -8,19 +8,21 @@ // licenses. #![crate_name = "lightning_liquidity"] -//! The goal of this crate is to provide types and primitives to integrate a spec-compliant LSP with an LDK-based node. To this end, this crate provides client-side as well as service-side logic to implement the [LSP specifications]. +//! The goal of this crate is to provide types and primitives to integrate a spec-compliant LSP +//! 
with an LDK-based node. To this end, this crate provides client-side as well as service-side +//! logic to implement the LSPS specifications. //! //! **Note**: Service-side support is currently considered "beta", i.e., not fully ready for //! production use. //! //! Currently the following specifications are supported: -//! - [LSPS0] defines the transport protocol with the LSP over which the other protocols communicate. -//! - [LSPS1] allows to order Lightning channels from an LSP. This is useful when the client needs +//! - [bLIP-50 / LSPS0] defines the transport protocol with the LSP over which the other protocols communicate. +//! - [bLIP-51 / LSPS1] defines a protocol for ordering Lightning channels from an LSP. This is useful when the client needs //! inbound Lightning liquidity for which they are willing and able to pay in bitcoin. -//! - [LSPS2] allows to generate a special invoice for which, when paid, an LSP will open a -//! "just-in-time" channel. This is useful for the initial on-boarding of clients as the channel -//! opening fees are deducted from the incoming payment, i.e., no funds are required client-side to -//! initiate this flow. +//! - [bLIP-52 / LSPS2] defines a protocol for generating a special invoice for which, when paid, +//! an LSP will open a "just-in-time" channel. This is useful for the initial on-boarding of +//! clients as the channel opening fees are deducted from the incoming payment, i.e., no funds are +//! required client-side to initiate this flow. //! //! To get started, you'll want to setup a [`LiquidityManager`] and configure it to be the //! [`CustomMessageHandler`] of your LDK node. You can then for example call @@ -32,10 +34,9 @@ //! protocol flow. To this end, you will need to handle events emitted via one of the event //! handling methods provided by [`LiquidityManager`], e.g., [`LiquidityManager::next_event`]. //! -//! [LSP specifications]: https://github.com/BitcoinAndLightningLayerSpecs/lsp -//! 
[LSPS0]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0 -//! [LSPS1]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS1 -//! [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS2 +//! [bLIP-50 / LSPS0]: https://github.com/lightning/blips/blob/master/blip-0050.md +//! [bLIP-51 / LSPS1]: https://github.com/lightning/blips/blob/master/blip-0051.md +//! [bLIP-52 / LSPS2]: https://github.com/lightning/blips/blob/master/blip-0052.md //! [`CustomMessageHandler`]: lightning::ln::peer_handler::CustomMessageHandler //! [`LiquidityManager::next_event`]: crate::LiquidityManager::next_event #![deny(missing_docs)] @@ -51,12 +52,6 @@ extern crate alloc; mod prelude { - #![allow(unused_imports)] - pub use alloc::{boxed::Box, collections::VecDeque, string::String, vec, vec::Vec}; - - pub use alloc::borrow::ToOwned; - pub use alloc::string::ToString; - pub(crate) use lightning::util::hash_tables::*; } diff --git a/lightning-liquidity/src/lsps0/client.rs b/lightning-liquidity/src/lsps0/client.rs index ab169bd7efb..7b049e65566 100644 --- a/lightning-liquidity/src/lsps0/client.rs +++ b/lightning-liquidity/src/lsps0/client.rs @@ -1,15 +1,16 @@ -//! Contains the main LSPS0 client-side object, [`LSPS0ClientHandler`]. +//! Contains the main bLIP-50 / LSPS0 client-side object, [`LSPS0ClientHandler`]. //! -//! Please refer to the [LSPS0 -//! specifcation](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0) for more +//! Please refer to the [bLIP-50 / LSPS0 +//! specifcation](https://github.com/lightning/blips/blob/master/blip-0050.md) for more //! information. 
-use crate::events::{Event, EventQueue}; +use crate::events::EventQueue; use crate::lsps0::event::LSPS0ClientEvent; use crate::lsps0::msgs::{ - LSPS0Message, LSPS0Request, LSPS0Response, ListProtocolsRequest, ListProtocolsResponse, + LSPS0ListProtocolsRequest, LSPS0ListProtocolsResponse, LSPS0Message, LSPS0Request, + LSPS0Response, }; -use crate::lsps0::ser::{ProtocolMessageHandler, ResponseError}; +use crate::lsps0::ser::{LSPSProtocolMessageHandler, LSPSResponseError}; use crate::message_queue::MessageQueue; use crate::sync::Arc; use crate::utils; @@ -22,7 +23,7 @@ use bitcoin::secp256k1::PublicKey; use core::ops::Deref; -/// A message handler capable of sending and handling LSPS0 messages. +/// A message handler capable of sending and handling bLIP-50 / LSPS0 messages. pub struct LSPS0ClientHandler where ES::Target: EntropySource, @@ -43,15 +44,15 @@ where Self { entropy_source, pending_messages, pending_events } } - /// Calls LSPS0's `list_protocols`. + /// Calls bLIP-50 / LSPS0's `list_protocols`. /// - /// Please refer to the [LSPS0 - /// specifcation](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0#lsps-specification-support-query) + /// Please refer to the [bLIP-50 / LSPS0 + /// specifcation](https://github.com/lightning/blips/blob/master/blip-0050.md#lsps-specification-support-query) /// for more information. 
pub fn list_protocols(&self, counterparty_node_id: &PublicKey) { let msg = LSPS0Message::Request( utils::generate_request_id(&self.entropy_source), - LSPS0Request::ListProtocols(ListProtocolsRequest {}), + LSPS0Request::ListProtocols(LSPS0ListProtocolsRequest {}), ); self.pending_messages.enqueue(counterparty_node_id, msg.into()); @@ -61,29 +62,27 @@ where &self, response: LSPS0Response, counterparty_node_id: &PublicKey, ) -> Result<(), LightningError> { match response { - LSPS0Response::ListProtocols(ListProtocolsResponse { protocols }) => { - self.pending_events.enqueue(Event::LSPS0Client( - LSPS0ClientEvent::ListProtocolsResponse { - counterparty_node_id: *counterparty_node_id, - protocols, - }, - )); + LSPS0Response::ListProtocols(LSPS0ListProtocolsResponse { protocols }) => { + self.pending_events.enqueue(LSPS0ClientEvent::ListProtocolsResponse { + counterparty_node_id: *counterparty_node_id, + protocols, + }); Ok(()) }, - LSPS0Response::ListProtocolsError(ResponseError { code, message, data, .. }) => { - Err(LightningError { - err: format!( - "ListProtocols error received. code = {}, message = {}, data = {:?}", - code, message, data - ), - action: ErrorAction::IgnoreAndLog(Level::Info), - }) - }, + LSPS0Response::ListProtocolsError(LSPSResponseError { + code, message, data, .. + }) => Err(LightningError { + err: format!( + "ListProtocols error received. 
code = {}, message = {}, data = {:?}", + code, message, data + ), + action: ErrorAction::IgnoreAndLog(Level::Info), + }), } } } -impl ProtocolMessageHandler for LSPS0ClientHandler +impl LSPSProtocolMessageHandler for LSPS0ClientHandler where ES::Target: EntropySource, { @@ -114,7 +113,7 @@ mod tests { use alloc::string::ToString; use alloc::sync::Arc; - use crate::lsps0::ser::{LSPSMessage, RequestId}; + use crate::lsps0::ser::{LSPSMessage, LSPSRequestId}; use crate::tests::utils::{self, TestEntropy}; use super::*; @@ -147,8 +146,8 @@ mod tests { assert_eq!( *message, LSPSMessage::LSPS0(LSPS0Message::Request( - RequestId("00000000000000000000000000000000".to_string()), - LSPS0Request::ListProtocols(ListProtocolsRequest {}) + LSPSRequestId("00000000000000000000000000000000".to_string()), + LSPS0Request::ListProtocols(LSPS0ListProtocolsRequest {}) )) ); } diff --git a/lightning-liquidity/src/lsps0/event.rs b/lightning-liquidity/src/lsps0/event.rs index 163114ddb54..97a3a950090 100644 --- a/lightning-liquidity/src/lsps0/event.rs +++ b/lightning-liquidity/src/lsps0/event.rs @@ -7,12 +7,13 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! Contains LSPS0 event types +//! Contains bLIP-50 / LSPS0 event types. + +use alloc::vec::Vec; -use crate::prelude::Vec; use bitcoin::secp256k1::PublicKey; -/// An event which an LSPS0 client may want to take some action in response to. +/// An event which an bLIP-50 / LSPS0 client may want to take some action in response to. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS0ClientEvent { /// Information from the LSP about the protocols they support. diff --git a/lightning-liquidity/src/lsps0/mod.rs b/lightning-liquidity/src/lsps0/mod.rs index 4211ef5c2d6..0055112f974 100644 --- a/lightning-liquidity/src/lsps0/mod.rs +++ b/lightning-liquidity/src/lsps0/mod.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! 
Types and primitives that implement the LSPS0: Transport Layer specification. +//! Types and primitives that implement the bLIP-50 / LSPS0: Transport Layer specification. pub mod client; pub mod event; diff --git a/lightning-liquidity/src/lsps0/msgs.rs b/lightning-liquidity/src/lsps0/msgs.rs index 631cc9206c5..24df03a1481 100644 --- a/lightning-liquidity/src/lsps0/msgs.rs +++ b/lightning-liquidity/src/lsps0/msgs.rs @@ -1,72 +1,77 @@ //! Message, request, and other primitive types used to implement LSPS0. -use crate::lsps0::ser::{LSPSMessage, RequestId, ResponseError}; -use crate::prelude::Vec; +use alloc::vec::Vec; +use core::convert::TryFrom; -use serde::{Deserialize, Serialize}; +use crate::lsps0::ser::{LSPSMessage, LSPSRequestId, LSPSResponseError}; -use core::convert::TryFrom; +use serde::{Deserialize, Serialize}; pub(crate) const LSPS0_LISTPROTOCOLS_METHOD_NAME: &str = "lsps0.list_protocols"; /// A `list_protocols` request. /// -/// Please refer to the [LSPS0 specification](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0#lsps-specification-support-query) +/// Please refer to the [bLIP-50 / LSPS0 +/// specification](https://github.com/lightning/blips/blob/master/blip-0050.md#lsps-specification-support-query) /// for more information. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Default)] -pub struct ListProtocolsRequest {} +pub struct LSPS0ListProtocolsRequest {} /// A response to a `list_protocols` request. /// -/// Please refer to the [LSPS0 specification](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0#lsps-specification-support-query) +/// Please refer to the [bLIP-50 / LSPS0 +/// specification](https://github.com/lightning/blips/blob/master/blip-0050.md#lsps-specification-support-query) /// for more information. 
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct ListProtocolsResponse { +pub struct LSPS0ListProtocolsResponse { /// A list of supported protocols. pub protocols: Vec, } -/// An LSPS0 protocol request. +/// An bLIP-50 / LSPS0 protocol request. /// -/// Please refer to the [LSPS0 specification](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0) +/// Please refer to the [bLIP-50 / LSPS0 +/// specification](https://github.com/lightning/blips/blob/master/blip-0050.md#lsps-specification-support-query) /// for more information. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS0Request { /// A request calling `list_protocols`. - ListProtocols(ListProtocolsRequest), + ListProtocols(LSPS0ListProtocolsRequest), } impl LSPS0Request { /// Returns the method name associated with the given request variant. - pub fn method(&self) -> &str { + pub fn method(&self) -> &'static str { match self { LSPS0Request::ListProtocols(_) => LSPS0_LISTPROTOCOLS_METHOD_NAME, } } } -/// An LSPS0 protocol request. +/// An bLIP-50 / LSPS0 protocol request. /// -/// Please refer to the [LSPS0 specification](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0) +/// Please refer to the [bLIP-50 / LSPS0 +/// specification](https://github.com/lightning/blips/blob/master/blip-0050.md#lsps-specification-support-query) /// for more information. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS0Response { /// A response to a `list_protocols` request. - ListProtocols(ListProtocolsResponse), + ListProtocols(LSPS0ListProtocolsResponse), /// An error response to a `list_protocols` request. - ListProtocolsError(ResponseError), + ListProtocolsError(LSPSResponseError), } -/// An LSPS0 protocol message. +/// An bLIP-50 / LSPS0 protocol message. 
/// -/// Please refer to the [LSPS0 specification](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0) +/// Please refer to the [bLIP-50 / LSPS0 +/// specification](https://github.com/lightning/blips/blob/master/blip-0050.md#lsps-specification-support-query) /// for more information. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS0Message { /// A request variant. - Request(RequestId, LSPS0Request), + Request(LSPSRequestId, LSPS0Request), /// A response variant. - Response(RequestId, LSPS0Response), + Response(LSPSRequestId, LSPS0Response), } impl TryFrom for LSPS0Message { @@ -94,7 +99,8 @@ mod tests { use super::*; use crate::lsps0::ser::LSPSMethod; - use crate::prelude::ToString; + + use alloc::string::ToString; #[test] fn deserializes_request() { @@ -112,8 +118,8 @@ mod tests { assert_eq!( msg, LSPSMessage::LSPS0(LSPS0Message::Request( - RequestId("request:id:xyz123".to_string()), - LSPS0Request::ListProtocols(ListProtocolsRequest {}) + LSPSRequestId("request:id:xyz123".to_string()), + LSPS0Request::ListProtocols(LSPS0ListProtocolsRequest {}) )) ); } @@ -121,8 +127,8 @@ mod tests { #[test] fn serializes_request() { let request = LSPSMessage::LSPS0(LSPS0Message::Request( - RequestId("request:id:xyz123".to_string()), - LSPS0Request::ListProtocols(ListProtocolsRequest {}), + LSPSRequestId("request:id:xyz123".to_string()), + LSPS0Request::ListProtocols(LSPS0ListProtocolsRequest {}), )); let json = serde_json::to_string(&request).unwrap(); assert_eq!( @@ -142,7 +148,7 @@ mod tests { }"#; let mut request_id_to_method_map = new_hash_map(); request_id_to_method_map - .insert(RequestId("request:id:xyz123".to_string()), LSPSMethod::LSPS0ListProtocols); + .insert(LSPSRequestId("request:id:xyz123".to_string()), LSPSMethod::LSPS0ListProtocols); let response = LSPSMessage::from_str_with_id_map(json, &mut request_id_to_method_map).unwrap(); @@ -150,8 +156,10 @@ mod tests { assert_eq!( response, LSPSMessage::LSPS0(LSPS0Message::Response( - 
RequestId("request:id:xyz123".to_string()), - LSPS0Response::ListProtocols(ListProtocolsResponse { protocols: vec![1, 2, 3] }) + LSPSRequestId("request:id:xyz123".to_string()), + LSPS0Response::ListProtocols(LSPS0ListProtocolsResponse { + protocols: vec![1, 2, 3] + }) )) ); } @@ -168,7 +176,7 @@ mod tests { }"#; let mut request_id_to_method_map = new_hash_map(); request_id_to_method_map - .insert(RequestId("request:id:xyz123".to_string()), LSPSMethod::LSPS0ListProtocols); + .insert(LSPSRequestId("request:id:xyz123".to_string()), LSPSMethod::LSPS0ListProtocols); let response = LSPSMessage::from_str_with_id_map(json, &mut request_id_to_method_map).unwrap(); @@ -176,8 +184,8 @@ mod tests { assert_eq!( response, LSPSMessage::LSPS0(LSPS0Message::Response( - RequestId("request:id:xyz123".to_string()), - LSPS0Response::ListProtocolsError(ResponseError { + LSPSRequestId("request:id:xyz123".to_string()), + LSPS0Response::ListProtocolsError(LSPSResponseError { code: -32617, message: "Unknown Error".to_string(), data: None @@ -197,7 +205,7 @@ mod tests { }"#; let mut request_id_to_method_map = new_hash_map(); request_id_to_method_map - .insert(RequestId("request:id:xyz123".to_string()), LSPSMethod::LSPS0ListProtocols); + .insert(LSPSRequestId("request:id:xyz123".to_string()), LSPSMethod::LSPS0ListProtocols); let response = LSPSMessage::from_str_with_id_map(json, &mut request_id_to_method_map); assert!(response.is_err()); @@ -206,8 +214,8 @@ mod tests { #[test] fn serializes_response() { let response = LSPSMessage::LSPS0(LSPS0Message::Response( - RequestId("request:id:xyz123".to_string()), - LSPS0Response::ListProtocols(ListProtocolsResponse { protocols: vec![1, 2, 3] }), + LSPSRequestId("request:id:xyz123".to_string()), + LSPS0Response::ListProtocols(LSPS0ListProtocolsResponse { protocols: vec![1, 2, 3] }), )); let json = serde_json::to_string(&response).unwrap(); assert_eq!( diff --git a/lightning-liquidity/src/lsps0/ser.rs b/lightning-liquidity/src/lsps0/ser.rs index 
afac232966a..c3fa9553489 100644 --- a/lightning-liquidity/src/lsps0/ser.rs +++ b/lightning-liquidity/src/lsps0/ser.rs @@ -1,9 +1,16 @@ //! Contains basic data types that allow for the (de-)seralization of LSPS messages in the JSON-RPC 2.0 format. //! -//! Please refer to the [LSPS0 specification](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0) for more information. +//! Please refer to the [bLIP-50 / LSPS0 +//! specification](https://github.com/lightning/blips/blob/master/blip-0050.md) for more +//! information. + +use alloc::string::String; + +use core::fmt::{self, Display}; +use core::str::FromStr; use crate::lsps0::msgs::{ - LSPS0Message, LSPS0Request, LSPS0Response, ListProtocolsRequest, + LSPS0ListProtocolsRequest, LSPS0Message, LSPS0Request, LSPS0Response, LSPS0_LISTPROTOCOLS_METHOD_NAME, }; @@ -14,7 +21,7 @@ use crate::lsps1::msgs::{ use crate::lsps2::msgs::{ LSPS2Message, LSPS2Request, LSPS2Response, LSPS2_BUY_METHOD_NAME, LSPS2_GET_INFO_METHOD_NAME, }; -use crate::prelude::{HashMap, String}; +use crate::prelude::HashMap; use lightning::ln::msgs::LightningError; use lightning::ln::wire; @@ -22,8 +29,8 @@ use lightning::util::ser::WithoutLength; use bitcoin::secp256k1::PublicKey; -use core::fmt; -use core::str::FromStr; +#[cfg(feature = "std")] +use std::time::{SystemTime, UNIX_EPOCH}; use serde::de::{self, MapAccess, Visitor}; use serde::ser::SerializeStruct; @@ -136,7 +143,7 @@ pub const LSPS_MESSAGE_TYPE_ID: u16 = 37913; /// /// The messages the protocol uses need to be able to be mapped /// from and into [`LSPSMessage`]. -pub(crate) trait ProtocolMessageHandler { +pub(crate) trait LSPSProtocolMessageHandler { type ProtocolMessage: TryFrom + Into; const PROTOCOL_NUMBER: Option; @@ -182,14 +189,52 @@ impl wire::Type for RawLSPSMessage { /// more information. 
#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)] #[serde(transparent)] -pub struct RequestId(pub String); +pub struct LSPSRequestId(pub String); + +/// An object representing datetimes as described in bLIP-50 / LSPS0. +#[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(transparent)] +pub struct LSPSDateTime(chrono::DateTime); + +impl LSPSDateTime { + /// Returns the LSPSDateTime as RFC3339 formatted string. + pub fn to_rfc3339(&self) -> String { + self.0.to_rfc3339() + } + + /// Returns if the given time is in the past. + #[cfg(feature = "std")] + pub fn is_past(&self) -> bool { + let now_seconds_since_epoch = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("system clock to be ahead of the unix epoch") + .as_secs(); + let datetime_seconds_since_epoch = + self.0.timestamp().try_into().expect("expiration to be ahead of unix epoch"); + now_seconds_since_epoch > datetime_seconds_since_epoch + } +} + +impl FromStr for LSPSDateTime { + type Err = (); + fn from_str(s: &str) -> Result { + let datetime = chrono::DateTime::parse_from_rfc3339(s).map_err(|_| ())?; + Ok(Self(datetime.into())) + } +} + +impl Display for LSPSDateTime { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.to_rfc3339()) + } +} /// An error returned in response to an JSON-RPC request. /// /// Please refer to the [JSON-RPC 2.0 specification](https://www.jsonrpc.org/specification#error_object) for /// more information. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct ResponseError { +pub struct LSPSResponseError { /// A number that indicates the error type that occurred. pub code: i32, /// A string providing a short description of the error. @@ -202,7 +247,7 @@ pub struct ResponseError { #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPSMessage { /// An invalid variant. - Invalid(ResponseError), + Invalid(LSPSResponseError), /// An LSPS0 message. 
LSPS0(LSPS0Message), /// An LSPS1 message. @@ -217,7 +262,7 @@ impl LSPSMessage { /// The given `request_id_to_method` associates request ids with method names, as response objects /// don't carry the latter. pub(crate) fn from_str_with_id_map( - json_str: &str, request_id_to_method_map: &mut HashMap, + json_str: &str, request_id_to_method_map: &mut HashMap, ) -> Result { let deserializer = &mut serde_json::Deserializer::from_str(json_str); let visitor = LSPSMessageVisitor { request_id_to_method_map }; @@ -225,16 +270,16 @@ impl LSPSMessage { } /// Returns the request id and the method. - pub(crate) fn get_request_id_and_method(&self) -> Option<(RequestId, LSPSMethod)> { + pub(crate) fn get_request_id_and_method(&self) -> Option<(LSPSRequestId, LSPSMethod)> { match self { LSPSMessage::LSPS0(LSPS0Message::Request(request_id, request)) => { - Some((RequestId(request_id.0.clone()), request.into())) + Some((LSPSRequestId(request_id.0.clone()), request.into())) }, LSPSMessage::LSPS1(LSPS1Message::Request(request_id, request)) => { - Some((RequestId(request_id.0.clone()), request.into())) + Some((LSPSRequestId(request_id.0.clone()), request.into())) }, LSPSMessage::LSPS2(LSPS2Message::Request(request_id, request)) => { - Some((RequestId(request_id.0.clone()), request.into())) + Some((LSPSRequestId(request_id.0.clone()), request.into())) }, _ => None, } @@ -359,7 +404,7 @@ impl Serialize for LSPSMessage { } struct LSPSMessageVisitor<'a> { - request_id_to_method_map: &'a mut HashMap, + request_id_to_method_map: &'a mut HashMap, } impl<'de, 'a> Visitor<'de> for LSPSMessageVisitor<'a> { @@ -373,11 +418,11 @@ impl<'de, 'a> Visitor<'de> for LSPSMessageVisitor<'a> { where A: MapAccess<'de>, { - let mut id: Option = None; + let mut id: Option = None; let mut method: Option = None; let mut params = None; let mut result = None; - let mut error: Option = None; + let mut error: Option = None; while let Some(key) = map.next_key()? 
{ match key { @@ -426,7 +471,7 @@ impl<'de, 'a> Visitor<'de> for LSPSMessageVisitor<'a> { Some(method) => match method { LSPSMethod::LSPS0ListProtocols => Ok(LSPSMessage::LSPS0(LSPS0Message::Request( id, - LSPS0Request::ListProtocols(ListProtocolsRequest {}), + LSPS0Request::ListProtocols(LSPS0ListProtocolsRequest {}), ))), LSPSMethod::LSPS1GetInfo => { let request = serde_json::from_value(params.unwrap_or(json!({}))) @@ -581,7 +626,7 @@ impl<'de, 'a> Visitor<'de> for LSPSMessageVisitor<'a> { } pub(crate) mod string_amount { - use crate::prelude::{String, ToString}; + use alloc::string::{String, ToString}; use core::str::FromStr; use serde::de::Unexpected; use serde::{Deserialize, Deserializer, Serializer}; @@ -606,7 +651,7 @@ pub(crate) mod string_amount { } pub(crate) mod string_amount_option { - use crate::prelude::{String, ToString}; + use alloc::string::{String, ToString}; use core::str::FromStr; use serde::de::Unexpected; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -635,7 +680,7 @@ pub(crate) mod string_amount_option { } pub(crate) mod unchecked_address { - use crate::prelude::{String, ToString}; + use alloc::string::{String, ToString}; use bitcoin::Address; use core::str::FromStr; use serde::de::Unexpected; @@ -662,7 +707,7 @@ pub(crate) mod unchecked_address { } pub(crate) mod unchecked_address_option { - use crate::prelude::{String, ToString}; + use alloc::string::{String, ToString}; use bitcoin::Address; use core::str::FromStr; use serde::de::Unexpected; diff --git a/lightning-liquidity/src/lsps0/service.rs b/lightning-liquidity/src/lsps0/service.rs index bc52fa11dd9..be9549e7519 100644 --- a/lightning-liquidity/src/lsps0/service.rs +++ b/lightning-liquidity/src/lsps0/service.rs @@ -7,16 +7,17 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! Contains the main LSPS0 server-side object, [`LSPS0ServiceHandler`]. +//! 
Contains the main bLIP-50 / LSPS0 server-side object, [`LSPS0ServiceHandler`]. //! -//! Please refer to the [LSPS0 -//! specifcation](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS0) for more +//! Please refer to the [bLIP-50 / LSPS0 +//! specifcation](https://github.com/lightning/blips/blob/master/blip-0050.md) for more //! information. -use crate::lsps0::msgs::{LSPS0Message, LSPS0Request, LSPS0Response, ListProtocolsResponse}; -use crate::lsps0::ser::{ProtocolMessageHandler, RequestId}; +use alloc::vec::Vec; + +use crate::lsps0::msgs::{LSPS0ListProtocolsResponse, LSPS0Message, LSPS0Request, LSPS0Response}; +use crate::lsps0::ser::{LSPSProtocolMessageHandler, LSPSRequestId}; use crate::message_queue::MessageQueue; -use crate::prelude::Vec; use crate::sync::Arc; use lightning::ln::msgs::{ErrorAction, LightningError}; @@ -24,7 +25,7 @@ use lightning::util::logger::Level; use bitcoin::secp256k1::PublicKey; -/// The main server-side object allowing to send and receive LSPS0 messages. +/// The main server-side object allowing to send and receive bLIP-50 / LSPS0 messages. 
pub struct LSPS0ServiceHandler { pending_messages: Arc, protocols: Vec, @@ -37,13 +38,13 @@ impl LSPS0ServiceHandler { } fn handle_request( - &self, request_id: RequestId, request: LSPS0Request, counterparty_node_id: &PublicKey, + &self, request_id: LSPSRequestId, request: LSPS0Request, counterparty_node_id: &PublicKey, ) -> Result<(), lightning::ln::msgs::LightningError> { match request { LSPS0Request::ListProtocols(_) => { let msg = LSPS0Message::Response( request_id, - LSPS0Response::ListProtocols(ListProtocolsResponse { + LSPS0Response::ListProtocols(LSPS0ListProtocolsResponse { protocols: self.protocols.clone(), }), ); @@ -54,7 +55,7 @@ impl LSPS0ServiceHandler { } } -impl ProtocolMessageHandler for LSPS0ServiceHandler { +impl LSPSProtocolMessageHandler for LSPS0ServiceHandler { type ProtocolMessage = LSPS0Message; const PROTOCOL_NUMBER: Option = None; @@ -79,7 +80,7 @@ impl ProtocolMessageHandler for LSPS0ServiceHandler { #[cfg(test)] mod tests { - use crate::lsps0::msgs::ListProtocolsRequest; + use crate::lsps0::msgs::LSPS0ListProtocolsRequest; use crate::lsps0::ser::LSPSMessage; use crate::tests::utils; use alloc::string::ToString; @@ -95,8 +96,8 @@ mod tests { let lsps0_handler = Arc::new(LSPS0ServiceHandler::new(protocols, pending_messages.clone())); let list_protocols_request = LSPS0Message::Request( - RequestId("xyz123".to_string()), - LSPS0Request::ListProtocols(ListProtocolsRequest {}), + LSPSRequestId("xyz123".to_string()), + LSPS0Request::ListProtocols(LSPS0ListProtocolsRequest {}), ); let counterparty_node_id = utils::parse_pubkey( "027100442c3b79f606f80f322d98d499eefcb060599efc5d4ecb00209c2cb54190", @@ -114,8 +115,8 @@ mod tests { assert_eq!( *message, LSPSMessage::LSPS0(LSPS0Message::Response( - RequestId("xyz123".to_string()), - LSPS0Response::ListProtocols(ListProtocolsResponse { protocols: vec![] }) + LSPSRequestId("xyz123".to_string()), + LSPS0Response::ListProtocols(LSPS0ListProtocolsResponse { protocols: vec![] }) )) ); } diff --git 
a/lightning-liquidity/src/lsps1/client.rs b/lightning-liquidity/src/lsps1/client.rs index 75709d512a3..88b3abc5ce3 100644 --- a/lightning-liquidity/src/lsps1/client.rs +++ b/lightning-liquidity/src/lsps1/client.rs @@ -7,17 +7,18 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! Contains the main LSPS1 client object, [`LSPS1ClientHandler`]. +//! Contains the main bLIP-51 / LSPS1 client object, [`LSPS1ClientHandler`]. use super::event::LSPS1ClientEvent; use super::msgs::{ - CreateOrderRequest, CreateOrderResponse, GetInfoRequest, GetInfoResponse, GetOrderRequest, - LSPS1Message, LSPS1Request, LSPS1Response, OrderId, OrderParameters, + LSPS1CreateOrderRequest, LSPS1CreateOrderResponse, LSPS1GetInfoRequest, LSPS1GetInfoResponse, + LSPS1GetOrderRequest, LSPS1Message, LSPS1OrderId, LSPS1OrderParams, LSPS1Request, + LSPS1Response, }; use crate::message_queue::MessageQueue; -use crate::events::{Event, EventQueue}; -use crate::lsps0::ser::{ProtocolMessageHandler, RequestId, ResponseError}; +use crate::events::EventQueue; +use crate::lsps0::ser::{LSPSProtocolMessageHandler, LSPSRequestId, LSPSResponseError}; use crate::prelude::{new_hash_map, HashMap, HashSet}; use crate::sync::{Arc, Mutex, RwLock}; @@ -30,7 +31,7 @@ use bitcoin::Address; use core::ops::Deref; -/// Client-side configuration options for LSPS1 channel requests. +/// Client-side configuration options for bLIP-51 / LSPS1 channel requests. #[derive(Clone, Debug)] pub struct LSPS1ClientConfig { /// The maximally allowed channel fees. @@ -39,12 +40,12 @@ pub struct LSPS1ClientConfig { #[derive(Default)] struct PeerState { - pending_get_info_requests: HashSet, - pending_create_order_requests: HashSet, - pending_get_order_requests: HashSet, + pending_get_info_requests: HashSet, + pending_create_order_requests: HashSet, + pending_get_order_requests: HashSet, } -/// The main object allowing to send and receive LSPS1 messages. 
+/// The main object allowing to send and receive bLIP-51 / LSPS1 messages. pub struct LSPS1ClientHandler where ES::Target: EntropySource, @@ -80,10 +81,10 @@ where /// /// `counterparty_node_id` is the `node_id` of the LSP you would like to use. /// - /// Returns the used [`RequestId`], which will be returned via [`SupportedOptionsReady`]. + /// Returns the used [`LSPSRequestId`], which will be returned via [`SupportedOptionsReady`]. /// /// [`SupportedOptionsReady`]: crate::lsps1::event::LSPS1ClientEvent::SupportedOptionsReady - pub fn request_supported_options(&self, counterparty_node_id: PublicKey) -> RequestId { + pub fn request_supported_options(&self, counterparty_node_id: PublicKey) -> LSPSRequestId { let request_id = crate::utils::generate_request_id(&self.entropy_source); { let mut outer_state_lock = self.per_peer_state.write().unwrap(); @@ -94,14 +95,15 @@ where peer_state_lock.pending_get_info_requests.insert(request_id.clone()); } - let request = LSPS1Request::GetInfo(GetInfoRequest {}); + let request = LSPS1Request::GetInfo(LSPS1GetInfoRequest {}); let msg = LSPS1Message::Request(request_id.clone(), request).into(); self.pending_messages.enqueue(&counterparty_node_id, msg); request_id } fn handle_get_info_response( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, result: GetInfoResponse, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + result: LSPS1GetInfoResponse, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.write().unwrap(); @@ -119,13 +121,11 @@ where }); } - self.pending_events.enqueue(Event::LSPS1Client( - LSPS1ClientEvent::SupportedOptionsReady { - counterparty_node_id: *counterparty_node_id, - supported_options: result.options, - request_id, - }, - )); + self.pending_events.enqueue(LSPS1ClientEvent::SupportedOptionsReady { + counterparty_node_id: *counterparty_node_id, + supported_options: result.options, + request_id, + }); Ok(()) }, None => Err(LightningError { @@ -139,7 
+139,8 @@ where } fn handle_get_info_error( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, error: ResponseError, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + error: LSPSResponseError, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); match outer_state_lock.get(counterparty_node_id) { @@ -156,13 +157,11 @@ where }); } - self.pending_events.enqueue(Event::LSPS1Client( - LSPS1ClientEvent::SupportedOptionsRequestFailed { - request_id: request_id.clone(), - counterparty_node_id: *counterparty_node_id, - error: error.clone(), - }, - )); + self.pending_events.enqueue(LSPS1ClientEvent::SupportedOptionsRequestFailed { + request_id: request_id.clone(), + counterparty_node_id: *counterparty_node_id, + error: error.clone(), + }); Err(LightningError { err: format!( @@ -188,9 +187,9 @@ where /// /// The client agrees to paying channel fees according to the provided parameters. pub fn create_order( - &self, counterparty_node_id: &PublicKey, order: OrderParameters, + &self, counterparty_node_id: &PublicKey, order: LSPS1OrderParams, refund_onchain_address: Option
, - ) -> RequestId { + ) -> LSPSRequestId { let (request_id, request_msg) = { let mut outer_state_lock = self.per_peer_state.write().unwrap(); let inner_state_lock = outer_state_lock @@ -199,8 +198,10 @@ where let mut peer_state_lock = inner_state_lock.lock().unwrap(); let request_id = crate::utils::generate_request_id(&self.entropy_source); - let request = - LSPS1Request::CreateOrder(CreateOrderRequest { order, refund_onchain_address }); + let request = LSPS1Request::CreateOrder(LSPS1CreateOrderRequest { + order, + refund_onchain_address, + }); let msg = LSPS1Message::Request(request_id.clone(), request).into(); peer_state_lock.pending_create_order_requests.insert(request_id.clone()); @@ -215,8 +216,8 @@ where } fn handle_create_order_response( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, - response: CreateOrderResponse, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + response: LSPS1CreateOrderResponse, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); match outer_state_lock.get(counterparty_node_id) { @@ -233,14 +234,14 @@ where }); } - self.pending_events.enqueue(Event::LSPS1Client(LSPS1ClientEvent::OrderCreated { + self.pending_events.enqueue(LSPS1ClientEvent::OrderCreated { request_id, counterparty_node_id: *counterparty_node_id, order_id: response.order_id, order: response.order, payment: response.payment, channel: response.channel, - })); + }); }, None => { return Err(LightningError { @@ -257,7 +258,8 @@ where } fn handle_create_order_error( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, error: ResponseError, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + error: LSPSResponseError, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); match outer_state_lock.get(counterparty_node_id) { @@ -274,13 +276,11 @@ where }); } - self.pending_events.enqueue(Event::LSPS1Client( - 
LSPS1ClientEvent::OrderRequestFailed { - request_id: request_id.clone(), - counterparty_node_id: *counterparty_node_id, - error: error.clone(), - }, - )); + self.pending_events.enqueue(LSPS1ClientEvent::OrderRequestFailed { + request_id: request_id.clone(), + counterparty_node_id: *counterparty_node_id, + error: error.clone(), + }); Err(LightningError { err: format!( @@ -308,8 +308,8 @@ where /// /// [`LSPS1ClientEvent::OrderStatus`]: crate::lsps1::event::LSPS1ClientEvent::OrderStatus pub fn check_order_status( - &self, counterparty_node_id: &PublicKey, order_id: OrderId, - ) -> RequestId { + &self, counterparty_node_id: &PublicKey, order_id: LSPS1OrderId, + ) -> LSPSRequestId { let (request_id, request_msg) = { let mut outer_state_lock = self.per_peer_state.write().unwrap(); let inner_state_lock = outer_state_lock @@ -320,7 +320,8 @@ where let request_id = crate::utils::generate_request_id(&self.entropy_source); peer_state_lock.pending_get_order_requests.insert(request_id.clone()); - let request = LSPS1Request::GetOrder(GetOrderRequest { order_id: order_id.clone() }); + let request = + LSPS1Request::GetOrder(LSPS1GetOrderRequest { order_id: order_id.clone() }); let msg = LSPS1Message::Request(request_id.clone(), request).into(); (request_id, Some(msg)) @@ -334,8 +335,8 @@ where } fn handle_get_order_response( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, - response: CreateOrderResponse, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + response: LSPS1CreateOrderResponse, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); match outer_state_lock.get(counterparty_node_id) { @@ -352,14 +353,14 @@ where }); } - self.pending_events.enqueue(Event::LSPS1Client(LSPS1ClientEvent::OrderStatus { + self.pending_events.enqueue(LSPS1ClientEvent::OrderStatus { request_id, counterparty_node_id: *counterparty_node_id, order_id: response.order_id, order: response.order, payment: response.payment, 
channel: response.channel, - })); + }); }, None => { return Err(LightningError { @@ -376,7 +377,8 @@ where } fn handle_get_order_error( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, error: ResponseError, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + error: LSPSResponseError, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); match outer_state_lock.get(counterparty_node_id) { @@ -393,13 +395,11 @@ where }); } - self.pending_events.enqueue(Event::LSPS1Client( - LSPS1ClientEvent::OrderRequestFailed { - request_id: request_id.clone(), - counterparty_node_id: *counterparty_node_id, - error: error.clone(), - }, - )); + self.pending_events.enqueue(LSPS1ClientEvent::OrderRequestFailed { + request_id: request_id.clone(), + counterparty_node_id: *counterparty_node_id, + error: error.clone(), + }); Err(LightningError { err: format!( @@ -422,7 +422,7 @@ where } } -impl ProtocolMessageHandler for LSPS1ClientHandler +impl LSPSProtocolMessageHandler for LSPS1ClientHandler where ES::Target: EntropySource, { diff --git a/lightning-liquidity/src/lsps1/event.rs b/lightning-liquidity/src/lsps1/event.rs index ff4961d49b8..508a5a42a90 100644 --- a/lightning-liquidity/src/lsps1/event.rs +++ b/lightning-liquidity/src/lsps1/event.rs @@ -7,16 +7,16 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! Contains LSPS1 event types +//! Contains bLIP-51 / LSPS1 event types -use super::msgs::OrderId; -use super::msgs::{ChannelInfo, LSPS1Options, OrderParameters, PaymentInfo}; +use super::msgs::LSPS1OrderId; +use super::msgs::{LSPS1ChannelInfo, LSPS1Options, LSPS1OrderParams, LSPS1PaymentInfo}; -use crate::lsps0::ser::{RequestId, ResponseError}; +use crate::lsps0::ser::{LSPSRequestId, LSPSResponseError}; use bitcoin::secp256k1::PublicKey; -/// An event which an LSPS1 client should take some action in response to. 
+/// An event which an bLIP-51 / LSPS1 client should take some action in response to. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS1ClientEvent { /// A request previously issued via [`LSPS1ClientHandler::request_supported_options`] @@ -28,13 +28,13 @@ pub enum LSPS1ClientEvent { /// [`LSPS1ClientHandler::request_supported_options`]: crate::lsps1::client::LSPS1ClientHandler::request_supported_options /// [`LSPS1ClientHandler::create_order`]: crate::lsps1::client::LSPS1ClientHandler::create_order SupportedOptionsReady { - /// The identifier of the issued LSPS1 `get_info` request, as returned by + /// The identifier of the issued bLIP-51 / LSPS1 `get_info` request, as returned by /// [`LSPS1ClientHandler::request_supported_options`] /// /// This can be used to track which request this event corresponds to. /// /// [`LSPS1ClientHandler::request_supported_options`]: crate::lsps1::client::LSPS1ClientHandler::request_supported_options - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the LSP that provided this response. counterparty_node_id: PublicKey, /// All options supported by the LSP. @@ -45,17 +45,17 @@ pub enum LSPS1ClientEvent { /// /// [`LSPS1ClientHandler::request_supported_options`]: crate::lsps1::client::LSPS1ClientHandler::request_supported_options SupportedOptionsRequestFailed { - /// The identifier of the issued LSPS1 `get_info` request, as returned by + /// The identifier of the issued bLIP-51 / LSPS1 `get_info` request, as returned by /// [`LSPS1ClientHandler::request_supported_options`] /// /// This can be used to track which request this event corresponds to. /// /// [`LSPS1ClientHandler::request_supported_options`]: crate::lsps1::client::LSPS1ClientHandler::request_supported_options - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the LSP that provided this response. counterparty_node_id: PublicKey, /// The error that was returned. 
- error: ResponseError, + error: LSPSResponseError, }, /// Confirmation from the LSP about the order created by the client. /// @@ -68,23 +68,23 @@ pub enum LSPS1ClientEvent { /// /// [`LSPS1ClientHandler::check_order_status`]: crate::lsps1::client::LSPS1ClientHandler::check_order_status OrderCreated { - /// The identifier of the issued LSPS1 `create_order` request, as returned by + /// The identifier of the issued bLIP-51 / LSPS1 `create_order` request, as returned by /// [`LSPS1ClientHandler::create_order`] /// /// This can be used to track which request this event corresponds to. /// /// [`LSPS1ClientHandler::create_order`]: crate::lsps1::client::LSPS1ClientHandler::create_order - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the LSP. counterparty_node_id: PublicKey, /// The id of the channel order. - order_id: OrderId, + order_id: LSPS1OrderId, /// The order created by client and approved by LSP. - order: OrderParameters, + order: LSPS1OrderParams, /// The details regarding payment of the order - payment: PaymentInfo, + payment: LSPS1PaymentInfo, /// The details regarding state of the channel ordered. - channel: Option, + channel: Option, }, /// Information from the LSP about the status of a previously created order. /// @@ -92,23 +92,23 @@ pub enum LSPS1ClientEvent { /// /// [`LSPS1ClientHandler::check_order_status`]: crate::lsps1::client::LSPS1ClientHandler::check_order_status OrderStatus { - /// The identifier of the issued LSPS1 `get_order` request, as returned by + /// The identifier of the issued bLIP-51 / LSPS1 `get_order` request, as returned by /// [`LSPS1ClientHandler::check_order_status`] /// /// This can be used to track which request this event corresponds to. /// /// [`LSPS1ClientHandler::check_order_status`]: crate::lsps1::client::LSPS1ClientHandler::check_order_status - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the LSP. counterparty_node_id: PublicKey, /// The id of the channel order. 
- order_id: OrderId, + order_id: LSPS1OrderId, /// The order created by client and approved by LSP. - order: OrderParameters, + order: LSPS1OrderParams, /// The details regarding payment of the order - payment: PaymentInfo, + payment: LSPS1PaymentInfo, /// The details regarding state of the channel ordered. - channel: Option, + channel: Option, }, /// A request previously issued via [`LSPS1ClientHandler::create_order`] or [`LSPS1ClientHandler::check_order_status`]. /// failed as the LSP returned an error response. @@ -123,11 +123,11 @@ pub enum LSPS1ClientEvent { /// /// [`LSPS1ClientHandler::create_order`]: crate::lsps1::client::LSPS1ClientHandler::create_order /// [`LSPS1ClientHandler::check_order_status`]: crate::lsps1::client::LSPS1ClientHandler::check_order_status - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the LSP. counterparty_node_id: PublicKey, /// The error that was returned. - error: ResponseError, + error: LSPSResponseError, }, } @@ -147,11 +147,11 @@ pub enum LSPS1ServiceEvent { /// An identifier that must be passed to [`LSPS1ServiceHandler::send_payment_details`]. /// /// [`LSPS1ServiceHandler::send_payment_details`]: crate::lsps1::service::LSPS1ServiceHandler::send_payment_details - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the client making the information request. counterparty_node_id: PublicKey, /// The order requested by the client. - order: OrderParameters, + order: LSPS1OrderParams, }, /// A request from client to check the status of the payment. /// @@ -165,19 +165,19 @@ pub enum LSPS1ServiceEvent { /// An identifier that must be passed to [`LSPS1ServiceHandler::update_order_status`]. /// /// [`LSPS1ServiceHandler::update_order_status`]: crate::lsps1::service::LSPS1ServiceHandler::update_order_status - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the client making the information request. 
counterparty_node_id: PublicKey, /// The order id of order with pending payment. - order_id: OrderId, + order_id: LSPS1OrderId, }, /// If error is encountered, refund the amount if paid by the client. Refund { /// An identifier. - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the client making the information request. counterparty_node_id: PublicKey, /// The order id of the refunded order. - order_id: OrderId, + order_id: LSPS1OrderId, }, } diff --git a/lightning-liquidity/src/lsps1/mod.rs b/lightning-liquidity/src/lsps1/mod.rs index d04a26b29c1..b068b186610 100644 --- a/lightning-liquidity/src/lsps1/mod.rs +++ b/lightning-liquidity/src/lsps1/mod.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! Types and primitives that implement the LSPS1: Channel Request specification. +//! Types and primitives that implement the bLIP-51 / LSPS1: Channel Request specification. pub mod client; pub mod event; diff --git a/lightning-liquidity/src/lsps1/msgs.rs b/lightning-liquidity/src/lsps1/msgs.rs index 42f10c04772..dbdeff84997 100644 --- a/lightning-liquidity/src/lsps1/msgs.rs +++ b/lightning-liquidity/src/lsps1/msgs.rs @@ -1,22 +1,20 @@ -//! Message, request, and other primitive types used to implement LSPS1. +//! Message, request, and other primitive types used to implement bLIP-51 / LSPS1. 
+ +use alloc::string::String; + +use core::convert::TryFrom; use crate::lsps0::ser::{ - string_amount, u32_fee_rate, unchecked_address, unchecked_address_option, LSPSMessage, - RequestId, ResponseError, + string_amount, u32_fee_rate, unchecked_address, unchecked_address_option, LSPSDateTime, + LSPSMessage, LSPSRequestId, LSPSResponseError, }; -use crate::prelude::String; - use bitcoin::{Address, FeeRate, OutPoint}; use lightning_invoice::Bolt11Invoice; use serde::{Deserialize, Serialize}; -use chrono::Utc; - -use core::convert::TryFrom; - pub(crate) const LSPS1_GET_INFO_METHOD_NAME: &str = "lsps1.get_info"; pub(crate) const LSPS1_CREATE_ORDER_METHOD_NAME: &str = "lsps1.create_order"; pub(crate) const LSPS1_GET_ORDER_METHOD_NAME: &str = "lsps1.get_order"; @@ -27,15 +25,16 @@ pub(crate) const LSPS1_CREATE_ORDER_REQUEST_ORDER_MISMATCH_ERROR_CODE: i32 = 100 /// The identifier of an order. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Hash)] -pub struct OrderId(pub String); +pub struct LSPS1OrderId(pub String); /// A request made to an LSP to retrieve the supported options. /// -/// Please refer to the [LSPS1 specification](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS1#1-lsps1info) -/// for more information. +/// Please refer to the [bLIP-51 / LSPS1 +/// specification](https://github.com/lightning/blips/blob/master/blip-0051.md#1-lsps1get_info) for +/// more information. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Default)] #[serde(default)] -pub struct GetInfoRequest {} +pub struct LSPS1GetInfoRequest {} /// An object representing the supported protocol options. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] @@ -68,9 +67,9 @@ pub struct LSPS1Options { pub max_channel_balance_sat: u64, } -/// A response to a [`GetInfoRequest`]. +/// A response to a [`LSPS1GetInfoRequest`]. 
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct GetInfoResponse { +pub struct LSPS1GetInfoResponse { /// All options supported by the LSP. #[serde(flatten)] pub options: LSPS1Options, @@ -78,13 +77,14 @@ pub struct GetInfoResponse { /// A request made to an LSP to create an order. /// -/// Please refer to the [LSPS1 specification](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS1#2-lsps1create_order) +/// Please refer to the [bLIP-51 / LSPS1 +/// specification](https://github.com/lightning/blips/blob/master/blip-0051.md#2-lsps1create_order) /// for more information. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct CreateOrderRequest { +pub struct LSPS1CreateOrderRequest { /// The order made. #[serde(flatten)] - pub order: OrderParameters, + pub order: LSPS1OrderParams, /// The address where the LSP will send the funds if the order fails. #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] @@ -92,9 +92,9 @@ pub struct CreateOrderRequest { pub refund_onchain_address: Option
, } -/// An object representing an LSPS1 channel order. +/// An object representing an bLIP-51 / LSPS1 channel order. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct OrderParameters { +pub struct LSPS1OrderParams { /// Indicates how many satoshi the LSP will provide on their side. #[serde(with = "string_amount")] pub lsp_balance_sat: u64, @@ -116,28 +116,28 @@ pub struct OrderParameters { pub announce_channel: bool, } -/// A response to a [`CreateOrderRequest`]. +/// A response to a [`LSPS1CreateOrderRequest`]. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct CreateOrderResponse { +pub struct LSPS1CreateOrderResponse { /// The id of the channel order. - pub order_id: OrderId, + pub order_id: LSPS1OrderId, /// The parameters of channel order. #[serde(flatten)] - pub order: OrderParameters, + pub order: LSPS1OrderParams, /// The datetime when the order was created - pub created_at: chrono::DateTime, + pub created_at: LSPSDateTime, /// The current state of the order. - pub order_state: OrderState, + pub order_state: LSPS1OrderState, /// Contains details about how to pay for the order. - pub payment: PaymentInfo, + pub payment: LSPS1PaymentInfo, /// Contains information about the channel state. - pub channel: Option, + pub channel: Option, } -/// An object representing the state of an order. +/// An object representing the status of an order. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum OrderState { +pub enum LSPS1OrderState { /// The order has been created. Created, /// The LSP has opened the channel and published the funding transaction. @@ -148,20 +148,20 @@ pub enum OrderState { /// Details regarding how to pay for an order. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct PaymentInfo { +pub struct LSPS1PaymentInfo { /// A Lightning payment using BOLT 11. 
- pub bolt11: Option, + pub bolt11: Option, /// An onchain payment. - pub onchain: Option, + pub onchain: Option, } /// A Lightning payment using BOLT 11. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct Bolt11PaymentInfo { +pub struct LSPS1Bolt11PaymentInfo { /// Indicates the current state of the payment. - pub state: PaymentState, + pub state: LSPS1PaymentState, /// The datetime when the payment option expires. - pub expires_at: chrono::DateTime, + pub expires_at: LSPSDateTime, /// The total fee the LSP will charge to open this channel in satoshi. #[serde(with = "string_amount")] pub fee_total_sat: u64, @@ -174,11 +174,11 @@ pub struct Bolt11PaymentInfo { /// An onchain payment. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct OnchainPaymentInfo { +pub struct LSPS1OnchainPaymentInfo { /// Indicates the current state of the payment. - pub state: PaymentState, + pub state: LSPS1PaymentState, /// The datetime when the payment option expires. - pub expires_at: chrono::DateTime, + pub expires_at: LSPSDateTime, /// The total fee the LSP will charge to open this channel in satoshi. #[serde(with = "string_amount")] pub fee_total_sat: u64, @@ -209,7 +209,7 @@ pub struct OnchainPaymentInfo { /// been deprecated and `REFUNDED` should be used instead. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum PaymentState { +pub enum LSPS1PaymentState { /// A payment is expected. ExpectPayment, /// A sufficient payment has been received. @@ -221,7 +221,7 @@ pub enum PaymentState { /// Details regarding a detected on-chain payment. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct OnchainPayment { +pub struct LSPS1OnchainPayment { /// The outpoint of the payment. pub outpoint: String, /// The amount of satoshi paid. @@ -233,60 +233,61 @@ pub struct OnchainPayment { /// Details regarding the state of an ordered channel. 
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct ChannelInfo { +pub struct LSPS1ChannelInfo { /// The datetime when the funding transaction has been published. - pub funded_at: chrono::DateTime, + pub funded_at: LSPSDateTime, /// The outpoint of the funding transaction. pub funding_outpoint: OutPoint, /// The earliest datetime when the channel may be closed by the LSP. - pub expires_at: chrono::DateTime, + pub expires_at: LSPSDateTime, } /// A request made to an LSP to retrieve information about an previously made order. /// -/// Please refer to the [LSPS1 specification](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS1#21-lsps1get_order) +/// Please refer to the [bLIP-51 / LSPS1 +/// specification](https://github.com/lightning/blips/blob/master/blip-0051.md#21-lsps1get_order) /// for more information. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct GetOrderRequest { +pub struct LSPS1GetOrderRequest { /// The id of the order. - pub order_id: OrderId, + pub order_id: LSPS1OrderId, } -/// An enum that captures all the valid JSON-RPC requests in the LSPS1 protocol. +/// An enum that captures all the valid JSON-RPC requests in the bLIP-51 / LSPS1 protocol. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS1Request { /// A request to learn about the options supported by the LSP. - GetInfo(GetInfoRequest), + GetInfo(LSPS1GetInfoRequest), /// A request to create a channel order. - CreateOrder(CreateOrderRequest), + CreateOrder(LSPS1CreateOrderRequest), /// A request to query a previously created channel order. - GetOrder(GetOrderRequest), + GetOrder(LSPS1GetOrderRequest), } -/// An enum that captures all the valid JSON-RPC responses in the LSPS1 protocol. +/// An enum that captures all the valid JSON-RPC responses in the bLIP-51 / LSPS1 protocol. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS1Response { - /// A successful response to a [`GetInfoRequest`]. 
- GetInfo(GetInfoResponse), - /// An error response to a [`GetInfoRequest`]. - GetInfoError(ResponseError), - /// A successful response to a [`CreateOrderRequest`]. - CreateOrder(CreateOrderResponse), - /// An error response to a [`CreateOrderRequest`]. - CreateOrderError(ResponseError), - /// A successful response to a [`GetOrderRequest`]. - GetOrder(CreateOrderResponse), - /// An error response to a [`GetOrderRequest`]. - GetOrderError(ResponseError), + /// A successful response to a [`LSPS1GetInfoRequest`]. + GetInfo(LSPS1GetInfoResponse), + /// An error response to a [`LSPS1GetInfoRequest`]. + GetInfoError(LSPSResponseError), + /// A successful response to a [`LSPS1CreateOrderRequest`]. + CreateOrder(LSPS1CreateOrderResponse), + /// An error response to a [`LSPS1CreateOrderRequest`]. + CreateOrderError(LSPSResponseError), + /// A successful response to a [`LSPS1GetOrderRequest`]. + GetOrder(LSPS1CreateOrderResponse), + /// An error response to a [`LSPS1GetOrderRequest`]. + GetOrderError(LSPSResponseError), } -/// An enum that captures all valid JSON-RPC messages in the LSPS1 protocol. +/// An enum that captures all valid JSON-RPC messages in the bLIP-51 / LSPS1 protocol. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS1Message { /// An LSPS1 JSON-RPC request. - Request(RequestId, LSPS1Request), + Request(LSPSRequestId, LSPS1Request), /// An LSPS1 JSON-RPC response. - Response(RequestId, LSPS1Response), + Response(LSPSRequestId, LSPS1Response), } impl TryFrom for LSPS1Message { @@ -348,7 +349,7 @@ mod tests { fn parse_spec_test_vectors() { // Here, we simply assert that we're able to parse all examples given in LSPS1. 
let json_str = r#"{}"#; - let _get_info_request: GetInfoRequest = serde_json::from_str(json_str).unwrap(); + let _get_info_request: LSPS1GetInfoRequest = serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "min_required_channel_confirmations": 0, @@ -362,7 +363,7 @@ mod tests { "min_channel_balance_sat": "50000", "max_channel_balance_sat": "100000000" }"#; - let _get_info_response: GetInfoResponse = serde_json::from_str(json_str).unwrap(); + let _get_info_response: LSPS1GetInfoResponse = serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "lsp_balance_sat": "5000000", @@ -374,7 +375,8 @@ mod tests { "refund_onchain_address": "bc1qvmsy0f3yyes6z9jvddk8xqwznndmdwapvrc0xrmhd3vqj5rhdrrq6hz49h", "announce_channel": true }"#; - let _create_order_request: CreateOrderRequest = serde_json::from_str(json_str).unwrap(); + let _create_order_request: LSPS1CreateOrderRequest = + serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "state" : "EXPECT_PAYMENT", @@ -383,7 +385,7 @@ mod tests { "order_total_sat": "200888", "invoice": "lnbc252u1p3aht9ysp580g4633gd2x9lc5al0wd8wx0mpn9748jeyz46kqjrpxn52uhfpjqpp5qgf67tcqmuqehzgjm8mzya90h73deafvr4m5705l5u5l4r05l8cqdpud3h8ymm4w3jhytnpwpczqmt0de6xsmre2pkxzm3qydmkzdjrdev9s7zhgfaqxqyjw5qcqpjrzjqt6xptnd85lpqnu2lefq4cx070v5cdwzh2xlvmdgnu7gqp4zvkus5zapryqqx9qqqyqqqqqqqqqqqcsq9q9qyysgqen77vu8xqjelum24hgjpgfdgfgx4q0nehhalcmuggt32japhjuksq9jv6eksjfnppm4hrzsgyxt8y8xacxut9qv3fpyetz8t7tsymygq8yzn05" }"#; - let _bolt11_payment: Bolt11PaymentInfo = serde_json::from_str(json_str).unwrap(); + let _bolt11_payment: LSPS1Bolt11PaymentInfo = serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "state": "EXPECT_PAYMENT", @@ -394,7 +396,7 @@ mod tests { "min_onchain_payment_confirmations": 1, "min_fee_for_0conf": 253 }"#; - let _onchain_payment: OnchainPaymentInfo = serde_json::from_str(json_str).unwrap(); + let _onchain_payment: LSPS1OnchainPaymentInfo = serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "bolt11": { @@ 
-414,7 +416,7 @@ mod tests { "min_fee_for_0conf": 253 } }"#; - let _payment: PaymentInfo = serde_json::from_str(json_str).unwrap(); + let _payment: LSPS1PaymentInfo = serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "order_id": "bb4b5d0a-8334-49d8-9463-90a6d413af7c", @@ -448,26 +450,27 @@ mod tests { }, "channel": null }"#; - let _create_order_response: CreateOrderResponse = serde_json::from_str(json_str).unwrap(); + let _create_order_response: LSPS1CreateOrderResponse = + serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "order_id": "bb4b5d0a-8334-49d8-9463-90a6d413af7c" }"#; - let _get_order_request: GetOrderRequest = serde_json::from_str(json_str).unwrap(); + let _get_order_request: LSPS1GetOrderRequest = serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "funded_at": "2012-04-23T18:25:43.511Z", "funding_outpoint": "0301e0480b374b32851a9462db29dc19fe830a7f7d7a88b81612b9d42099c0ae:0", "expires_at": "2012-04-23T18:25:43.511Z" }"#; - let _channel: ChannelInfo = serde_json::from_str(json_str).unwrap(); + let _channel: LSPS1ChannelInfo = serde_json::from_str(json_str).unwrap(); let json_str = r#""CANCELLED""#; - let payment_state: PaymentState = serde_json::from_str(json_str).unwrap(); - assert_eq!(payment_state, PaymentState::Refunded); + let payment_state: LSPS1PaymentState = serde_json::from_str(json_str).unwrap(); + assert_eq!(payment_state, LSPS1PaymentState::Refunded); let json_str = r#""REFUNDED""#; - let payment_state: PaymentState = serde_json::from_str(json_str).unwrap(); - assert_eq!(payment_state, PaymentState::Refunded); + let payment_state: LSPS1PaymentState = serde_json::from_str(json_str).unwrap(); + assert_eq!(payment_state, LSPS1PaymentState::Refunded); } } diff --git a/lightning-liquidity/src/lsps1/service.rs b/lightning-liquidity/src/lsps1/service.rs index 6520adcf69b..72770be1daa 100644 --- a/lightning-liquidity/src/lsps1/service.rs +++ b/lightning-liquidity/src/lsps1/service.rs @@ -7,20 +7,26 @@ // You may not 
use this file except in accordance with one or both of these // licenses. -//! Contains the main LSPS1 server object, [`LSPS1ServiceHandler`]. +//! Contains the main bLIP-51 / LSPS1 server object, [`LSPS1ServiceHandler`]. + +use alloc::string::String; + +use core::ops::Deref; use super::event::LSPS1ServiceEvent; use super::msgs::{ - ChannelInfo, CreateOrderRequest, CreateOrderResponse, GetInfoResponse, GetOrderRequest, - LSPS1Message, LSPS1Options, LSPS1Request, LSPS1Response, OrderId, OrderParameters, OrderState, - PaymentInfo, LSPS1_CREATE_ORDER_REQUEST_ORDER_MISMATCH_ERROR_CODE, + LSPS1ChannelInfo, LSPS1CreateOrderRequest, LSPS1CreateOrderResponse, LSPS1GetInfoResponse, + LSPS1GetOrderRequest, LSPS1Message, LSPS1Options, LSPS1OrderId, LSPS1OrderParams, + LSPS1OrderState, LSPS1PaymentInfo, LSPS1Request, LSPS1Response, + LSPS1_CREATE_ORDER_REQUEST_ORDER_MISMATCH_ERROR_CODE, }; -use super::utils::is_valid; use crate::message_queue::MessageQueue; -use crate::events::{Event, EventQueue}; -use crate::lsps0::ser::{ProtocolMessageHandler, RequestId, ResponseError}; -use crate::prelude::{new_hash_map, HashMap, String, ToString}; +use crate::events::EventQueue; +use crate::lsps0::ser::{ + LSPSDateTime, LSPSProtocolMessageHandler, LSPSRequestId, LSPSResponseError, +}; +use crate::prelude::{new_hash_map, HashMap}; use crate::sync::{Arc, Mutex, RwLock}; use crate::utils; @@ -34,9 +40,8 @@ use lightning::util::logger::Level; use bitcoin::secp256k1::PublicKey; use chrono::Utc; -use core::ops::Deref; -/// Server-side configuration options for LSPS1 channel requests. +/// Server-side configuration options for bLIP-51 / LSPS1 channel requests. #[derive(Clone, Debug)] pub struct LSPS1ServiceConfig { /// A token to be send with each channel request. 
@@ -55,8 +60,8 @@ impl From for LightningError { #[derive(PartialEq, Debug)] enum OutboundRequestState { - OrderCreated { order_id: OrderId }, - WaitingPayment { order_id: OrderId }, + OrderCreated { order_id: LSPS1OrderId }, + WaitingPayment { order_id: LSPS1OrderId }, Ready, } @@ -72,9 +77,9 @@ impl OutboundRequestState { } struct OutboundLSPS1Config { - order: OrderParameters, - created_at: chrono::DateTime, - payment: PaymentInfo, + order: LSPS1OrderParams, + created_at: LSPSDateTime, + payment: LSPS1PaymentInfo, } struct OutboundCRChannel { @@ -84,8 +89,8 @@ struct OutboundCRChannel { impl OutboundCRChannel { fn new( - order: OrderParameters, created_at: chrono::DateTime, order_id: OrderId, - payment: PaymentInfo, + order: LSPS1OrderParams, created_at: LSPSDateTime, order_id: LSPS1OrderId, + payment: LSPS1PaymentInfo, ) -> Self { Self { state: OutboundRequestState::OrderCreated { order_id }, @@ -106,26 +111,26 @@ impl OutboundCRChannel { #[derive(Default)] struct PeerState { - outbound_channels_by_order_id: HashMap, - request_to_cid: HashMap, - pending_requests: HashMap, + outbound_channels_by_order_id: HashMap, + request_to_cid: HashMap, + pending_requests: HashMap, } impl PeerState { - fn insert_outbound_channel(&mut self, order_id: OrderId, channel: OutboundCRChannel) { + fn insert_outbound_channel(&mut self, order_id: LSPS1OrderId, channel: OutboundCRChannel) { self.outbound_channels_by_order_id.insert(order_id, channel); } - fn insert_request(&mut self, request_id: RequestId, channel_id: u128) { + fn insert_request(&mut self, request_id: LSPSRequestId, channel_id: u128) { self.request_to_cid.insert(request_id, channel_id); } - fn remove_outbound_channel(&mut self, order_id: OrderId) { + fn remove_outbound_channel(&mut self, order_id: LSPS1OrderId) { self.outbound_channels_by_order_id.remove(&order_id); } } -/// The main object allowing to send and receive LSPS1 messages. +/// The main object allowing to send and receive bLIP-51 / LSPS1 messages. 
pub struct LSPS1ServiceHandler where ES::Target: EntropySource, @@ -165,9 +170,9 @@ where } fn handle_get_info_request( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, ) -> Result<(), LightningError> { - let response = LSPS1Response::GetInfo(GetInfoResponse { + let response = LSPS1Response::GetInfo(LSPS1GetInfoResponse { options: self .config .supported_options @@ -185,10 +190,11 @@ where } fn handle_create_order_request( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, params: CreateOrderRequest, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + params: LSPS1CreateOrderRequest, ) -> Result<(), LightningError> { if !is_valid(¶ms.order, &self.config.supported_options.as_ref().unwrap()) { - let response = LSPS1Response::CreateOrderError(ResponseError { + let response = LSPS1Response::CreateOrderError(LSPSResponseError { code: LSPS1_CREATE_ORDER_REQUEST_ORDER_MISMATCH_ERROR_CODE, message: format!("Order does not match options supported by LSP server"), data: Some(format!( @@ -220,13 +226,11 @@ where .insert(request_id.clone(), LSPS1Request::CreateOrder(params.clone())); } - self.pending_events.enqueue(Event::LSPS1Service( - LSPS1ServiceEvent::RequestForPaymentDetails { - request_id, - counterparty_node_id: *counterparty_node_id, - order: params.order, - }, - )); + self.pending_events.enqueue(LSPS1ServiceEvent::RequestForPaymentDetails { + request_id, + counterparty_node_id: *counterparty_node_id, + order: params.order, + }); Ok(()) } @@ -237,8 +241,8 @@ where /// /// [`LSPS1ServiceEvent::RequestForPaymentDetails`]: crate::lsps1::event::LSPS1ServiceEvent::RequestForPaymentDetails pub fn send_payment_details( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, payment: PaymentInfo, - created_at: chrono::DateTime, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + payment: LSPS1PaymentInfo, created_at: 
LSPSDateTime, ) -> Result<(), APIError> { let (result, response) = { let outer_state_lock = self.per_peer_state.read().unwrap(); @@ -259,10 +263,10 @@ where peer_state_lock.insert_outbound_channel(order_id.clone(), channel); - let response = LSPS1Response::CreateOrder(CreateOrderResponse { + let response = LSPS1Response::CreateOrder(LSPS1CreateOrderResponse { order: params.order, order_id, - order_state: OrderState::Created, + order_state: LSPS1OrderState::Created, created_at, payment, channel: None, @@ -303,10 +307,11 @@ where } fn handle_get_order_request( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, params: GetOrderRequest, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + params: LSPS1GetOrderRequest, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); - match outer_state_lock.get(&counterparty_node_id) { + match outer_state_lock.get(counterparty_node_id) { Some(inner_state_lock) => { let mut peer_state_lock = inner_state_lock.lock().unwrap(); @@ -323,11 +328,11 @@ where if let Err(e) = outbound_channel.awaiting_payment() { peer_state_lock.outbound_channels_by_order_id.remove(¶ms.order_id); - self.pending_events.enqueue(Event::LSPS1Service(LSPS1ServiceEvent::Refund { + self.pending_events.enqueue(LSPS1ServiceEvent::Refund { request_id, counterparty_node_id: *counterparty_node_id, order_id: params.order_id, - })); + }); return Err(e); } @@ -335,13 +340,11 @@ where .pending_requests .insert(request_id.clone(), LSPS1Request::GetOrder(params.clone())); - self.pending_events.enqueue(Event::LSPS1Service( - LSPS1ServiceEvent::CheckPaymentConfirmation { - request_id, - counterparty_node_id: *counterparty_node_id, - order_id: params.order_id, - }, - )); + self.pending_events.enqueue(LSPS1ServiceEvent::CheckPaymentConfirmation { + request_id, + counterparty_node_id: *counterparty_node_id, + order_id: params.order_id, + }); }, None => { return Err(LightningError { @@ -363,8 +366,8 @@ 
where /// /// [`LSPS1ServiceEvent::CheckPaymentConfirmation`]: crate::lsps1::event::LSPS1ServiceEvent::CheckPaymentConfirmation pub fn update_order_status( - &self, request_id: RequestId, counterparty_node_id: PublicKey, order_id: OrderId, - order_state: OrderState, channel: Option, + &self, request_id: LSPSRequestId, counterparty_node_id: PublicKey, order_id: LSPS1OrderId, + order_state: LSPS1OrderState, channel: Option, ) -> Result<(), APIError> { let (result, response) = { let outer_state_lock = self.per_peer_state.read().unwrap(); @@ -378,11 +381,11 @@ where { let config = &outbound_channel.config; - let response = LSPS1Response::GetOrder(CreateOrderResponse { + let response = LSPS1Response::GetOrder(LSPS1CreateOrderResponse { order_id, order: config.order.clone(), order_state, - created_at: config.created_at, + created_at: config.created_at.clone(), payment: config.payment.clone(), channel, }); @@ -416,13 +419,13 @@ where result } - fn generate_order_id(&self) -> OrderId { + fn generate_order_id(&self) -> LSPS1OrderId { let bytes = self.entropy_source.get_secure_random_bytes(); - OrderId(utils::hex_str(&bytes[0..16])) + LSPS1OrderId(utils::hex_str(&bytes[0..16])) } } -impl ProtocolMessageHandler +impl LSPSProtocolMessageHandler for LSPS1ServiceHandler where ES::Target: EntropySource, @@ -457,3 +460,25 @@ where } } } + +fn check_range(min: u64, max: u64, value: u64) -> bool { + (value >= min) && (value <= max) +} + +fn is_valid(order: &LSPS1OrderParams, options: &LSPS1Options) -> bool { + let bool = check_range( + options.min_initial_client_balance_sat, + options.max_initial_client_balance_sat, + order.client_balance_sat, + ) && check_range( + options.min_initial_lsp_balance_sat, + options.max_initial_lsp_balance_sat, + order.lsp_balance_sat, + ) && check_range( + 1, + options.max_channel_expiry_blocks.into(), + order.channel_expiry_blocks.into(), + ); + + bool +} diff --git a/lightning-liquidity/src/lsps2/client.rs b/lightning-liquidity/src/lsps2/client.rs 
index 10707bc8c5a..455c8c86e24 100644 --- a/lightning-liquidity/src/lsps2/client.rs +++ b/lightning-liquidity/src/lsps2/client.rs @@ -6,13 +6,18 @@ // , at your option. You may not use this file except in accordance with one or both of these // licenses. -//! Contains the main LSPS2 client object, [`LSPS2ClientHandler`]. +//! Contains the main bLIP-52 / LSPS2 client object, [`LSPS2ClientHandler`]. -use crate::events::{Event, EventQueue}; -use crate::lsps0::ser::{ProtocolMessageHandler, RequestId, ResponseError}; +use alloc::string::{String, ToString}; + +use core::default::Default; +use core::ops::Deref; + +use crate::events::EventQueue; +use crate::lsps0::ser::{LSPSProtocolMessageHandler, LSPSRequestId, LSPSResponseError}; use crate::lsps2::event::LSPS2ClientEvent; use crate::message_queue::MessageQueue; -use crate::prelude::{new_hash_map, new_hash_set, HashMap, HashSet, String, ToString}; +use crate::prelude::{new_hash_map, new_hash_set, HashMap, HashSet}; use crate::sync::{Arc, Mutex, RwLock}; use lightning::ln::msgs::{ErrorAction, LightningError}; @@ -22,12 +27,9 @@ use lightning::util::logger::Level; use bitcoin::secp256k1::PublicKey; -use core::default::Default; -use core::ops::Deref; - use crate::lsps2::msgs::{ - BuyRequest, BuyResponse, GetInfoRequest, GetInfoResponse, LSPS2Message, LSPS2Request, - LSPS2Response, OpeningFeeParams, + LSPS2BuyRequest, LSPS2BuyResponse, LSPS2GetInfoRequest, LSPS2GetInfoResponse, LSPS2Message, + LSPS2OpeningFeeParams, LSPS2Request, LSPS2Response, }; /// Client-side configuration options for JIT channels. @@ -45,8 +47,8 @@ impl InboundJITChannel { } struct PeerState { - pending_get_info_requests: HashSet, - pending_buy_requests: HashMap, + pending_get_info_requests: HashSet, + pending_buy_requests: HashMap, } impl PeerState { @@ -57,13 +59,13 @@ impl PeerState { } } -/// The main object allowing to send and receive LSPS2 messages. +/// The main object allowing to send and receive bLIP-52 / LSPS2 messages. 
/// /// Note that currently only the 'client-trusts-LSP' trust model is supported, i.e., we don't /// provide any additional API guidance to allow withholding the preimage until the channel is -/// opened. Please refer to the [`LSPS2 specification`] for more information. +/// opened. Please refer to the [`bLIP-52 / LSPS2 specification`] for more information. /// -/// [`LSPS2 specification`]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS2#trust-models +/// [`bLIP-52 / LSPS2 specification`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models pub struct LSPS2ClientHandler where ES::Target: EntropySource, @@ -105,12 +107,12 @@ where /// `token` is an optional `String` that will be provided to the LSP. /// It can be used by the LSP as an API key, coupon code, or some other way to identify a user. /// - /// Returns the used [`RequestId`], which will be returned via [`OpeningParametersReady`]. + /// Returns the used [`LSPSRequestId`], which will be returned via [`OpeningParametersReady`]. 
/// /// [`OpeningParametersReady`]: crate::lsps2::event::LSPS2ClientEvent::OpeningParametersReady pub fn request_opening_params( &self, counterparty_node_id: PublicKey, token: Option, - ) -> RequestId { + ) -> LSPSRequestId { let request_id = crate::utils::generate_request_id(&self.entropy_source); { @@ -122,7 +124,7 @@ where peer_state_lock.pending_get_info_requests.insert(request_id.clone()); } - let request = LSPS2Request::GetInfo(GetInfoRequest { token }); + let request = LSPS2Request::GetInfo(LSPS2GetInfoRequest { token }); let msg = LSPS2Message::Request(request_id.clone(), request).into(); self.pending_messages.enqueue(&counterparty_node_id, msg); @@ -149,8 +151,8 @@ where /// [`InvoiceParametersReady`]: crate::lsps2::event::LSPS2ClientEvent::InvoiceParametersReady pub fn select_opening_params( &self, counterparty_node_id: PublicKey, payment_size_msat: Option, - opening_fee_params: OpeningFeeParams, - ) -> Result { + opening_fee_params: LSPS2OpeningFeeParams, + ) -> Result { let request_id = crate::utils::generate_request_id(&self.entropy_source); { @@ -173,7 +175,7 @@ where } } - let request = LSPS2Request::Buy(BuyRequest { opening_fee_params, payment_size_msat }); + let request = LSPS2Request::Buy(LSPS2BuyRequest { opening_fee_params, payment_size_msat }); let msg = LSPS2Message::Request(request_id.clone(), request).into(); self.pending_messages.enqueue(&counterparty_node_id, msg); @@ -181,7 +183,8 @@ where } fn handle_get_info_response( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, result: GetInfoResponse, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + result: LSPS2GetInfoResponse, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); match outer_state_lock.get(counterparty_node_id) { @@ -198,13 +201,11 @@ where }); } - self.pending_events.enqueue(Event::LSPS2Client( - LSPS2ClientEvent::OpeningParametersReady { - request_id, - counterparty_node_id: *counterparty_node_id, 
- opening_fee_params_menu: result.opening_fee_params_menu, - }, - )); + self.pending_events.enqueue(LSPS2ClientEvent::OpeningParametersReady { + request_id, + counterparty_node_id: *counterparty_node_id, + opening_fee_params_menu: result.opening_fee_params_menu, + }); }, None => { return Err(LightningError { @@ -221,7 +222,8 @@ where } fn handle_get_info_error( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, _error: ResponseError, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + _error: LSPSResponseError, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); match outer_state_lock.get(counterparty_node_id) { @@ -247,7 +249,8 @@ where } fn handle_buy_response( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, result: BuyResponse, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + result: LSPS2BuyResponse, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); match outer_state_lock.get(counterparty_node_id) { @@ -264,15 +267,13 @@ where })?; if let Ok(intercept_scid) = result.jit_channel_scid.to_scid() { - self.pending_events.enqueue(Event::LSPS2Client( - LSPS2ClientEvent::InvoiceParametersReady { - request_id, - counterparty_node_id: *counterparty_node_id, - intercept_scid, - cltv_expiry_delta: result.lsp_cltv_expiry_delta, - payment_size_msat: jit_channel.payment_size_msat, - }, - )); + self.pending_events.enqueue(LSPS2ClientEvent::InvoiceParametersReady { + request_id, + counterparty_node_id: *counterparty_node_id, + intercept_scid, + cltv_expiry_delta: result.lsp_cltv_expiry_delta, + payment_size_msat: jit_channel.payment_size_msat, + }); } else { return Err(LightningError { err: format!( @@ -297,7 +298,8 @@ where } fn handle_buy_error( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, _error: ResponseError, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + _error: 
LSPSResponseError, ) -> Result<(), LightningError> { let outer_state_lock = self.per_peer_state.read().unwrap(); match outer_state_lock.get(counterparty_node_id) { @@ -318,7 +320,7 @@ where } } -impl ProtocolMessageHandler for LSPS2ClientHandler +impl LSPSProtocolMessageHandler for LSPS2ClientHandler where ES::Target: EntropySource, { diff --git a/lightning-liquidity/src/lsps2/event.rs b/lightning-liquidity/src/lsps2/event.rs index 1e28b259757..f20a74e199a 100644 --- a/lightning-liquidity/src/lsps2/event.rs +++ b/lightning-liquidity/src/lsps2/event.rs @@ -7,11 +7,12 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! Contains LSPS2 event types +//! Contains bLIP-52 / LSPS2 event types -use super::msgs::OpeningFeeParams; -use crate::lsps0::ser::RequestId; -use crate::prelude::{String, Vec}; +use super::msgs::LSPS2OpeningFeeParams; +use crate::lsps0::ser::LSPSRequestId; +use alloc::string::String; +use alloc::vec::Vec; use bitcoin::secp256k1::PublicKey; @@ -25,18 +26,18 @@ pub enum LSPS2ClientEvent { /// /// [`LSPS2ClientHandler::select_opening_params`]: crate::lsps2::client::LSPS2ClientHandler::select_opening_params OpeningParametersReady { - /// The identifier of the issued LSPS2 `get_info` request, as returned by + /// The identifier of the issued bLIP-52 / LSPS2 `get_info` request, as returned by /// [`LSPS2ClientHandler::request_opening_params`] /// /// This can be used to track which request this event corresponds to. /// /// [`LSPS2ClientHandler::request_opening_params`]: crate::lsps2::client::LSPS2ClientHandler::request_opening_params - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the LSP that provided this response. counterparty_node_id: PublicKey, /// The menu of fee parameters the LSP is offering at this time. /// You must select one of these if you wish to proceed. 
- opening_fee_params_menu: Vec, + opening_fee_params_menu: Vec, }, /// Provides the necessary information to generate a payable invoice that then may be given to /// the payer. @@ -44,13 +45,13 @@ pub enum LSPS2ClientEvent { /// When the invoice is paid, the LSP will open a channel with the previously agreed upon /// parameters to you. InvoiceParametersReady { - /// The identifier of the issued LSPS2 `buy` request, as returned by + /// The identifier of the issued bLIP-52 / LSPS2 `buy` request, as returned by /// [`LSPS2ClientHandler::select_opening_params`]. /// /// This can be used to track which request this event corresponds to. /// /// [`LSPS2ClientHandler::select_opening_params`]: crate::lsps2::client::LSPS2ClientHandler::select_opening_params - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the LSP. counterparty_node_id: PublicKey, /// The intercept short channel id to use in the route hint. @@ -62,7 +63,7 @@ pub enum LSPS2ClientEvent { }, } -/// An event which an LSPS2 server should take some action in response to. +/// An event which an bLIP-52 / LSPS2 server should take some action in response to. #[derive(Clone, Debug, PartialEq, Eq)] pub enum LSPS2ServiceEvent { /// A request from a client for information about JIT Channel parameters. @@ -79,7 +80,7 @@ pub enum LSPS2ServiceEvent { /// An identifier that must be passed to [`LSPS2ServiceHandler::opening_fee_params_generated`]. /// /// [`LSPS2ServiceHandler::opening_fee_params_generated`]: crate::lsps2::service::LSPS2ServiceHandler::opening_fee_params_generated - request_id: RequestId, + request_id: LSPSRequestId, /// The node id of the client making the information request. counterparty_node_id: PublicKey, /// An optional token that can be used as an API key, coupon code, etc. @@ -99,11 +100,11 @@ pub enum LSPS2ServiceEvent { /// An identifier that must be passed into [`LSPS2ServiceHandler::invoice_parameters_generated`]. 
/// /// [`LSPS2ServiceHandler::invoice_parameters_generated`]: crate::lsps2::service::LSPS2ServiceHandler::invoice_parameters_generated - request_id: RequestId, + request_id: LSPSRequestId, /// The client node id that is making this request. counterparty_node_id: PublicKey, /// The channel parameters they have selected. - opening_fee_params: OpeningFeeParams, + opening_fee_params: LSPS2OpeningFeeParams, /// The size of the initial payment they would like to receive. payment_size_msat: Option, }, diff --git a/lightning-liquidity/src/lsps2/mod.rs b/lightning-liquidity/src/lsps2/mod.rs index 0a29ac636d6..1d5fb76d3b4 100644 --- a/lightning-liquidity/src/lsps2/mod.rs +++ b/lightning-liquidity/src/lsps2/mod.rs @@ -7,7 +7,7 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! Implementation of LSPS2: JIT Channel Negotiation specification. +//! Implementation of bLIP-52 / LSPS2: JIT Channel Negotiation specification. pub mod client; pub mod event; diff --git a/lightning-liquidity/src/lsps2/msgs.rs b/lightning-liquidity/src/lsps2/msgs.rs index f7c0df9db06..84875d4ab7c 100644 --- a/lightning-liquidity/src/lsps2/msgs.rs +++ b/lightning-liquidity/src/lsps2/msgs.rs @@ -1,19 +1,21 @@ -//! Message, request, and other primitive types used to implement LSPS2. +//! Message, request, and other primitive types used to implement bLIP-52 / LSPS2. 
+ +use alloc::string::String; +use alloc::vec::Vec; use core::convert::TryFrom; use bitcoin::hashes::hmac::{Hmac, HmacEngine}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::{Hash, HashEngine}; -use chrono::Utc; use serde::{Deserialize, Serialize}; use lightning::util::scid_utils; use crate::lsps0::ser::{ - string_amount, string_amount_option, LSPSMessage, RequestId, ResponseError, + string_amount, string_amount_option, LSPSDateTime, LSPSMessage, LSPSRequestId, + LSPSResponseError, }; -use crate::prelude::{String, Vec}; use crate::utils; pub(crate) const LSPS2_GET_INFO_METHOD_NAME: &str = "lsps2.get_info"; @@ -27,7 +29,7 @@ pub(crate) const LSPS2_BUY_REQUEST_PAYMENT_SIZE_TOO_LARGE_ERROR_CODE: i32 = 203; #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] /// A request made to an LSP to learn their current channel fees and parameters. -pub struct GetInfoRequest { +pub struct LSPS2GetInfoRequest { /// An optional token to provide to the LSP. pub token: Option, } @@ -35,14 +37,14 @@ pub struct GetInfoRequest { /// Fees and parameters for a JIT Channel without the promise. /// /// The promise will be calculated automatically for the LSP and this type converted -/// into an [`OpeningFeeParams`] for transit over the wire. -pub struct RawOpeningFeeParams { +/// into an [`LSPS2OpeningFeeParams`] for transit over the wire. +pub struct LSPS2RawOpeningFeeParams { /// The minimum fee required for the channel open. pub min_fee_msat: u64, /// A fee proportional to the size of the initial payment. pub proportional: u32, /// An [`ISO8601`](https://www.iso.org/iso-8601-date-and-time-format.html) formatted date for which these params are valid. - pub valid_until: chrono::DateTime, + pub valid_until: LSPSDateTime, /// The number of blocks after confirmation that the LSP promises it will keep the channel alive without closing. pub min_lifetime: u32, /// The maximum number of blocks that the client is allowed to set its `to_self_delay` parameter. 
@@ -53,8 +55,10 @@ pub struct RawOpeningFeeParams { pub max_payment_size_msat: u64, } -impl RawOpeningFeeParams { - pub(crate) fn into_opening_fee_params(self, promise_secret: &[u8; 32]) -> OpeningFeeParams { +impl LSPS2RawOpeningFeeParams { + pub(crate) fn into_opening_fee_params( + self, promise_secret: &[u8; 32], + ) -> LSPS2OpeningFeeParams { let mut hmac = HmacEngine::::new(promise_secret); hmac.input(&self.min_fee_msat.to_be_bytes()); hmac.input(&self.proportional.to_be_bytes()); @@ -65,7 +69,7 @@ impl RawOpeningFeeParams { hmac.input(&self.max_payment_size_msat.to_be_bytes()); let promise_bytes = Hmac::from_engine(hmac).to_byte_array(); let promise = utils::hex_str(&promise_bytes[..]); - OpeningFeeParams { + LSPS2OpeningFeeParams { min_fee_msat: self.min_fee_msat, proportional: self.proportional, valid_until: self.valid_until.clone(), @@ -82,16 +86,16 @@ impl RawOpeningFeeParams { /// Fees and parameters for a JIT Channel including the promise. /// /// The promise is an HMAC calculated using a secret known to the LSP and the rest of the fields as input. -/// It exists so the LSP can verify the authenticity of a client provided OpeningFeeParams by recalculating +/// It exists so the LSP can verify the authenticity of a client provided LSPS2OpeningFeeParams by recalculating /// the promise using the secret. Once verified they can be confident it was not modified by the client. -pub struct OpeningFeeParams { +pub struct LSPS2OpeningFeeParams { /// The minimum fee required for the channel open. #[serde(with = "string_amount")] pub min_fee_msat: u64, /// A fee proportional to the size of the initial payment. pub proportional: u32, /// An [`ISO8601`](https://www.iso.org/iso-8601-date-and-time-format.html) formatted date for which these params are valid. - pub valid_until: chrono::DateTime, + pub valid_until: LSPSDateTime, /// The number of blocks after confirmation that the LSP promises it will keep the channel alive without closing. 
pub min_lifetime: u32, /// The maximum number of blocks that the client is allowed to set its `to_self_delay` parameter. @@ -106,18 +110,18 @@ pub struct OpeningFeeParams { pub promise: String, } -/// A response to a [`GetInfoRequest`] +/// A response to a [`LSPS2GetInfoRequest`] #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct GetInfoResponse { +pub struct LSPS2GetInfoResponse { /// A set of opening fee parameters. - pub opening_fee_params_menu: Vec, + pub opening_fee_params_menu: Vec, } /// A request to buy a JIT channel. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct BuyRequest { +pub struct LSPS2BuyRequest { /// The fee parameters you would like to use. - pub opening_fee_params: OpeningFeeParams, + pub opening_fee_params: LSPS2OpeningFeeParams, /// The size of the initial payment you expect to receive. #[serde(default)] #[serde(skip_serializing_if = "Option::is_none")] @@ -127,9 +131,9 @@ pub struct BuyRequest { /// A newtype that holds a `short_channel_id` in human readable format of BBBxTTTx000. #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct InterceptScid(String); +pub struct LSPS2InterceptScid(String); -impl From for InterceptScid { +impl From for LSPS2InterceptScid { fn from(scid: u64) -> Self { let block = scid_utils::block_from_scid(scid); let tx_index = scid_utils::tx_index_from_scid(scid); @@ -139,20 +143,20 @@ impl From for InterceptScid { } } -impl InterceptScid { - /// Try to convert a [`InterceptScid`] into a u64 used by LDK. +impl LSPS2InterceptScid { + /// Try to convert a [`LSPS2InterceptScid`] into a u64 used by LDK. pub fn to_scid(&self) -> Result { utils::scid_from_human_readable_string(&self.0) } } -/// A response to a [`BuyRequest`]. +/// A response to a [`LSPS2BuyRequest`]. /// /// Includes information needed to construct an invoice. 
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -pub struct BuyResponse { +pub struct LSPS2BuyResponse { /// The intercept short channel id used by LSP to identify need to open channel. - pub jit_channel_scid: InterceptScid, + pub jit_channel_scid: LSPS2InterceptScid, /// The locktime expiry delta the lsp requires. pub lsp_cltv_expiry_delta: u32, /// A flag that indicates who is trusting who. @@ -161,34 +165,34 @@ pub struct BuyResponse { } #[derive(Clone, Debug, PartialEq, Eq)] -/// An enum that captures all the valid JSON-RPC requests in the LSPS2 protocol. +/// An enum that captures all the valid JSON-RPC requests in the bLIP-52 / LSPS2 protocol. pub enum LSPS2Request { /// A request to learn an LSP's channel fees and parameters. - GetInfo(GetInfoRequest), + GetInfo(LSPS2GetInfoRequest), /// A request to buy a JIT channel from an LSP. - Buy(BuyRequest), + Buy(LSPS2BuyRequest), } #[derive(Clone, Debug, PartialEq, Eq)] -/// An enum that captures all the valid JSON-RPC responses in the LSPS2 protocol. +/// An enum that captures all the valid JSON-RPC responses in the bLIP-52 / LSPS2 protocol. pub enum LSPS2Response { /// A successful response to a [`LSPS2Request::GetInfo`] request. - GetInfo(GetInfoResponse), + GetInfo(LSPS2GetInfoResponse), /// An error response to a [`LSPS2Request::GetInfo`] request. - GetInfoError(ResponseError), + GetInfoError(LSPSResponseError), /// A successful response to a [`LSPS2Request::Buy`] request. - Buy(BuyResponse), + Buy(LSPS2BuyResponse), /// An error response to a [`LSPS2Request::Buy`] request. - BuyError(ResponseError), + BuyError(LSPSResponseError), } #[derive(Clone, Debug, PartialEq, Eq)] -/// An enum that captures all valid JSON-RPC messages in the LSPS2 protocol. +/// An enum that captures all valid JSON-RPC messages in the bLIP-52 / LSPS2 protocol. pub enum LSPS2Message { /// An LSPS2 JSON-RPC request. - Request(RequestId, LSPS2Request), + Request(LSPSRequestId, LSPS2Request), /// An LSPS2 JSON-RPC response. 
- Response(RequestId, LSPS2Response), + Response(LSPSRequestId, LSPS2Response), } impl TryFrom for LSPS2Message { @@ -212,21 +216,23 @@ impl From for LSPSMessage { #[cfg(test)] mod tests { use super::*; + use crate::alloc::string::ToString; use crate::lsps2::utils::is_valid_opening_fee_params; + use core::str::FromStr; + #[test] fn into_opening_fee_params_produces_valid_promise() { let min_fee_msat = 100; let proportional = 21; - let valid_until: chrono::DateTime = - chrono::DateTime::parse_from_rfc3339("2035-05-20T08:30:45Z").unwrap().into(); + let valid_until = LSPSDateTime::from_str("2035-05-20T08:30:45Z").unwrap(); let min_lifetime = 144; let max_client_to_self_delay = 128; let min_payment_size_msat = 1; let max_payment_size_msat = 100_000_000; - let raw = RawOpeningFeeParams { + let raw = LSPS2RawOpeningFeeParams { min_fee_msat, proportional, valid_until: valid_until.clone().into(), @@ -255,16 +261,16 @@ mod tests { fn changing_single_field_produced_invalid_params() { let min_fee_msat = 100; let proportional = 21; - let valid_until = chrono::DateTime::parse_from_rfc3339("2035-05-20T08:30:45Z").unwrap(); + let valid_until = LSPSDateTime::from_str("2035-05-20T08:30:45Z").unwrap(); let min_lifetime = 144; let max_client_to_self_delay = 128; let min_payment_size_msat = 1; let max_payment_size_msat = 100_000_000; - let raw = RawOpeningFeeParams { + let raw = LSPS2RawOpeningFeeParams { min_fee_msat, proportional, - valid_until: valid_until.into(), + valid_until, min_lifetime, max_client_to_self_delay, min_payment_size_msat, @@ -282,16 +288,16 @@ mod tests { fn wrong_secret_produced_invalid_params() { let min_fee_msat = 100; let proportional = 21; - let valid_until = chrono::DateTime::parse_from_rfc3339("2035-05-20T08:30:45Z").unwrap(); + let valid_until = LSPSDateTime::from_str("2035-05-20T08:30:45Z").unwrap(); let min_lifetime = 144; let max_client_to_self_delay = 128; let min_payment_size_msat = 1; let max_payment_size_msat = 100_000_000; - let raw = 
RawOpeningFeeParams { + let raw = LSPS2RawOpeningFeeParams { min_fee_msat, proportional, - valid_until: valid_until.into(), + valid_until, min_lifetime, max_client_to_self_delay, min_payment_size_msat, @@ -311,16 +317,16 @@ mod tests { fn expired_params_produces_invalid_params() { let min_fee_msat = 100; let proportional = 21; - let valid_until = chrono::DateTime::parse_from_rfc3339("2023-05-20T08:30:45Z").unwrap(); + let valid_until = LSPSDateTime::from_str("2023-05-20T08:30:45Z").unwrap(); let min_lifetime = 144; let max_client_to_self_delay = 128; let min_payment_size_msat = 1; let max_payment_size_msat = 100_000_000; - let raw = RawOpeningFeeParams { + let raw = LSPS2RawOpeningFeeParams { min_fee_msat, proportional, - valid_until: valid_until.into(), + valid_until, min_lifetime, max_client_to_self_delay, min_payment_size_msat, @@ -337,16 +343,16 @@ mod tests { fn buy_request_serialization() { let min_fee_msat = 100; let proportional = 21; - let valid_until = chrono::DateTime::parse_from_rfc3339("2023-05-20T08:30:45Z").unwrap(); + let valid_until = LSPSDateTime::from_str("2023-05-20T08:30:45Z").unwrap(); let min_lifetime = 144; let max_client_to_self_delay = 128; let min_payment_size_msat = 1; let max_payment_size_msat = 100_000_000; - let raw = RawOpeningFeeParams { + let raw = LSPS2RawOpeningFeeParams { min_fee_msat, proportional, - valid_until: valid_until.into(), + valid_until, min_lifetime, max_client_to_self_delay, min_payment_size_msat, @@ -362,13 +368,13 @@ mod tests { let payment_size_msat = Some(1234); let buy_request_fixed = - BuyRequest { opening_fee_params: opening_fee_params.clone(), payment_size_msat }; + LSPS2BuyRequest { opening_fee_params: opening_fee_params.clone(), payment_size_msat }; let json_str = 
r#"{"opening_fee_params":{"max_client_to_self_delay":128,"max_payment_size_msat":"100000000","min_fee_msat":"100","min_lifetime":144,"min_payment_size_msat":"1","promise":"1134a5c51e3ba2e8f4259610d5e12c1bf4c50ddcd3f8af563e0a00d1fff41dea","proportional":21,"valid_until":"2023-05-20T08:30:45Z"},"payment_size_msat":"1234"}"#; assert_eq!(json_str, serde_json::json!(buy_request_fixed).to_string()); assert_eq!(buy_request_fixed, serde_json::from_str(json_str).unwrap()); let payment_size_msat = None; - let buy_request_variable = BuyRequest { opening_fee_params, payment_size_msat }; + let buy_request_variable = LSPS2BuyRequest { opening_fee_params, payment_size_msat }; // Check we skip serialization if payment_size_msat is None. let json_str = r#"{"opening_fee_params":{"max_client_to_self_delay":128,"max_payment_size_msat":"100000000","min_fee_msat":"100","min_lifetime":144,"min_payment_size_msat":"1","promise":"1134a5c51e3ba2e8f4259610d5e12c1bf4c50ddcd3f8af563e0a00d1fff41dea","proportional":21,"valid_until":"2023-05-20T08:30:45Z"}}"#; @@ -407,7 +413,7 @@ mod tests { } ] }"#; - let _get_info_response: GetInfoResponse = serde_json::from_str(json_str).unwrap(); + let _get_info_response: LSPS2GetInfoResponse = serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "opening_fee_params": { @@ -422,13 +428,13 @@ mod tests { }, "payment_size_msat": "42000" }"#; - let _buy_request: BuyRequest = serde_json::from_str(json_str).unwrap(); + let _buy_request: LSPS2BuyRequest = serde_json::from_str(json_str).unwrap(); let json_str = r#"{ "jit_channel_scid": "29451x4815x1", "lsp_cltv_expiry_delta" : 144, "client_trusts_lsp": false }"#; - let _buy_response: BuyResponse = serde_json::from_str(json_str).unwrap(); + let _buy_response: LSPS2BuyResponse = serde_json::from_str(json_str).unwrap(); } } diff --git a/lightning-liquidity/src/lsps2/payment_queue.rs b/lightning-liquidity/src/lsps2/payment_queue.rs index 1ad3164df36..d956dfc9d81 100644 --- 
a/lightning-liquidity/src/lsps2/payment_queue.rs +++ b/lightning-liquidity/src/lsps2/payment_queue.rs @@ -1,4 +1,5 @@ -use crate::prelude::Vec; +use alloc::vec::Vec; + use lightning::ln::channelmanager::InterceptId; use lightning_types::payment::PaymentHash; diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index d4682a9b346..8de630561ee 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -7,11 +7,17 @@ // You may not use this file except in accordance with one or both of these // licenses. -//! Contains the main LSPS2 server-side object, [`LSPS2ServiceHandler`]. +//! Contains the main bLIP-52 / LSPS2 server-side object, [`LSPS2ServiceHandler`]. -use crate::events::{Event, EventQueue}; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; + +use core::ops::Deref; +use core::sync::atomic::{AtomicUsize, Ordering}; + +use crate::events::EventQueue; use crate::lsps0::ser::{ - LSPSMessage, ProtocolMessageHandler, RequestId, ResponseError, + LSPSMessage, LSPSProtocolMessageHandler, LSPSRequestId, LSPSResponseError, JSONRPC_INTERNAL_ERROR_ERROR_CODE, JSONRPC_INTERNAL_ERROR_ERROR_MESSAGE, LSPS0_CLIENT_REJECTED_ERROR_CODE, }; @@ -22,7 +28,7 @@ use crate::lsps2::utils::{ }; use crate::message_queue::MessageQueue; use crate::prelude::hash_map::Entry; -use crate::prelude::{new_hash_map, HashMap, String, ToString, Vec}; +use crate::prelude::{new_hash_map, HashMap}; use crate::sync::{Arc, Mutex, MutexGuard, RwLock}; use lightning::events::HTLCDestination; @@ -36,12 +42,9 @@ use lightning_types::payment::PaymentHash; use bitcoin::secp256k1::PublicKey; -use core::ops::Deref; -use core::sync::atomic::{AtomicUsize, Ordering}; - use crate::lsps2::msgs::{ - BuyRequest, BuyResponse, GetInfoRequest, GetInfoResponse, LSPS2Message, LSPS2Request, - LSPS2Response, OpeningFeeParams, RawOpeningFeeParams, + LSPS2BuyRequest, LSPS2BuyResponse, LSPS2GetInfoRequest, LSPS2GetInfoResponse, 
LSPS2Message, + LSPS2OpeningFeeParams, LSPS2RawOpeningFeeParams, LSPS2Request, LSPS2Response, LSPS2_BUY_REQUEST_INVALID_OPENING_FEE_PARAMS_ERROR_CODE, LSPS2_BUY_REQUEST_PAYMENT_SIZE_TOO_LARGE_ERROR_CODE, LSPS2_BUY_REQUEST_PAYMENT_SIZE_TOO_SMALL_ERROR_CODE, @@ -142,7 +145,7 @@ impl OutboundJITChannelState { } fn htlc_intercepted( - &mut self, opening_fee_params: &OpeningFeeParams, payment_size_msat: &Option, + &mut self, opening_fee_params: &LSPS2OpeningFeeParams, payment_size_msat: &Option, htlc: InterceptedHTLC, ) -> Result<(Self, Option), ChannelStateError> { match self { @@ -395,13 +398,14 @@ impl OutboundJITChannelState { struct OutboundJITChannel { state: OutboundJITChannelState, user_channel_id: u128, - opening_fee_params: OpeningFeeParams, + opening_fee_params: LSPS2OpeningFeeParams, payment_size_msat: Option, } impl OutboundJITChannel { fn new( - payment_size_msat: Option, opening_fee_params: OpeningFeeParams, user_channel_id: u128, + payment_size_msat: Option, opening_fee_params: LSPS2OpeningFeeParams, + user_channel_id: u128, ) -> Self { Self { user_channel_id, @@ -456,7 +460,7 @@ struct PeerState { outbound_channels_by_intercept_scid: HashMap, intercept_scid_by_user_channel_id: HashMap, intercept_scid_by_channel_id: HashMap, - pending_requests: HashMap, + pending_requests: HashMap, } impl PeerState { @@ -522,7 +526,7 @@ macro_rules! get_or_insert_peer_state_entry { match $outer_state_lock.entry(*$counterparty_node_id) { Entry::Vacant(e) => { if is_limited_by_max_total_peers { - let error_response = ResponseError { + let error_response = LSPSResponseError { code: JSONRPC_INTERNAL_ERROR_ERROR_CODE, message: JSONRPC_INTERNAL_ERROR_ERROR_MESSAGE.to_string(), data: None, }; @@ -549,8 +553,8 @@ macro_rules! get_or_insert_peer_state_entry { }} } -/// The main object allowing to send and receive LSPS2 messages. -pub struct LSPS2ServiceHandler +/// The main object allowing to send and receive bLIP-52 / LSPS2 messages. 
+pub struct LSPS2ServiceHandler where CM::Target: AChannelManager, { @@ -564,7 +568,7 @@ where config: LSPS2ServiceConfig, } -impl LSPS2ServiceHandler +impl LSPS2ServiceHandler where CM::Target: AChannelManager, { @@ -591,7 +595,7 @@ where /// /// [`LSPS2ServiceEvent::GetInfo`]: crate::lsps2::event::LSPS2ServiceEvent::GetInfo pub fn invalid_token_provided( - &self, counterparty_node_id: &PublicKey, request_id: RequestId, + &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, ) -> Result<(), APIError> { let (result, response) = { let outer_state_lock = self.per_peer_state.read().unwrap(); @@ -602,7 +606,7 @@ where match self.remove_pending_request(&mut peer_state_lock, &request_id) { Some(LSPS2Request::GetInfo(_)) => { - let response = LSPS2Response::GetInfoError(ResponseError { + let response = LSPS2Response::GetInfoError(LSPSResponseError { code: LSPS2_GET_INFO_REQUEST_UNRECOGNIZED_OR_STALE_TOKEN_ERROR_CODE, message: "an unrecognized or stale token was provided".to_string(), data: None, @@ -646,8 +650,8 @@ where /// /// [`LSPS2ServiceEvent::GetInfo`]: crate::lsps2::event::LSPS2ServiceEvent::GetInfo pub fn opening_fee_params_generated( - &self, counterparty_node_id: &PublicKey, request_id: RequestId, - opening_fee_params_menu: Vec, + &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, + opening_fee_params_menu: Vec, ) -> Result<(), APIError> { let (result, response) = { let outer_state_lock = self.per_peer_state.read().unwrap(); @@ -658,7 +662,7 @@ where match self.remove_pending_request(&mut peer_state_lock, &request_id) { Some(LSPS2Request::GetInfo(_)) => { - let response = LSPS2Response::GetInfo(GetInfoResponse { + let response = LSPS2Response::GetInfo(LSPS2GetInfoResponse { opening_fee_params_menu: opening_fee_params_menu .into_iter() .map(|param| { @@ -705,7 +709,7 @@ where /// /// [`LSPS2ServiceEvent::BuyRequest`]: crate::lsps2::event::LSPS2ServiceEvent::BuyRequest pub fn invoice_parameters_generated( - &self, 
counterparty_node_id: &PublicKey, request_id: RequestId, intercept_scid: u64, + &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, intercept_scid: u64, cltv_expiry_delta: u32, client_trusts_lsp: bool, user_channel_id: u128, ) -> Result<(), APIError> { let (result, response) = { @@ -736,7 +740,7 @@ where peer_state_lock .insert_outbound_channel(intercept_scid, outbound_jit_channel); - let response = LSPS2Response::Buy(BuyResponse { + let response = LSPS2Response::Buy(LSPS2BuyResponse { jit_channel_scid: intercept_scid.into(), lsp_cltv_expiry_delta: cltv_expiry_delta, client_trusts_lsp, @@ -806,13 +810,13 @@ where }; match jit_channel.htlc_intercepted(htlc) { Ok(Some(HTLCInterceptedAction::OpenChannel(open_channel_params))) => { - let event = Event::LSPS2Service(LSPS2ServiceEvent::OpenChannel { + let event = LSPS2ServiceEvent::OpenChannel { their_network_key: counterparty_node_id.clone(), amt_to_forward_msat: open_channel_params.amt_to_forward_msat, opening_fee_msat: open_channel_params.opening_fee_msat, user_channel_id: jit_channel.user_channel_id, intercept_scid, - }); + }; self.pending_events.enqueue(event); }, Ok(Some(HTLCInterceptedAction::ForwardHTLC(channel_id))) => { @@ -1076,7 +1080,8 @@ where } fn handle_get_info_request( - &self, request_id: RequestId, counterparty_node_id: &PublicKey, params: GetInfoRequest, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, + params: LSPS2GetInfoRequest, ) -> Result<(), LightningError> { let (result, response) = { let mut outer_state_lock = self.per_peer_state.write().unwrap(); @@ -1091,11 +1096,11 @@ where request, ) { (Ok(()), msg) => { - let event = Event::LSPS2Service(LSPS2ServiceEvent::GetInfo { + let event = LSPS2ServiceEvent::GetInfo { request_id, counterparty_node_id: *counterparty_node_id, token: params.token, - }); + }; self.pending_events.enqueue(event); (Ok(()), msg) @@ -1112,11 +1117,11 @@ where } fn handle_buy_request( - &self, request_id: RequestId, counterparty_node_id: 
&PublicKey, params: BuyRequest, + &self, request_id: LSPSRequestId, counterparty_node_id: &PublicKey, params: LSPS2BuyRequest, ) -> Result<(), LightningError> { if let Some(payment_size_msat) = params.payment_size_msat { if payment_size_msat < params.opening_fee_params.min_payment_size_msat { - let response = LSPS2Response::BuyError(ResponseError { + let response = LSPS2Response::BuyError(LSPSResponseError { code: LSPS2_BUY_REQUEST_PAYMENT_SIZE_TOO_SMALL_ERROR_CODE, message: "payment size is below our minimum supported payment size".to_string(), data: None, @@ -1131,7 +1136,7 @@ where } if payment_size_msat > params.opening_fee_params.max_payment_size_msat { - let response = LSPS2Response::BuyError(ResponseError { + let response = LSPS2Response::BuyError(LSPSResponseError { code: LSPS2_BUY_REQUEST_PAYMENT_SIZE_TOO_LARGE_ERROR_CODE, message: "payment size is above our maximum supported payment size".to_string(), data: None, @@ -1151,7 +1156,7 @@ where ) { Some(opening_fee) => { if opening_fee >= payment_size_msat { - let response = LSPS2Response::BuyError(ResponseError { + let response = LSPS2Response::BuyError(LSPSResponseError { code: LSPS2_BUY_REQUEST_PAYMENT_SIZE_TOO_SMALL_ERROR_CODE, message: "payment size is too small to cover the opening fee" .to_string(), @@ -1166,7 +1171,7 @@ where } }, None => { - let response = LSPS2Response::BuyError(ResponseError { + let response = LSPS2Response::BuyError(LSPSResponseError { code: LSPS2_BUY_REQUEST_PAYMENT_SIZE_TOO_LARGE_ERROR_CODE, message: "overflow error when calculating opening_fee".to_string(), data: None, @@ -1183,7 +1188,7 @@ where // TODO: if payment_size_msat is specified, make sure our node has sufficient incoming liquidity from public network to receive it. 
if !is_valid_opening_fee_params(&params.opening_fee_params, &self.config.promise_secret) { - let response = LSPS2Response::BuyError(ResponseError { + let response = LSPS2Response::BuyError(LSPSResponseError { code: LSPS2_BUY_REQUEST_INVALID_OPENING_FEE_PARAMS_ERROR_CODE, message: "valid_until is already past OR the promise did not match the provided parameters".to_string(), data: None, @@ -1210,12 +1215,12 @@ where request, ) { (Ok(()), msg) => { - let event = Event::LSPS2Service(LSPS2ServiceEvent::BuyRequest { + let event = LSPS2ServiceEvent::BuyRequest { request_id, counterparty_node_id: *counterparty_node_id, opening_fee_params: params.opening_fee_params, payment_size_msat: params.payment_size_msat, - }); + }; self.pending_events.enqueue(event); (Ok(()), msg) @@ -1232,11 +1237,11 @@ where } fn insert_pending_request<'a>( - &self, peer_state_lock: &mut MutexGuard<'a, PeerState>, request_id: RequestId, + &self, peer_state_lock: &mut MutexGuard<'a, PeerState>, request_id: LSPSRequestId, counterparty_node_id: PublicKey, request: LSPS2Request, ) -> (Result<(), LightningError>, Option) { if self.total_pending_requests.load(Ordering::Relaxed) >= MAX_TOTAL_PENDING_REQUESTS { - let response = LSPS2Response::BuyError(ResponseError { + let response = LSPS2Response::BuyError(LSPSResponseError { code: LSPS0_CLIENT_REJECTED_ERROR_CODE, message: "Reached maximum number of pending requests. Please try again later." .to_string(), @@ -1258,7 +1263,7 @@ where self.total_pending_requests.fetch_add(1, Ordering::Relaxed); (Ok(()), None) } else { - let response = LSPS2Response::BuyError(ResponseError { + let response = LSPS2Response::BuyError(LSPSResponseError { code: LSPS0_CLIENT_REJECTED_ERROR_CODE, message: "Reached maximum number of pending requests. Please try again later."
.to_string(), @@ -1278,7 +1283,7 @@ where } fn remove_pending_request<'a>( - &self, peer_state_lock: &mut MutexGuard<'a, PeerState>, request_id: &RequestId, + &self, peer_state_lock: &mut MutexGuard<'a, PeerState>, request_id: &LSPSRequestId, ) -> Option { match peer_state_lock.pending_requests.remove(request_id) { Some(req) => { @@ -1345,7 +1350,7 @@ where } } -impl ProtocolMessageHandler for LSPS2ServiceHandler +impl LSPSProtocolMessageHandler for LSPS2ServiceHandler where CM::Target: AChannelManager, { @@ -1417,12 +1422,14 @@ fn calculate_amount_to_forward_per_htlc( #[cfg(test)] mod tests { - use super::*; - use chrono::TimeZone; - use chrono::Utc; + + use crate::lsps0::ser::LSPSDateTime; + use proptest::prelude::*; + use core::str::FromStr; + const MAX_VALUE_MSAT: u64 = 21_000_000_0000_0000_000; fn arb_forward_amounts() -> impl Strategy { @@ -1513,10 +1520,10 @@ mod tests { #[test] fn test_jit_channel_state_mpp() { let payment_size_msat = Some(500_000_000); - let opening_fee_params = OpeningFeeParams { + let opening_fee_params = LSPS2OpeningFeeParams { min_fee_msat: 10_000_000, proportional: 10_000, - valid_until: Utc.timestamp_opt(3000, 0).unwrap(), + valid_until: LSPSDateTime::from_str("2035-05-20T08:30:45Z").unwrap(), min_lifetime: 4032, max_client_to_self_delay: 2016, min_payment_size_msat: 10_000_000, @@ -1705,10 +1712,10 @@ mod tests { #[test] fn test_jit_channel_state_no_mpp() { let payment_size_msat = None; - let opening_fee_params = OpeningFeeParams { + let opening_fee_params = LSPS2OpeningFeeParams { min_fee_msat: 10_000_000, proportional: 10_000, - valid_until: Utc.timestamp_opt(3000, 0).unwrap(), + valid_until: LSPSDateTime::from_str("2035-05-20T08:30:45Z").unwrap(), min_lifetime: 4032, max_client_to_self_delay: 2016, min_payment_size_msat: 10_000_000, diff --git a/lightning-liquidity/src/lsps2/utils.rs b/lightning-liquidity/src/lsps2/utils.rs index 8a085b76c22..76ceeb8f60b 100644 --- a/lightning-liquidity/src/lsps2/utils.rs +++ 
b/lightning-liquidity/src/lsps2/utils.rs @@ -1,18 +1,15 @@ -//! Utilities for implementing the LSPS2 standard. +//! Utilities for implementing the bLIP-52 / LSPS2 standard. -use crate::lsps2::msgs::OpeningFeeParams; +use crate::lsps2::msgs::LSPS2OpeningFeeParams; use crate::utils; use bitcoin::hashes::hmac::{Hmac, HmacEngine}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::{Hash, HashEngine}; -#[cfg(feature = "std")] -use std::time::{SystemTime, UNIX_EPOCH}; - /// Determines if the given parameters are valid given the secret used to generate the promise. pub fn is_valid_opening_fee_params( - fee_params: &OpeningFeeParams, promise_secret: &[u8; 32], + fee_params: &LSPS2OpeningFeeParams, promise_secret: &[u8; 32], ) -> bool { if is_expired_opening_fee_params(fee_params) { return false; @@ -32,19 +29,10 @@ pub fn is_valid_opening_fee_params( /// Determines if the given parameters are expired, or still valid. #[cfg_attr(not(feature = "std"), allow(unused_variables))] -pub fn is_expired_opening_fee_params(fee_params: &OpeningFeeParams) -> bool { +pub fn is_expired_opening_fee_params(fee_params: &LSPS2OpeningFeeParams) -> bool { #[cfg(feature = "std")] { - let seconds_since_epoch = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("system clock to be ahead of the unix epoch") - .as_secs(); - let valid_until_seconds_since_epoch = fee_params - .valid_until - .timestamp() - .try_into() - .expect("expiration to be ahead of unix epoch"); - seconds_since_epoch > valid_until_seconds_since_epoch + fee_params.valid_until.is_past() } #[cfg(not(feature = "std"))] { @@ -57,7 +45,7 @@ pub fn is_expired_opening_fee_params(fee_params: &OpeningFeeParams) -> bool { /// /// Returns [`Option::None`] when the computation overflows. /// -/// See the [`specification`](https://github.com/BitcoinAndLightningLayerSpecs/lsp/tree/main/LSPS2#computing-the-opening_fee) for more details. 
+/// See the [`specification`](https://github.com/lightning/blips/blob/master/blip-0052.md#computing-the-opening_fee) for more details. pub fn compute_opening_fee( payment_size_msat: u64, opening_fee_min_fee_msat: u64, opening_fee_proportional: u64, ) -> Option { diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index a4c13033370..17fb406bf5c 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -1,9 +1,13 @@ -use crate::events::{Event, EventQueue}; +use alloc::boxed::Box; +use alloc::string::ToString; +use alloc::vec::Vec; + +use crate::events::{EventQueue, LiquidityEvent}; use crate::lsps0::client::LSPS0ClientHandler; use crate::lsps0::msgs::LSPS0Message; use crate::lsps0::ser::{ - LSPSMessage, LSPSMethod, ProtocolMessageHandler, RawLSPSMessage, RequestId, ResponseError, - JSONRPC_INVALID_MESSAGE_ERROR_CODE, JSONRPC_INVALID_MESSAGE_ERROR_MESSAGE, + LSPSMessage, LSPSMethod, LSPSProtocolMessageHandler, LSPSRequestId, LSPSResponseError, + RawLSPSMessage, JSONRPC_INVALID_MESSAGE_ERROR_CODE, JSONRPC_INVALID_MESSAGE_ERROR_MESSAGE, LSPS_MESSAGE_TYPE_ID, }; use crate::lsps0::service::LSPS0ServiceHandler; @@ -17,7 +21,7 @@ use crate::lsps1::service::{LSPS1ServiceConfig, LSPS1ServiceHandler}; use crate::lsps2::client::{LSPS2ClientConfig, LSPS2ClientHandler}; use crate::lsps2::msgs::LSPS2Message; use crate::lsps2::service::{LSPS2ServiceConfig, LSPS2ServiceHandler}; -use crate::prelude::{new_hash_map, new_hash_set, Box, HashMap, HashSet, ToString, Vec}; +use crate::prelude::{new_hash_map, new_hash_set, HashMap, HashSet}; use crate::sync::{Arc, Mutex, RwLock}; use lightning::chain::{self, BestBlock, Confirm, Filter, Listen}; @@ -73,7 +77,7 @@ pub struct LiquidityClientConfig { /// [`LiquidityManager`] to wake the [`PeerManager`] when there are pending messages to be sent. 
/// /// Users need to continually poll [`LiquidityManager::get_and_clear_pending_events`] in order to surface -/// [`Event`]'s that likely need to be handled. +/// [`LiquidityEvent`]'s that likely need to be handled. /// /// If the LSPS2 service is configured, users must forward the following parameters from LDK events: /// - [`Event::HTLCIntercepted`] to [`LSPS2ServiceHandler::htlc_intercepted`] @@ -95,7 +99,7 @@ where { pending_messages: Arc, pending_events: Arc, - request_id_to_method_map: Mutex>, + request_id_to_method_map: Mutex>, // We ignore peers if they send us bogus data. ignored_peers: RwLock>, lsps0_client_handler: LSPS0ClientHandler, @@ -146,7 +150,7 @@ where { let lsps2_service_handler = service_config.as_ref().and_then(|config| { config.lsps2_service_config.as_ref().map(|config| { if let Some(number) = - as ProtocolMessageHandler>::PROTOCOL_NUMBER + as LSPSProtocolMessageHandler>::PROTOCOL_NUMBER { supported_protocols.push(number); } @@ -173,7 +177,7 @@ where { #[cfg(lsps1_service)] let lsps1_service_handler = service_config.as_ref().and_then(|config| { if let Some(number) = - as ProtocolMessageHandler>::PROTOCOL_NUMBER + as LSPSProtocolMessageHandler>::PROTOCOL_NUMBER { supported_protocols.push(number); } @@ -329,7 +333,7 @@ where { /// /// [`MAX_EVENT_QUEUE_SIZE`]: crate::events::MAX_EVENT_QUEUE_SIZE #[cfg(feature = "std")] - pub fn wait_next_event(&self) -> Event { + pub fn wait_next_event(&self) -> LiquidityEvent { self.pending_events.wait_next_event() } @@ -342,7 +346,7 @@ where { /// [`MAX_EVENT_QUEUE_SIZE`] has been reached. /// /// [`MAX_EVENT_QUEUE_SIZE`]: crate::events::MAX_EVENT_QUEUE_SIZE - pub fn next_event(&self) -> Option { + pub fn next_event(&self) -> Option { self.pending_events.next_event() } @@ -355,7 +359,7 @@ where { /// [`MAX_EVENT_QUEUE_SIZE`] has been reached. 
/// /// [`MAX_EVENT_QUEUE_SIZE`]: crate::events::MAX_EVENT_QUEUE_SIZE - pub async fn next_event_async(&self) -> Event { + pub async fn next_event_async(&self) -> LiquidityEvent { self.pending_events.next_event_async().await } @@ -368,7 +372,7 @@ where { /// [`MAX_EVENT_QUEUE_SIZE`] has been reached. /// /// [`MAX_EVENT_QUEUE_SIZE`]: crate::events::MAX_EVENT_QUEUE_SIZE - pub fn get_and_clear_pending_events(&self) -> Vec { + pub fn get_and_clear_pending_events(&self) -> Vec { self.pending_events.get_and_clear_pending_events() } @@ -485,7 +489,7 @@ where LSPSMessage::from_str_with_id_map(&msg.payload, &mut request_id_to_method_map) } .map_err(|_| { - let error = ResponseError { + let error = LSPSResponseError { code: JSONRPC_INVALID_MESSAGE_ERROR_CODE, message: JSONRPC_INVALID_MESSAGE_ERROR_MESSAGE.to_string(), data: None, diff --git a/lightning-liquidity/src/message_queue.rs b/lightning-liquidity/src/message_queue.rs index 7b61a87bcd4..49a98ecfa68 100644 --- a/lightning-liquidity/src/message_queue.rs +++ b/lightning-liquidity/src/message_queue.rs @@ -1,7 +1,10 @@ //! Holds types and traits used to implement message queues for [`LSPSMessage`]s. 
+use alloc::boxed::Box; +use alloc::collections::VecDeque; +use alloc::vec::Vec; + use crate::lsps0::ser::LSPSMessage; -use crate::prelude::{Box, Vec, VecDeque}; use crate::sync::{Mutex, RwLock}; use bitcoin::secp256k1::PublicKey; diff --git a/lightning-liquidity/src/tests/utils.rs b/lightning-liquidity/src/tests/utils.rs index af5e55ae26b..204873ed11a 100644 --- a/lightning-liquidity/src/tests/utils.rs +++ b/lightning-liquidity/src/tests/utils.rs @@ -1,4 +1,5 @@ -use crate::prelude::Vec; +use alloc::vec::Vec; + use bitcoin::secp256k1::PublicKey; use lightning::io; use lightning::sign::EntropySource; diff --git a/lightning-liquidity/src/utils.rs b/lightning-liquidity/src/utils.rs index e355c72eb65..ab4c242d6f0 100644 --- a/lightning-liquidity/src/utils.rs +++ b/lightning-liquidity/src/utils.rs @@ -1,8 +1,9 @@ +use alloc::string::String; use core::{fmt::Write, ops::Deref}; + use lightning::sign::EntropySource; -use crate::lsps0::ser::RequestId; -use crate::prelude::String; +use crate::lsps0::ser::LSPSRequestId; pub fn scid_from_human_readable_string(human_readable_scid: &str) -> Result { let mut parts = human_readable_scid.split('x'); @@ -14,12 +15,12 @@ pub fn scid_from_human_readable_string(human_readable_scid: &str) -> Result(entropy_source: &ES) -> RequestId +pub(crate) fn generate_request_id(entropy_source: &ES) -> LSPSRequestId where ES::Target: EntropySource, { let bytes = entropy_source.get_secure_random_bytes(); - RequestId(hex_str(&bytes[0..16])) + LSPSRequestId(hex_str(&bytes[0..16])) } #[inline] diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index 92e172606ab..5a3f88dacac 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -4,10 +4,11 @@ mod common; use common::{create_service_and_client_nodes, get_lsps_message, Node}; -use lightning_liquidity::events::Event; +use 
lightning_liquidity::events::LiquidityEvent; +use lightning_liquidity::lsps0::ser::LSPSDateTime; use lightning_liquidity::lsps2::client::LSPS2ClientConfig; use lightning_liquidity::lsps2::event::{LSPS2ClientEvent, LSPS2ServiceEvent}; -use lightning_liquidity::lsps2::msgs::RawOpeningFeeParams; +use lightning_liquidity::lsps2::msgs::LSPS2RawOpeningFeeParams; use lightning_liquidity::lsps2::service::LSPS2ServiceConfig; use lightning_liquidity::lsps2::utils::is_valid_opening_fee_params; use lightning_liquidity::{LiquidityClientConfig, LiquidityServiceConfig}; @@ -24,8 +25,7 @@ use bitcoin::hashes::{sha256, Hash}; use bitcoin::secp256k1::{PublicKey, Secp256k1}; use bitcoin::Network; -use chrono::DateTime; - +use std::str::FromStr; use std::time::Duration; fn create_jit_invoice( @@ -113,7 +113,7 @@ fn invoice_generation_flow() { let get_info_event = service_node.liquidity_manager.next_event().unwrap(); match get_info_event { - Event::LSPS2Service(LSPS2ServiceEvent::GetInfo { + LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::GetInfo { request_id, counterparty_node_id, token, @@ -125,10 +125,10 @@ fn invoice_generation_flow() { _ => panic!("Unexpected event"), } - let raw_opening_params = RawOpeningFeeParams { + let raw_opening_params = LSPS2RawOpeningFeeParams { min_fee_msat: 100, proportional: 21, - valid_until: DateTime::parse_from_rfc3339("2035-05-20T08:30:45Z").unwrap().into(), + valid_until: LSPSDateTime::from_str("2035-05-20T08:30:45Z").unwrap(), min_lifetime: 144, max_client_to_self_delay: 128, min_payment_size_msat: 1, @@ -151,7 +151,7 @@ fn invoice_generation_flow() { let opening_params_event = client_node.liquidity_manager.next_event().unwrap(); let opening_fee_params = match opening_params_event { - Event::LSPS2Client(LSPS2ClientEvent::OpeningParametersReady { + LiquidityEvent::LSPS2Client(LSPS2ClientEvent::OpeningParametersReady { request_id, counterparty_node_id, opening_fee_params_menu, @@ -175,7 +175,7 @@ fn invoice_generation_flow() { let buy_event = 
service_node.liquidity_manager.next_event().unwrap(); match buy_event { - Event::LSPS2Service(LSPS2ServiceEvent::BuyRequest { + LiquidityEvent::LSPS2Service(LSPS2ServiceEvent::BuyRequest { request_id, counterparty_node_id, opening_fee_params: ofp, @@ -210,7 +210,7 @@ fn invoice_generation_flow() { let invoice_params_event = client_node.liquidity_manager.next_event().unwrap(); match invoice_params_event { - Event::LSPS2Client(LSPS2ClientEvent::InvoiceParametersReady { + LiquidityEvent::LSPS2Client(LSPS2ClientEvent::InvoiceParametersReady { request_id, counterparty_node_id, intercept_scid: iscid, diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index e6ad42e5bcd..8af33cef55b 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -113,7 +113,7 @@ pub(crate) fn do_test_data_migration // Integration-test the given KVStore implementation. Test relaying a few payments and check that // the persisted data is updated the appropriate number of times. 
-pub(crate) fn do_test_store(store_0: &K, store_1: &K) { +pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let chanmon_cfgs = create_chanmon_cfgs(2); let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let chain_mon_0 = test_utils::TestChainMonitor::new( diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index 6417d231f9e..e62c4251b01 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.1.1" +version = "0.1.2" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" @@ -51,6 +51,7 @@ libm = { version = "0.2", default-features = false } [dev-dependencies] regex = "1.5.6" lightning-types = { version = "0.2.0", path = "../lightning-types", features = ["_test_utils"] } +parking_lot = { version = "0.12", default-features = false } [dev-dependencies.bitcoin] version = "0.32.2" diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index c74b42cf5ff..7671c5529fc 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1509,8 +1509,8 @@ impl ChannelMonitor { fn provide_latest_holder_commitment_tx( &self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, - ) -> Result<(), ()> { - self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs, &Vec::new(), Vec::new()).map_err(|_| ()) + ) { + self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs, &Vec::new(), Vec::new()) } /// This is used to provide payment preimage(s) out-of-band during startup without updating the @@ -1737,10 +1737,14 @@ impl ChannelMonitor { self.inner.lock().unwrap().get_cur_holder_commitment_number() } - /// Gets whether we've been notified that this channel is closed by the `ChannelManager` (i.e. 
- /// via a [`ChannelMonitorUpdateStep::ChannelForceClosed`]). - pub(crate) fn offchain_closed(&self) -> bool { - self.inner.lock().unwrap().lockdown_from_offchain + /// Fetches whether this monitor has marked the channel as closed and will refuse any further + /// updates to the commitment transactions. + /// + /// It can be marked closed in a few different ways, including via a + /// [`ChannelMonitorUpdateStep::ChannelForceClosed`] or if the channel has been closed + /// on-chain. + pub(crate) fn no_further_updates_allowed(&self) -> bool { + self.inner.lock().unwrap().no_further_updates_allowed() } /// Gets the `node_id` of the counterparty for this channel. @@ -2901,7 +2905,7 @@ impl ChannelMonitorImpl { /// is important that any clones of this channel monitor (including remote clones) by kept /// up-to-date as our holder commitment transaction is updated. /// Panics if set_on_holder_tx_csv has never been called. - fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, mut htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, claimed_htlcs: &[(SentHTLCId, PaymentPreimage)], nondust_htlc_sources: Vec) -> Result<(), &'static str> { + fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, mut htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, claimed_htlcs: &[(SentHTLCId, PaymentPreimage)], nondust_htlc_sources: Vec) { if htlc_outputs.iter().any(|(_, s, _)| s.is_some()) { // If we have non-dust HTLCs in htlc_outputs, ensure they match the HTLCs in the // `holder_commitment_tx`. In the future, we'll no longer provide the redundant data @@ -2978,10 +2982,6 @@ impl ChannelMonitorImpl { } self.counterparty_fulfilled_htlcs.insert(*claimed_htlc_id, *claimed_preimage); } - if self.holder_tx_signed { - return Err("Latest holder commitment signed has already been signed, update is rejected"); - } - Ok(()) } /// Provides a payment_hash->payment_preimage mapping. 
Will be automatically pruned when all @@ -3202,11 +3202,7 @@ impl ChannelMonitorImpl { ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => { log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info"); if self.lockdown_from_offchain { panic!(); } - if let Err(e) = self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone(), &claimed_htlcs, nondust_htlc_sources.clone()) { - log_error!(logger, "Providing latest holder commitment transaction failed/was refused:"); - log_error!(logger, " {}", e); - ret = Err(()); - } + self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone(), &claimed_htlcs, nondust_htlc_sources.clone()); } ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_per_commitment_point, .. } => { log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info"); @@ -3286,12 +3282,16 @@ impl ChannelMonitorImpl { } } - if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed) && is_pre_close_update { + if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update { log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); Err(()) } else { ret } } + fn no_further_updates_allowed(&self) -> bool { + self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed + } + fn get_latest_update_id(&self) -> u64 { self.latest_update_id } @@ -3564,11 +3564,16 @@ impl ChannelMonitorImpl { return (claimable_outpoints, to_counterparty_output_info); } let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, 
htlc.amount_msat / 1000, htlc.clone(), &self.onchain_tx_handler.channel_transaction_parameters.channel_type_features); + let counterparty_spendable_height = if htlc.offered { + htlc.cltv_expiry + } else { + height + }; let justice_package = PackageTemplate::build_package( commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), - htlc.cltv_expiry, + counterparty_spendable_height, ); claimable_outpoints.push(justice_package); } @@ -3869,35 +3874,32 @@ impl ChannelMonitorImpl { } } } - if self.holder_tx_signed { - // If we've signed, we may have broadcast either commitment (prev or current), and - // attempted to claim from it immediately without waiting for a confirmation. - if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid { + // Cancel any pending claims for any holder commitments in case they had previously + // confirmed or been signed (in which case we will start attempting to claim without + // waiting for confirmation). 
+ if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid { + log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}", + self.current_holder_commitment_tx.txid); + let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 }; + for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs { + if let Some(vout) = htlc.transaction_output_index { + outpoint.vout = vout; + self.onchain_tx_handler.abandon_claim(&outpoint); + } + } + } + if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx { + if prev_holder_commitment_tx.txid != *confirmed_commitment_txid { log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}", - self.current_holder_commitment_tx.txid); - let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 }; - for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs { + prev_holder_commitment_tx.txid); + let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 }; + for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs { if let Some(vout) = htlc.transaction_output_index { outpoint.vout = vout; self.onchain_tx_handler.abandon_claim(&outpoint); } } } - if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx { - if prev_holder_commitment_tx.txid != *confirmed_commitment_txid { - log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}", - prev_holder_commitment_tx.txid); - let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 }; - for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs { - if let Some(vout) = htlc.transaction_output_index { - outpoint.vout = vout; - self.onchain_tx_handler.abandon_claim(&outpoint); - } - } - } - } - } else { - // No previous claim. 
} } @@ -4233,7 +4235,7 @@ impl ChannelMonitorImpl { } } - if self.lockdown_from_offchain || self.funding_spend_seen || self.holder_tx_signed { + if self.no_further_updates_allowed() { // Fail back HTLCs on backwards channels if they expire within // `LATENCY_GRACE_PERIOD_BLOCKS` blocks and the channel is closed (i.e. we're at a // point where no further off-chain updates will be accepted). If we haven't seen the @@ -5384,7 +5386,7 @@ mod tests { let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs); monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(), - htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()).unwrap(); + htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()); monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"1").to_byte_array()), preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger); monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"2").to_byte_array()), @@ -5422,7 +5424,7 @@ mod tests { let mut htlcs = preimages_slice_to_htlcs!(preimages[0..5]); let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs); monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(), - htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()).unwrap(); + htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()); secret[0..32].clone_from_slice(&>::from_hex("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap()); monitor.provide_secret(281474976710653, secret.clone()).unwrap(); assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 12); @@ -5433,7 +5435,7 @@ mod tests { let mut htlcs = preimages_slice_to_htlcs!(preimages[0..3]); let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs); monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx, - 
htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()).unwrap(); + htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()); secret[0..32].clone_from_slice(&>::from_hex("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); monitor.provide_secret(281474976710652, secret.clone()).unwrap(); assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 5); diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 55214006d4c..bd6912c21f8 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -699,8 +699,13 @@ impl PackageSolvingData { match self { PackageSolvingData::RevokedOutput(RevokedOutput { .. }) => PackageMalleability::Malleable(AggregationCluster::Unpinnable), - PackageSolvingData::RevokedHTLCOutput(..) => - PackageMalleability::Malleable(AggregationCluster::Pinnable), + PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput { htlc, .. }) => { + if htlc.offered { + PackageMalleability::Malleable(AggregationCluster::Unpinnable) + } else { + PackageMalleability::Malleable(AggregationCluster::Pinnable) + } + }, PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => PackageMalleability::Malleable(AggregationCluster::Unpinnable), PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => @@ -771,10 +776,12 @@ pub struct PackageTemplate { /// Block height at which our counterparty can potentially claim this output as well (assuming /// they have the keys or information required to do so). /// - /// This is used primarily by external consumers to decide when an output becomes "pinnable" - /// because the counterparty can potentially spend it. It is also used internally by - /// [`Self::get_height_timer`] to identify when an output must be claimed by, depending on the - /// type of output. + /// This is used primarily to decide when an output becomes "pinnable" because the counterparty + /// can potentially spend it. 
It is also used internally by [`Self::get_height_timer`] to + /// identify when an output must be claimed by, depending on the type of output. + /// + /// Note that for revoked counterparty HTLC outputs the value may be zero in some cases where + /// we upgraded from LDK 0.1 or prior. counterparty_spendable_height: u32, // Cache of package feerate committed at previous (re)broadcast. If bumping resources // (either claimed output value or external utxo), it will keep increasing until holder @@ -834,17 +841,17 @@ impl PackageTemplate { // Now check that we only merge packages if they are both unpinnable or both // pinnable. let self_pinnable = self_cluster == AggregationCluster::Pinnable || - self.counterparty_spendable_height() <= cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; + self.counterparty_spendable_height <= cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; let other_pinnable = other_cluster == AggregationCluster::Pinnable || - other.counterparty_spendable_height() <= cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; + other.counterparty_spendable_height <= cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; if self_pinnable && other_pinnable { return true; } let self_unpinnable = self_cluster == AggregationCluster::Unpinnable && - self.counterparty_spendable_height() > cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; + self.counterparty_spendable_height > cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; let other_unpinnable = other_cluster == AggregationCluster::Unpinnable && - other.counterparty_spendable_height() > cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; + other.counterparty_spendable_height > cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; if self_unpinnable && other_unpinnable { return true; } @@ -855,13 +862,6 @@ impl PackageTemplate { pub(crate) fn is_malleable(&self) -> bool { matches!(self.malleability, PackageMalleability::Malleable(..)) } - /// The 
height at which our counterparty may be able to spend this output. - /// - /// This is an important limit for aggregation as after this height our counterparty may be - /// able to pin transactions spending this output in the mempool. - pub(crate) fn counterparty_spendable_height(&self) -> u32 { - self.counterparty_spendable_height - } pub(crate) fn previous_feerate(&self) -> u64 { self.feerate_previous } @@ -1225,6 +1225,18 @@ impl Readable for PackageTemplate { (4, _height_original, option), // Written with a dummy value since 0.1 (6, height_timer, option), }); + for (_, input) in &inputs { + if let PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput { htlc, .. }) = input { + // LDK versions through 0.1 set the wrong counterparty_spendable_height for + // non-offered revoked HTLCs (ie HTLCs we sent to our counterparty which they can + // claim with a preimage immediately). Here we detect this and reset the value to + // zero, as the value is unused except for merging decisions which doesn't care + // about any values below the current height. 
+ if !htlc.offered && htlc.cltv_expiry == counterparty_spendable_height { + counterparty_spendable_height = 0; + } + } + } Ok(PackageTemplate { inputs, malleability, diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs index fa9badf87fa..4fa2871ddcb 100644 --- a/lightning/src/lib.rs +++ b/lightning/src/lib.rs @@ -54,6 +54,9 @@ extern crate alloc; pub extern crate lightning_types as types; pub extern crate bitcoin; + +pub extern crate lightning_invoice as bolt11_invoice; + #[cfg(any(test, feature = "std"))] extern crate core; @@ -63,6 +66,8 @@ extern crate core; #[cfg(ldk_bench)] extern crate criterion; +#[cfg(all(feature = "std", test))] extern crate parking_lot; + #[macro_use] pub mod util; pub mod chain; diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 2d01ece1158..ad1e6c26b98 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3819,3 +3819,225 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2); expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); } + +#[test] +#[cfg(all(feature = "std", not(target_os = "windows")))] +fn test_single_channel_multiple_mpp() { + use std::sync::atomic::{AtomicBool, Ordering}; + + // Test what happens when we attempt to claim an MPP with many parts that came to us through + // the same channel with a synchronous persistence interface which has very high latency. + // + // Previously, if a `revoke_and_ack` came in while we were still running in + // `ChannelManager::claim_payment` we'd end up hanging waiting to apply a + // `ChannelMonitorUpdate` until after it completed. See the commit which introduced this test + // for more info. 
+ let chanmon_cfgs = create_chanmon_cfgs(9); + let node_cfgs = create_node_cfgs(9, &chanmon_cfgs); + let configs = [None, None, None, None, None, None, None, None, None]; + let node_chanmgrs = create_node_chanmgrs(9, &node_cfgs, &configs); + let mut nodes = create_network(9, &node_cfgs, &node_chanmgrs); + + let node_7_id = nodes[7].node.get_our_node_id(); + let node_8_id = nodes[8].node.get_our_node_id(); + + // Send an MPP payment in six parts along the path shown from top to bottom + // 0 + // 1 2 3 4 5 6 + // 7 + // 8 + // + // We can in theory reproduce this issue with fewer channels/HTLCs, but getting this test + // robust is rather challenging. We rely on having the main test thread wait on locks held in + // the background `claim_funds` thread and unlocking when the `claim_funds` thread completes a + // single `ChannelMonitorUpdate`. + // This thread calls `get_and_clear_pending_msg_events()` and `handle_revoke_and_ack()`, both + // of which require `ChannelManager` locks, but we have to make sure this thread gets a chance + // to be blocked on the mutexes before we let the background thread wake `claim_funds` so that + // the mutex can switch to this main thread. + // This relies on our locks being fair, but also on our threads getting runtime during the test + // run, which can be pretty competitive. Thus we do a dumb dance to be as conservative as + // possible - we have a background thread which completes a `ChannelMonitorUpdate` (by sending + // into the `write_blocker` mpsc) but it doesn't run until a mpsc channel sends from this main + // thread to the background thread, and then we let it sleep a while before we send the + // `ChannelMonitorUpdate` unblocker. + // Further, we give ourselves two chances each time, needing 4 HTLCs just to unlock our two + // `ChannelManager` calls. We then need a few remaining HTLCs to actually trigger the bug, so + // we use 6 HTLCs. 
+ // Finaly, we do not run this test on Winblowz because it, somehow, in 2025, does not implement + // actual preemptive multitasking and thinks that cooperative multitasking somehow is + // acceptable in the 21st century, let alone a quarter of the way into it. + const MAX_THREAD_INIT_TIME: std::time::Duration = std::time::Duration::from_secs(1); + + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 3, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 4, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 5, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 6, 100_000, 0); + + create_announced_chan_between_nodes_with_value(&nodes, 1, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 2, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 3, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 4, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 5, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 6, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 7, 8, 1_000_000, 0); + + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[8], 50_000_000); + + send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[7], &nodes[8]], &[&nodes[2], &nodes[7], &nodes[8]], &[&nodes[3], &nodes[7], &nodes[8]], &[&nodes[4], &nodes[7], &nodes[8]], &[&nodes[5], &nodes[7], &nodes[8]], &[&nodes[6], &nodes[7], &nodes[8]]], 50_000_000, payment_hash, payment_secret); + + let (do_a_write, blocker) = std::sync::mpsc::sync_channel(0); + *nodes[8].chain_monitor.write_blocker.lock().unwrap() = Some(blocker); + + // Until we have std::thread::scoped we have to unsafe { turn off 
the borrow checker }. + // We do this by casting a pointer to a `TestChannelManager` to a pointer to a + // `TestChannelManager` with different (in this case 'static) lifetime. + // This is even suggested in the second example at + // https://doc.rust-lang.org/std/mem/fn.transmute.html#examples + let claim_node: &'static TestChannelManager<'static, 'static> = + unsafe { std::mem::transmute(nodes[8].node as &TestChannelManager) }; + let thrd = std::thread::spawn(move || { + // Initiate the claim in a background thread as it will immediately block waiting on the + // `write_blocker` we set above. + claim_node.claim_funds(payment_preimage); + }); + + // First unlock one monitor so that we have a pending + // `update_fulfill_htlc`/`commitment_signed` pair to pass to our counterparty. + do_a_write.send(()).unwrap(); + + // Then fetch the `update_fulfill_htlc`/`commitment_signed`. Note that the + // `get_and_clear_pending_msg_events` will immediately hang trying to take a peer lock which + // `claim_funds` is holding. Thus, we release a second write after a small sleep in the + // background to give `claim_funds` a chance to step forward, unblocking + // `get_and_clear_pending_msg_events`. 
+ let do_a_write_background = do_a_write.clone(); + let block_thrd2 = AtomicBool::new(true); + let block_thrd2_read: &'static AtomicBool = unsafe { std::mem::transmute(&block_thrd2) }; + let thrd2 = std::thread::spawn(move || { + while block_thrd2_read.load(Ordering::Acquire) { + std::thread::yield_now(); + } + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + }); + block_thrd2.store(false, Ordering::Release); + let first_updates = get_htlc_update_msgs(&nodes[8], &nodes[7].node.get_our_node_id()); + thrd2.join().unwrap(); + + // Disconnect node 6 from all its peers so it doesn't bother to fail the HTLCs back + nodes[7].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[2].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[3].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[4].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[5].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[6].node.get_our_node_id()); + + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &first_updates.update_fulfill_htlcs[0]); + check_added_monitors(&nodes[7], 1); + expect_payment_forwarded!(nodes[7], nodes[1], nodes[8], Some(1000), false, false); + nodes[7].node.handle_commitment_signed(node_8_id, &first_updates.commitment_signed); + check_added_monitors(&nodes[7], 1); + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + + // Now, handle the `revoke_and_ack` from node 5. Note that `claim_funds` is still blocked on + // our peer lock, so we have to release a write to let it process. + // After this call completes, the channel previously would be locked up and should not be able + // to make further progress. 
+ let do_a_write_background = do_a_write.clone(); + let block_thrd3 = AtomicBool::new(true); + let block_thrd3_read: &'static AtomicBool = unsafe { std::mem::transmute(&block_thrd3) }; + let thrd3 = std::thread::spawn(move || { + while block_thrd3_read.load(Ordering::Acquire) { + std::thread::yield_now(); + } + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + }); + block_thrd3.store(false, Ordering::Release); + nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); + thrd3.join().unwrap(); + assert!(!thrd.is_finished()); + + let thrd4 = std::thread::spawn(move || { + do_a_write.send(()).unwrap(); + do_a_write.send(()).unwrap(); + }); + + thrd4.join().unwrap(); + thrd.join().unwrap(); + + expect_payment_claimed!(nodes[8], payment_hash, 50_000_000); + + // At the end, we should have 7 ChannelMonitorUpdates - 6 for HTLC claims, and one for the + // above `revoke_and_ack`. + check_added_monitors(&nodes[8], 7); + + // Now drive everything to the end, at least as far as node 7 is concerned... 
+ *nodes[8].chain_monitor.write_blocker.lock().unwrap() = None; + nodes[8].node.handle_commitment_signed(node_7_id, &cs); + check_added_monitors(&nodes[8], 1); + + let (updates, raa) = get_updates_and_revoke(&nodes[8], &nodes[7].node.get_our_node_id()); + + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[7], nodes[2], nodes[8], Some(1000), false, false); + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]); + expect_payment_forwarded!(nodes[7], nodes[3], nodes[8], Some(1000), false, false); + let mut next_source = 4; + if let Some(update) = updates.update_fulfill_htlcs.get(2) { + nodes[7].node.handle_update_fulfill_htlc(node_8_id, update); + expect_payment_forwarded!(nodes[7], nodes[4], nodes[8], Some(1000), false, false); + next_source += 1; + } + + nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed); + nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + if updates.update_fulfill_htlcs.get(2).is_some() { + check_added_monitors(&nodes[7], 5); + } else { + check_added_monitors(&nodes[7], 4); + } + + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + + nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); + nodes[8].node.handle_commitment_signed(node_7_id, &cs); + check_added_monitors(&nodes[8], 2); + + let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_7_id); + + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); + next_source += 1; + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]); + expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); + next_source += 1; + if let Some(update) = updates.update_fulfill_htlcs.get(2) { + nodes[7].node.handle_update_fulfill_htlc(node_8_id, update); + 
expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); + } + + nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed); + nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + if updates.update_fulfill_htlcs.get(2).is_some() { + check_added_monitors(&nodes[7], 5); + } else { + check_added_monitors(&nodes[7], 4); + } + + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); + nodes[8].node.handle_commitment_signed(node_7_id, &cs); + check_added_monitors(&nodes[8], 2); + + let raa = get_event_msg!(nodes[8], MessageSendEvent::SendRevokeAndACK, node_7_id); + nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + check_added_monitors(&nodes[7], 1); +} diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9adf4f4cb8d..36f79ba0346 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1109,7 +1109,7 @@ pub(crate) enum MonitorUpdateCompletionAction { /// A pending MPP claim which hasn't yet completed. /// /// Not written to disk. - pending_mpp_claim: Option<(PublicKey, ChannelId, u64, PendingMPPClaimPointer)>, + pending_mpp_claim: Option<(PublicKey, ChannelId, PendingMPPClaimPointer)>, }, /// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the /// operation of another channel. @@ -1211,10 +1211,16 @@ impl From<&MPPClaimHTLCSource> for HTLCClaimSource { } } +#[derive(Debug)] +pub(crate) struct PendingMPPClaim { + channels_without_preimage: Vec<(PublicKey, OutPoint, ChannelId)>, + channels_with_preimage: Vec<(PublicKey, OutPoint, ChannelId)>, +} + #[derive(Clone, Debug, Hash, PartialEq, Eq)] /// The source of an HTLC which is being claimed as a part of an incoming payment. Each part is -/// tracked in [`PendingMPPClaim`] as well as in [`ChannelMonitor`]s, so that it can be converted -/// to an [`HTLCClaimSource`] for claim replays on startup. 
+/// tracked in [`ChannelMonitor`]s, so that it can be converted to an [`HTLCClaimSource`] for claim +/// replays on startup. struct MPPClaimHTLCSource { counterparty_node_id: PublicKey, funding_txo: OutPoint, @@ -1229,12 +1235,6 @@ impl_writeable_tlv_based!(MPPClaimHTLCSource, { (6, htlc_id, required), }); -#[derive(Debug)] -pub(crate) struct PendingMPPClaim { - channels_without_preimage: Vec, - channels_with_preimage: Vec, -} - #[derive(Clone, Debug, PartialEq, Eq)] /// When we're claiming a(n MPP) payment, we want to store information about that payment in the /// [`ChannelMonitor`] so that we can replay the claim without any information from the @@ -7020,8 +7020,15 @@ where } }).collect(); let pending_mpp_claim_ptr_opt = if sources.len() > 1 { + let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len()); + for part in mpp_parts.iter() { + let chan = (part.counterparty_node_id, part.funding_txo, part.channel_id); + if !channels_without_preimage.contains(&chan) { + channels_without_preimage.push(chan); + } + } Some(Arc::new(Mutex::new(PendingMPPClaim { - channels_without_preimage: mpp_parts.clone(), + channels_without_preimage, channels_with_preimage: Vec::new(), }))) } else { @@ -7032,7 +7039,7 @@ where let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim| if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); - Some((cp_id, htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id, claim_ptr)) + Some((cp_id, htlc.prev_hop.channel_id, claim_ptr)) } else { None } @@ -7378,7 +7385,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ for action in actions.into_iter() { match action { MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { - if let Some((counterparty_node_id, chan_id, htlc_id, claim_ptr)) = pending_mpp_claim { + if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { let mut peer_state = peer_state_mutex.lock().unwrap(); @@ -7389,24 +7396,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if *pending_claim == claim_ptr { let mut pending_claim_state_lock = pending_claim.0.lock().unwrap(); let pending_claim_state = &mut *pending_claim_state_lock; - pending_claim_state.channels_without_preimage.retain(|htlc_info| { + pending_claim_state.channels_without_preimage.retain(|(cp, op, cid)| { let this_claim = - htlc_info.counterparty_node_id == counterparty_node_id - && htlc_info.channel_id == chan_id - && htlc_info.htlc_id == htlc_id; + *cp == counterparty_node_id && *cid == chan_id; if this_claim { - pending_claim_state.channels_with_preimage.push(htlc_info.clone()); + pending_claim_state.channels_with_preimage.push((*cp, *op, *cid)); false } else { true } }); if pending_claim_state.channels_without_preimage.is_empty() { - for htlc_info in pending_claim_state.channels_with_preimage.iter() { - let freed_chan = ( - htlc_info.counterparty_node_id, - htlc_info.funding_txo, - htlc_info.channel_id, - blocker.clone() - ); + for (cp, op, cid) in pending_claim_state.channels_with_preimage.iter() { + let freed_chan = (*cp, *op, *cid, blocker.clone()); freed_channels.push(freed_chan); } } @@ -13367,8 +13367,8 @@ where // claim. // Note that a `ChannelMonitor` is created with `update_id` 0 and after we // provide it with a closure update its `update_id` will be at 1. 
- if !monitor.offchain_closed() || monitor.get_latest_update_id() > 1 { - should_queue_fc_update = !monitor.offchain_closed(); + if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 { + should_queue_fc_update = !monitor.no_further_updates_allowed(); let mut latest_update_id = monitor.get_latest_update_id(); if should_queue_fc_update { latest_update_id += 1; @@ -14235,8 +14235,16 @@ where if payment_claim.mpp_parts.is_empty() { return Err(DecodeError::InvalidValue); } + let mut channels_without_preimage = payment_claim.mpp_parts.iter() + .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id)) + .collect::>(); + // If we have multiple MPP parts which were received over the same channel, + // we only track it once as once we get a preimage durably in the + // `ChannelMonitor` it will be used for all HTLCs with a matching hash. + channels_without_preimage.sort_unstable(); + channels_without_preimage.dedup(); let pending_claims = PendingMPPClaim { - channels_without_preimage: payment_claim.mpp_parts.clone(), + channels_without_preimage, channels_with_preimage: Vec::new(), }; let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims))); @@ -14269,7 +14277,7 @@ where for part in payment_claim.mpp_parts.iter() { let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| ( - part.counterparty_node_id, part.channel_id, part.htlc_id, + part.counterparty_node_id, part.channel_id, PendingMPPClaimPointer(Arc::clone(&ptr)) )); let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 63341969326..be77547b79c 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -10,7 +10,7 @@ //! A bunch of useful utilities for building networks of nodes and exchanging messages between //! nodes for functional tests. 
-use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, chainmonitor::Persist}; +use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason}; @@ -399,7 +399,7 @@ pub struct NodeCfg<'a> { pub override_init_features: Rc>>, } -type TestChannelManager<'node_cfg, 'chan_mon_cfg> = ChannelManager< +pub(crate) type TestChannelManager<'node_cfg, 'chan_mon_cfg> = ChannelManager< &'node_cfg TestChainMonitor<'chan_mon_cfg>, &'chan_mon_cfg test_utils::TestBroadcaster, &'node_cfg test_utils::TestKeysInterface, @@ -779,6 +779,26 @@ pub fn get_revoke_commit_msgs>(node: & }) } +/// Gets a `UpdateHTLCs` and `revoke_and_ack` (i.e. after we get a responding `commitment_signed` +/// while we have updates in the holding cell). 
+pub fn get_updates_and_revoke>(node: &H, recipient: &PublicKey) -> (msgs::CommitmentUpdate, msgs::RevokeAndACK) { + let events = node.node().get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + (match events[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(node_id, recipient); + (*updates).clone() + }, + _ => panic!("Unexpected event"), + }, match events[1] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(node_id, recipient); + (*msg).clone() + }, + _ => panic!("Unexpected event"), + }) +} + #[macro_export] /// Gets an RAA and CS which were sent in response to a commitment update /// @@ -3259,7 +3279,7 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec(node_count: usize, chanmon_cfgs: &'a Vec, persisters: Vec<&'a impl Persist>) -> Vec> { +pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec, persisters: Vec<&'a impl test_utils::SyncPersist>) -> Vec> { let mut nodes = Vec::new(); for i in 0..node_count { diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index b29ee99e077..bdb1621771f 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -15,7 +15,7 @@ use crate::chain; use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor; -use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; +use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE}; use crate::chain::transaction::OutPoint; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; use crate::events::bump_transaction::WalletSource; @@ -2645,14 +2645,12 
@@ fn test_justice_tx_htlc_timeout() { mine_transaction(&nodes[1], &revoked_local_txn[0]); { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - // The unpinnable, revoked to_self output, and the pinnable, revoked htlc output will - // be claimed in separate transactions. - assert_eq!(node_txn.len(), 2); - for tx in node_txn.iter() { - assert_eq!(tx.input.len(), 1); - check_spends!(tx, revoked_local_txn[0]); - } - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + // The revoked HTLC output is not pinnable for another `TEST_FINAL_CLTV` blocks, and is + // thus claimed in the same transaction with the revoked to_self output. + assert_eq!(node_txn.len(), 1); + assert_eq!(node_txn[0].input.len(), 2); + check_spends!(node_txn[0], revoked_local_txn[0]); + assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); node_txn.clear(); } check_added_monitors!(nodes[1], 1); @@ -2872,28 +2870,26 @@ fn claim_htlc_outputs() { assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert_eq!(node_txn.len(), 2); // Two penalty transactions: - assert_eq!(node_txn[0].input.len(), 1); // Claims the unpinnable, revoked output. - assert_eq!(node_txn[1].input.len(), 2); // Claims both pinnable, revoked HTLC outputs separately. - check_spends!(node_txn[0], revoked_local_txn[0]); - check_spends!(node_txn[1], revoked_local_txn[0]); - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[1].previous_output); - assert_ne!(node_txn[1].input[0].previous_output, node_txn[1].input[1].previous_output); + assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty txn + + // The ChannelMonitor should claim the accepted HTLC output separately from the offered + // HTLC and to_self outputs. 
+ let accepted_claim = node_txn.iter().filter(|tx| tx.input.len() == 1).next().unwrap(); + let offered_to_self_claim = node_txn.iter().filter(|tx| tx.input.len() == 2).next().unwrap(); + check_spends!(accepted_claim, revoked_local_txn[0]); + check_spends!(offered_to_self_claim, revoked_local_txn[0]); + assert_eq!(accepted_claim.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); let mut witness_lens = BTreeSet::new(); - witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len()); - witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len()); - witness_lens.insert(node_txn[1].input[1].witness.last().unwrap().len()); - assert_eq!(witness_lens.len(), 3); + witness_lens.insert(offered_to_self_claim.input[0].witness.last().unwrap().len()); + witness_lens.insert(offered_to_self_claim.input[1].witness.last().unwrap().len()); + assert_eq!(witness_lens.len(), 2); assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local - assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC - assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC + assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); - // Finally, mine the penalty transactions and check that we get an HTLC failure after + // Finally, mine the penalty transaction and check that we get an HTLC failure after // ANTI_REORG_DELAY confirmations. 
- mine_transaction(&nodes[1], &node_txn[0]); - mine_transaction(&nodes[1], &node_txn[1]); + mine_transaction(&nodes[1], accepted_claim); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], payment_hash_2, false); } @@ -5056,8 +5052,7 @@ fn test_static_spendable_outputs_timeout_tx() { check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs } -#[test] -fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { +fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -5073,20 +5068,28 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + if split_tx { + connect_blocks(&nodes[1], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE + 1); + } + mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); - // The unpinnable, revoked to_self output and the pinnable, revoked HTLC output will be claimed - // in separate transactions. + // If the HTLC expires in more than COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE blocks, we'll + // claim both the revoked and HTLC outputs in one transaction, otherwise we'll split them as we + // consider the HTLC output as pinnable and want to claim pinnable and unpinnable outputs + // separately. 
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(node_txn.len(), 2); + assert_eq!(node_txn.len(), if split_tx { 2 } else { 1 }); for tx in node_txn.iter() { - assert_eq!(tx.input.len(), 1); + assert_eq!(tx.input.len(), if split_tx { 1 } else { 2 }); check_spends!(tx, revoked_local_txn[0]); } - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + if split_tx { + assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + } mine_transaction(&nodes[1], &node_txn[0]); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -5096,6 +5099,12 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { check_spends!(spend_txn[0], node_txn[0]); } +#[test] +fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { + do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(true); + do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(false); +} + #[test] fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { let mut chanmon_cfgs = create_chanmon_cfgs(2); @@ -5128,6 +5137,10 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]); assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout + // In order to connect `revoked_htlc_txn[0]` we must first advance the chain by + // `TEST_FINAL_CLTV` blocks as otherwise the transaction is consensus-invalid due to its + // locktime. 
+ connect_blocks(&nodes[1], TEST_FINAL_CLTV); // B will generate justice tx from A's revoked commitment/HTLC tx connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); check_closed_broadcast!(nodes[1], true); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index e2c76643348..92b19790be5 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -10,7 +10,7 @@ //! Further functional tests which test blockchain reorganizations. use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor}; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep}; +use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATENCY_GRACE_PERIOD_BLOCKS, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, Balance, BalanceSource, ChannelMonitorUpdateStep}; use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; @@ -1734,6 +1734,12 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { assert_eq!(revoked_htlc_success.lock_time, LockTime::ZERO); assert_ne!(revoked_htlc_timeout.lock_time, LockTime::ZERO); + // First connect blocks until the HTLC expires with + // `COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE` blocks, making us consider all the HTLCs + // pinnable claims, which the remainder of the test assumes. 
+ connect_blocks(&nodes[0], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], + [HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]); // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); @@ -1846,8 +1852,6 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], revoked_htlc_timeout.lock_time.to_consensus_u32() - nodes[0].best_block_info().1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], - [HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]); // As time goes on A may split its revocation claim transaction into multiple. let as_fewer_input_rbf = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); for tx in as_fewer_input_rbf.iter() { @@ -3296,10 +3300,10 @@ fn test_update_replay_panics() { // Ensure applying the force-close update skipping the last normal update fails let poisoned_monitor = monitor.clone(); - std::panic::catch_unwind(|| { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger); // We should panic, rather than returning an error here. - }).unwrap_err(); + })).unwrap_err(); // Then apply the last normal and force-close update and make sure applying the preimage // updates out-of-order fails. 
@@ -3307,17 +3311,17 @@ fn test_update_replay_panics() { monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap(); let poisoned_monitor = monitor.clone(); - std::panic::catch_unwind(|| { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _ = poisoned_monitor.update_monitor(&updates[3], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger); // We should panic, rather than returning an error here. - }).unwrap_err(); + })).unwrap_err(); // Make sure re-applying the force-close update fails let poisoned_monitor = monitor.clone(); - std::panic::catch_unwind(|| { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger); // We should panic, rather than returning an error here. - }).unwrap_err(); + })).unwrap_err(); // ...and finally ensure that applying all the updates succeeds. monitor.update_monitor(&updates[2], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap(); diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 18f658ca108..ca804a921d2 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -1575,6 +1575,8 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider { /// May return an `Err(())` if the features the peer supports are not sufficient to communicate /// with us. Implementors should be somewhat conservative about doing so, however, as other /// message handlers may still wish to communicate with this peer. + /// + /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. fn peer_connected(&self, their_node_id: PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>; /// Handle an incoming `channel_reestablish` message from the given peer. 
fn handle_channel_reestablish(&self, their_node_id: PublicKey, msg: &ChannelReestablish); @@ -1704,6 +1706,8 @@ pub trait OnionMessageHandler { /// May return an `Err(())` if the features the peer supports are not sufficient to communicate /// with us. Implementors should be somewhat conservative about doing so, however, as other /// message handlers may still wish to communicate with this peer. + /// + /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. fn peer_connected(&self, their_node_id: PublicKey, init: &Init, inbound: bool) -> Result<(), ()>; /// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 177bd051e09..0cb2b050c14 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -88,6 +88,8 @@ pub trait CustomMessageHandler: wire::CustomMessageReader { /// May return an `Err(())` if the features the peer supports are not sufficient to communicate /// with us. Implementors should be somewhat conservative about doing so, however, as other /// message handlers may still wish to communicate with this peer. + /// + /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. fn peer_connected(&self, their_node_id: PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>; /// Gets the node feature flags which this handler itself supports. All available handlers are @@ -1717,10 +1719,13 @@ impl= 253. - $self.bytes.len() - + $self.experimental_bytes.len() - + if $self.contents.is_for_offer() { 0 } else { 2 }, - $self.bytes.capacity(), - ); $self.bytes.extend_from_slice(&$self.experimental_bytes); Ok(Bolt12Invoice { @@ -966,13 +940,6 @@ impl Hash for Bolt12Invoice { } impl InvoiceContents { - fn is_for_offer(&self) -> bool { - match self { - InvoiceContents::ForOffer { .. } => true, - InvoiceContents::ForRefund { .. 
} => false, - } - } - /// Whether the original offer or refund has expired. #[cfg(feature = "std")] fn is_offer_or_refund_expired(&self) -> bool { @@ -1363,7 +1330,11 @@ pub(super) const EXPERIMENTAL_INVOICE_TYPES: core::ops::RangeFrom = 3_000_0 #[cfg(not(test))] tlv_stream!( - ExperimentalInvoiceTlvStream, ExperimentalInvoiceTlvStreamRef, EXPERIMENTAL_INVOICE_TYPES, {} + ExperimentalInvoiceTlvStream, ExperimentalInvoiceTlvStreamRef, EXPERIMENTAL_INVOICE_TYPES, { + // When adding experimental TLVs, update EXPERIMENTAL_TLV_ALLOCATION_SIZE accordingly in + // both UnsignedBolt12Invoice:new and UnsignedStaticInvoice::new to avoid unnecessary + // allocations. + } ); #[cfg(test)] @@ -2881,9 +2852,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice.bytes); @@ -2918,9 +2886,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice.bytes); @@ -2983,9 +2948,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice.bytes) @@ -3022,9 +2984,6 @@ mod tests { 
BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice.bytes) diff --git a/lightning/src/offers/invoice_request.rs b/lightning/src/offers/invoice_request.rs index e65520f4dea..382a9867702 100644 --- a/lightning/src/offers/invoice_request.rs +++ b/lightning/src/offers/invoice_request.rs @@ -77,7 +77,7 @@ use crate::ln::channelmanager::PaymentId; use crate::types::features::InvoiceRequestFeatures; use crate::ln::inbound_payment::{ExpandedKey, IV_LEN}; use crate::ln::msgs::DecodeError; -use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, self, SIGNATURE_TLV_RECORD_SIZE}; +use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, self}; use crate::offers::nonce::Nonce; use crate::offers::offer::{Amount, EXPERIMENTAL_OFFER_TYPES, ExperimentalOfferTlvStream, ExperimentalOfferTlvStreamRef, OFFER_TYPES, Offer, OfferContents, OfferId, OfferTlvStream, OfferTlvStreamRef}; use crate::offers::parse::{Bolt12ParseError, ParsedMessage, Bolt12SemanticError}; @@ -473,17 +473,8 @@ impl UnsignedInvoiceRequest { _experimental_offer_tlv_stream, experimental_invoice_request_tlv_stream, ) = contents.as_tlv_stream(); - // Allocate enough space for the invoice_request, which will include: - // - all TLV records from `offer.bytes`, - // - all invoice_request-specific TLV records, and - // - a signature TLV record once the invoice_request is signed. 
- let mut bytes = Vec::with_capacity( - offer.bytes.len() - + payer_tlv_stream.serialized_length() - + invoice_request_tlv_stream.serialized_length() - + SIGNATURE_TLV_RECORD_SIZE - + experimental_invoice_request_tlv_stream.serialized_length(), - ); + const INVOICE_REQUEST_ALLOCATION_SIZE: usize = 512; + let mut bytes = Vec::with_capacity(INVOICE_REQUEST_ALLOCATION_SIZE); payer_tlv_stream.write(&mut bytes).unwrap(); @@ -495,23 +486,16 @@ impl UnsignedInvoiceRequest { invoice_request_tlv_stream.write(&mut bytes).unwrap(); - let mut experimental_tlv_stream = TlvStream::new(remaining_bytes) - .range(EXPERIMENTAL_OFFER_TYPES) - .peekable(); - let mut experimental_bytes = Vec::with_capacity( - remaining_bytes.len() - - experimental_tlv_stream - .peek() - .map_or(remaining_bytes.len(), |first_record| first_record.start) - + experimental_invoice_request_tlv_stream.serialized_length(), - ); + const EXPERIMENTAL_TLV_ALLOCATION_SIZE: usize = 0; + let mut experimental_bytes = Vec::with_capacity(EXPERIMENTAL_TLV_ALLOCATION_SIZE); + let experimental_tlv_stream = TlvStream::new(remaining_bytes) + .range(EXPERIMENTAL_OFFER_TYPES); for record in experimental_tlv_stream { record.write(&mut experimental_bytes).unwrap(); } experimental_invoice_request_tlv_stream.write(&mut experimental_bytes).unwrap(); - debug_assert_eq!(experimental_bytes.len(), experimental_bytes.capacity()); let tlv_stream = TlvStream::new(&bytes).chain(TlvStream::new(&experimental_bytes)); let tagged_hash = TaggedHash::from_tlv_stream(SIGNATURE_TAG, tlv_stream); @@ -546,12 +530,6 @@ macro_rules! unsigned_invoice_request_sign_method { ( signature_tlv_stream.write(&mut $self.bytes).unwrap(); // Append the experimental bytes after the signature. - debug_assert_eq!( - // The two-byte overallocation results from SIGNATURE_TLV_RECORD_SIZE accommodating TLV - // records with types >= 253. 
- $self.bytes.len() + $self.experimental_bytes.len() + 2, - $self.bytes.capacity(), - ); $self.bytes.extend_from_slice(&$self.experimental_bytes); Ok(InvoiceRequest { @@ -1129,7 +1107,10 @@ pub(super) const EXPERIMENTAL_INVOICE_REQUEST_TYPES: core::ops::Range = #[cfg(not(test))] tlv_stream!( ExperimentalInvoiceRequestTlvStream, ExperimentalInvoiceRequestTlvStreamRef, - EXPERIMENTAL_INVOICE_REQUEST_TYPES, {} + EXPERIMENTAL_INVOICE_REQUEST_TYPES, { + // When adding experimental TLVs, update EXPERIMENTAL_TLV_ALLOCATION_SIZE accordingly in + // UnsignedInvoiceRequest::new to avoid unnecessary allocations. + } ); #[cfg(test)] @@ -2424,11 +2405,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice_request.bytes.reserve_exact( - unsigned_invoice_request.bytes.capacity() - - unsigned_invoice_request.bytes.len() - + unknown_bytes.len(), - ); unsigned_invoice_request.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice_request.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice_request.bytes); @@ -2462,11 +2438,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice_request.bytes.reserve_exact( - unsigned_invoice_request.bytes.capacity() - - unsigned_invoice_request.bytes.len() - + unknown_bytes.len(), - ); unsigned_invoice_request.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice_request.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice_request.bytes); @@ -2510,11 +2481,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice_request.bytes.reserve_exact( - unsigned_invoice_request.bytes.capacity() - - unsigned_invoice_request.bytes.len() - + unknown_bytes.len(), - ); unsigned_invoice_request.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = 
TlvStream::new(&unsigned_invoice_request.bytes) @@ -2551,11 +2517,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice_request.bytes.reserve_exact( - unsigned_invoice_request.bytes.capacity() - - unsigned_invoice_request.bytes.len() - + unknown_bytes.len(), - ); unsigned_invoice_request.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice_request.bytes) diff --git a/lightning/src/offers/merkle.rs b/lightning/src/offers/merkle.rs index 8c3eaaed24d..db4f4a41094 100644 --- a/lightning/src/offers/merkle.rs +++ b/lightning/src/offers/merkle.rs @@ -11,7 +11,6 @@ use bitcoin::hashes::{Hash, HashEngine, sha256}; use bitcoin::secp256k1::{Message, PublicKey, Secp256k1, self}; -use bitcoin::secp256k1::constants::SCHNORR_SIGNATURE_SIZE; use bitcoin::secp256k1::schnorr::Signature; use crate::io; use crate::util::ser::{BigSize, Readable, Writeable, Writer}; @@ -26,10 +25,6 @@ tlv_stream!(SignatureTlvStream, SignatureTlvStreamRef<'a>, SIGNATURE_TYPES, { (240, signature: Signature), }); -/// Size of a TLV record in `SIGNATURE_TYPES` when the type is 1000. TLV types are encoded using -/// BigSize, so a TLV record with type 240 will use two less bytes. -pub(super) const SIGNATURE_TLV_RECORD_SIZE: usize = 3 + 1 + SCHNORR_SIGNATURE_SIZE; - /// A hash for use in a specific context by tweaking with a context-dependent tag as per [BIP 340] /// and computed over the merkle root of a TLV stream to sign as defined in [BOLT 12]. /// @@ -253,7 +248,6 @@ pub(super) struct TlvRecord<'a> { type_bytes: &'a [u8], // The entire TLV record. 
pub(super) record_bytes: &'a [u8], - pub(super) start: usize, pub(super) end: usize, } @@ -278,7 +272,7 @@ impl<'a> Iterator for TlvStream<'a> { self.data.set_position(end); Some(TlvRecord { - r#type, type_bytes, record_bytes, start: start as usize, end: end as usize, + r#type, type_bytes, record_bytes, end: end as usize, }) } else { None diff --git a/lightning/src/offers/offer.rs b/lightning/src/offers/offer.rs index 045c4d3eb73..0f88ad3969a 100644 --- a/lightning/src/offers/offer.rs +++ b/lightning/src/offers/offer.rs @@ -438,7 +438,8 @@ macro_rules! offer_builder_methods { ( } } - let mut bytes = Vec::new(); + const OFFER_ALLOCATION_SIZE: usize = 512; + let mut bytes = Vec::with_capacity(OFFER_ALLOCATION_SIZE); $self.offer.write(&mut bytes).unwrap(); let id = OfferId::from_valid_offer_tlv_stream(&bytes); diff --git a/lightning/src/offers/refund.rs b/lightning/src/offers/refund.rs index a68d0eb658e..e562fb3f901 100644 --- a/lightning/src/offers/refund.rs +++ b/lightning/src/offers/refund.rs @@ -338,7 +338,8 @@ macro_rules! 
refund_builder_methods { ( $self.refund.payer.0 = metadata; } - let mut bytes = Vec::new(); + const REFUND_ALLOCATION_SIZE: usize = 512; + let mut bytes = Vec::with_capacity(REFUND_ALLOCATION_SIZE); $self.refund.write(&mut bytes).unwrap(); Ok(Refund { diff --git a/lightning/src/offers/static_invoice.rs b/lightning/src/offers/static_invoice.rs index 411ba3ff272..4360582a14c 100644 --- a/lightning/src/offers/static_invoice.rs +++ b/lightning/src/offers/static_invoice.rs @@ -25,7 +25,6 @@ use crate::offers::invoice_macros::{invoice_accessors_common, invoice_builder_me use crate::offers::invoice_request::InvoiceRequest; use crate::offers::merkle::{ self, SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, - SIGNATURE_TLV_RECORD_SIZE, }; use crate::offers::nonce::Nonce; use crate::offers::offer::{ @@ -288,16 +287,8 @@ impl UnsignedStaticInvoice { fn new(offer_bytes: &Vec, contents: InvoiceContents) -> Self { let (_, invoice_tlv_stream, _, experimental_invoice_tlv_stream) = contents.as_tlv_stream(); - // Allocate enough space for the invoice, which will include: - // - all TLV records from `offer_bytes`, - // - all invoice-specific TLV records, and - // - a signature TLV record once the invoice is signed. - let mut bytes = Vec::with_capacity( - offer_bytes.len() - + invoice_tlv_stream.serialized_length() - + SIGNATURE_TLV_RECORD_SIZE - + experimental_invoice_tlv_stream.serialized_length(), - ); + const INVOICE_ALLOCATION_SIZE: usize = 1024; + let mut bytes = Vec::with_capacity(INVOICE_ALLOCATION_SIZE); // Use the offer bytes instead of the offer TLV stream as the latter may have contained // unknown TLV records, which are not stored in `InvoiceContents`. 
@@ -309,22 +300,16 @@ impl UnsignedStaticInvoice { invoice_tlv_stream.write(&mut bytes).unwrap(); - let mut experimental_tlv_stream = - TlvStream::new(remaining_bytes).range(EXPERIMENTAL_OFFER_TYPES).peekable(); - let mut experimental_bytes = Vec::with_capacity( - remaining_bytes.len() - - experimental_tlv_stream - .peek() - .map_or(remaining_bytes.len(), |first_record| first_record.start) - + experimental_invoice_tlv_stream.serialized_length(), - ); + const EXPERIMENTAL_TLV_ALLOCATION_SIZE: usize = 0; + let mut experimental_bytes = Vec::with_capacity(EXPERIMENTAL_TLV_ALLOCATION_SIZE); + let experimental_tlv_stream = + TlvStream::new(remaining_bytes).range(EXPERIMENTAL_OFFER_TYPES); for record in experimental_tlv_stream { record.write(&mut experimental_bytes).unwrap(); } experimental_invoice_tlv_stream.write(&mut experimental_bytes).unwrap(); - debug_assert_eq!(experimental_bytes.len(), experimental_bytes.capacity()); let tlv_stream = TlvStream::new(&bytes).chain(TlvStream::new(&experimental_bytes)); let tagged_hash = TaggedHash::from_tlv_stream(SIGNATURE_TAG, tlv_stream); @@ -344,12 +329,6 @@ impl UnsignedStaticInvoice { signature_tlv_stream.write(&mut self.bytes).unwrap(); // Append the experimental bytes after the signature. - debug_assert_eq!( - // The two-byte overallocation results from SIGNATURE_TLV_RECORD_SIZE accommodating TLV - // records with types >= 253. 
- self.bytes.len() + self.experimental_bytes.len() + 2, - self.bytes.capacity(), - ); self.bytes.extend_from_slice(&self.experimental_bytes); Ok(StaticInvoice { bytes: self.bytes, contents: self.contents, signature }) @@ -1392,9 +1371,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice.bytes); @@ -1434,9 +1410,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice.bytes); @@ -1511,9 +1484,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice.bytes) @@ -1555,9 +1525,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice.bytes) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index cd9b360c538..3262984b63b 100644 --- 
a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1664,8 +1664,7 @@ where let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - // In Nov, 2023 there were about 15,000 nodes; we cap allocations to 1.5x that. - let mut channels = IndexedMap::with_capacity(cmp::min(channels_count as usize, 22500)); + let mut channels = IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info: ChannelInfo = Readable::read(reader)?; @@ -1677,8 +1676,7 @@ where if nodes_count > u32::max_value() as u64 / 2 { return Err(DecodeError::InvalidValue); } - // In Nov, 2023 there were about 69K channels; we cap allocations to 1.5x that. - let mut nodes = IndexedMap::with_capacity(cmp::min(nodes_count as usize, 103500)); + let mut nodes = IndexedMap::with_capacity(NODE_COUNT_ESTIMATE); for i in 0..nodes_count { let node_id = Readable::read(reader)?; let mut node_info: NodeInfo = Readable::read(reader)?; @@ -1754,6 +1752,15 @@ where } } +// In Jan, 2025 there were about 49K channels. 
+// We over-allocate by a bit because 20% more is better than the double we get if we're slightly +// too low +const CHAN_COUNT_ESTIMATE: usize = 60_000; +// In Jan, 2025 there were about 15K nodes +// We over-allocate by a bit because 33% more is better than the double we get if we're slightly +// too low +const NODE_COUNT_ESTIMATE: usize = 20_000; + impl NetworkGraph where L::Target: Logger, @@ -1764,8 +1771,8 @@ where secp_ctx: Secp256k1::verification_only(), chain_hash: ChainHash::using_genesis_block(network), logger, - channels: RwLock::new(IndexedMap::new()), - nodes: RwLock::new(IndexedMap::new()), + channels: RwLock::new(IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE)), + nodes: RwLock::new(IndexedMap::with_capacity(NODE_COUNT_ESTIMATE)), next_node_counter: AtomicUsize::new(0), removed_node_counters: Mutex::new(Vec::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), @@ -2541,7 +2548,7 @@ where } }; - let node_pubkey; + let mut node_pubkey = None; { let channels = self.channels.read().unwrap(); match channels.get(&msg.short_channel_id) { @@ -2560,16 +2567,31 @@ where } else { channel.node_one.as_slice() }; - node_pubkey = PublicKey::from_slice(node_id).map_err(|_| LightningError { - err: "Couldn't parse source node pubkey".to_owned(), - action: ErrorAction::IgnoreAndLog(Level::Debug), - })?; + if sig.is_some() { + // PublicKey parsing isn't entirely trivial as it requires that we check + // that the provided point is on the curve. Thus, if we don't have a + // signature to verify, we want to skip the parsing step entirely. + // This represents a substantial speedup in applying RGS snapshots. 
+ node_pubkey = + Some(PublicKey::from_slice(node_id).map_err(|_| LightningError { + err: "Couldn't parse source node pubkey".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + })?); + } }, } } - let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]); if let Some(sig) = sig { + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]); + let node_pubkey = if let Some(pubkey) = node_pubkey { + pubkey + } else { + debug_assert!(false, "node_pubkey should have been decoded above"); + let err = "node_pubkey wasn't decoded but we need it to check a sig".to_owned(); + let action = ErrorAction::IgnoreAndLog(Level::Error); + return Err(LightningError { err, action }); + }; secp_verify_sig!(self.secp_ctx, &msg_hash, &sig, &node_pubkey, "channel_update"); } diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index ebc828407ed..ffe54e91af5 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1165,6 +1165,7 @@ impl_writeable_tlv_based!(RouteHintHop, { #[repr(align(64))] // Force the size to 64 bytes struct RouteGraphNode { node_id: NodeId, + node_counter: u32, score: u64, // The maximum value a yet-to-be-constructed payment path might flow through this node. // This value is upper-bounded by us by: @@ -1179,7 +1180,10 @@ struct RouteGraphNode { impl cmp::Ord for RouteGraphNode { fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering { - other.score.cmp(&self.score).then_with(|| other.node_id.cmp(&self.node_id)) + other.score.cmp(&self.score) + .then_with(|| self.value_contribution_msat.cmp(&other.value_contribution_msat)) + .then_with(|| other.path_length_to_node.cmp(&self.path_length_to_node)) + .then_with(|| other.node_counter.cmp(&self.node_counter)) } } @@ -1781,6 +1785,12 @@ struct PathBuildingHop<'a> { /// decrease as well. Thus, we have to explicitly track which nodes have been processed and /// avoid processing them again. 
was_processed: bool, + /// If we've already processed a channel backwards from a target node, we shouldn't update our + /// selected best path from that node to the destination. This should never happen, but with + /// multiple codepaths processing channels we've had issues here in the past, so in debug-mode + /// we track it and assert on it when processing a node. + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + best_path_from_hop_selected: bool, /// When processing a node as the next best-score candidate, we want to quickly check if it is /// a direct counterparty of ours, using our local channel information immediately if we can. /// @@ -1789,6 +1799,8 @@ struct PathBuildingHop<'a> { /// updated after being initialized - it is set at the start of a route-finding pass and only /// read thereafter. is_first_hop_target: bool, + /// Identical to the above, but for handling unblinded last-hops rather than first-hops. + is_last_hop_target: bool, /// Used to compare channels when choosing the for routing. /// Includes paying for the use of a hop and the following hops, as well as /// an estimated cost of reaching this hop. @@ -1809,11 +1821,7 @@ struct PathBuildingHop<'a> { /// The value will be actually deducted from the counterparty balance on the previous link. hop_use_fee_msat: u64, - #[cfg(all(not(ldk_bench), any(test, fuzzing)))] - // In tests, we apply further sanity checks on cases where we skip nodes we already processed - // to ensure it is specifically in cases where the fee has gone down because of a decrease in - // value_contribution_msat, which requires tracking it here. See comments below where it is - // used for more info. 
+ /// The quantity of funds we're willing to route over this channel value_contribution_msat: u64, } @@ -1828,15 +1836,14 @@ impl<'a> core::fmt::Debug for PathBuildingHop<'a> { .field("target_node_id", &self.candidate.target()) .field("short_channel_id", &self.candidate.short_channel_id()) .field("is_first_hop_target", &self.is_first_hop_target) + .field("is_last_hop_target", &self.is_last_hop_target) .field("total_fee_msat", &self.total_fee_msat) .field("next_hops_fee_msat", &self.next_hops_fee_msat) .field("hop_use_fee_msat", &self.hop_use_fee_msat) .field("total_fee_msat - (next_hops_fee_msat + hop_use_fee_msat)", &(&self.total_fee_msat.saturating_sub(self.next_hops_fee_msat).saturating_sub(self.hop_use_fee_msat))) .field("path_penalty_msat", &self.path_penalty_msat) .field("path_htlc_minimum_msat", &self.path_htlc_minimum_msat) - .field("cltv_expiry_delta", &self.candidate.cltv_expiry_delta()); - #[cfg(all(not(ldk_bench), any(test, fuzzing)))] - let debug_struct = debug_struct + .field("cltv_expiry_delta", &self.candidate.cltv_expiry_delta()) .field("value_contribution_msat", &self.value_contribution_msat); debug_struct.finish() } @@ -2267,8 +2274,10 @@ where L::Target: Logger { // Step (1). Prepare first and last hop targets. // - // First cache all our direct channels so that we can insert them in the heap at startup. - // Then process any blinded routes, resolving their introduction node and caching it. + // For unblinded first- and last-hop channels, cache them in maps so that we can detect them as + // we walk the graph and incorporate them into our candidate set. + // For blinded last-hop paths, look up their introduction point and cache the node counters + // identifying them. 
let mut first_hop_targets: HashMap<_, (Vec<&ChannelDetails>, u32)> = hash_map_with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 }); if let Some(hops) = first_hops { @@ -2300,6 +2309,56 @@ where L::Target: Logger { &payment_params, &node_counters, network_graph, &logger, our_node_id, &first_hop_targets, )?; + let mut last_hop_candidates = + hash_map_with_capacity(payment_params.payee.unblinded_route_hints().len()); + for route in payment_params.payee.unblinded_route_hints().iter() + .filter(|route| !route.0.is_empty()) + { + let hop_iter = route.0.iter().rev(); + let prev_hop_iter = core::iter::once(&maybe_dummy_payee_pk).chain( + route.0.iter().skip(1).rev().map(|hop| &hop.src_node_id)); + + for (hop, prev_hop_id) in hop_iter.zip(prev_hop_iter) { + let (target, private_target_node_counter) = + node_counters.private_node_counter_from_pubkey(&prev_hop_id) + .ok_or_else(|| { + debug_assert!(false); + LightningError { err: "We should always have private target node counters available".to_owned(), action: ErrorAction::IgnoreError } + })?; + let (_src_id, private_source_node_counter) = + node_counters.private_node_counter_from_pubkey(&hop.src_node_id) + .ok_or_else(|| { + debug_assert!(false); + LightningError { err: "We should always have private source node counters available".to_owned(), action: ErrorAction::IgnoreError } + })?; + + if let Some((first_channels, _)) = first_hop_targets.get(target) { + let matches_an_scid = |d: &&ChannelDetails| + d.outbound_scid_alias == Some(hop.short_channel_id) || d.short_channel_id == Some(hop.short_channel_id); + if first_channels.iter().any(matches_an_scid) { + log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.", + hop.short_channel_id); + break; + } + } + + let candidate = network_channels + .get(&hop.short_channel_id) + .and_then(|channel| channel.as_directed_to(target)) + .map(|(info, _)| 
CandidateRouteHop::PublicHop(PublicHopCandidate { + info, + short_channel_id: hop.short_channel_id, + })) + .unwrap_or_else(|| CandidateRouteHop::PrivateHop(PrivateHopCandidate { + hint: hop, target_node_id: target, + source_node_counter: *private_source_node_counter, + target_node_counter: *private_target_node_counter, + })); + + last_hop_candidates.entry(private_target_node_counter).or_insert_with(Vec::new).push(candidate); + } + } + // The main heap containing all candidate next-hops sorted by their score (max(fee, // htlc_minimum)). Ideally this would be a heap which allowed cheap score reduction instead of // adding duplicate entries when we find a better path to a given node. @@ -2383,6 +2442,19 @@ where L::Target: Logger { // We "return" whether we updated the path at the end, and how much we can route via // this channel, via this: let mut hop_contribution_amt_msat = None; + + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + if let Some(counter) = $candidate.target_node_counter() { + // Once we are adding paths backwards from a given target, we've selected the best + // path from that target to the destination and it should no longer change. We thus + // set the best-path selected flag and check that it doesn't change below. + if let Some(node) = &mut dist[counter as usize] { + node.best_path_from_hop_selected = true; + } else if counter != payee_node_counter { + panic!("No dist entry for target node counter {}", counter); + } + } + // Channels to self should not be used. 
This is more of belt-and-suspenders, because in // practice these cases should be caught earlier: // - for regular channels at channel announcement (TODO) @@ -2515,8 +2587,15 @@ where L::Target: Logger { let curr_min = cmp::max( $next_hops_path_htlc_minimum_msat, htlc_minimum_msat ); - let candidate_fees = $candidate.fees(); let src_node_counter = $candidate.src_node_counter(); + let mut candidate_fees = $candidate.fees(); + if src_node_counter == payer_node_counter { + // We do not charge ourselves a fee to use our own channels. + candidate_fees = RoutingFees { + proportional_millionths: 0, + base_msat: 0, + }; + } let path_htlc_minimum_msat = compute_fees_saturating(curr_min, candidate_fees) .saturating_add(curr_min); @@ -2539,7 +2618,9 @@ where L::Target: Logger { path_penalty_msat: u64::max_value(), was_processed: false, is_first_hop_target: false, + is_last_hop_target: false, #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + best_path_from_hop_selected: false, value_contribution_msat, }); dist_entry.as_mut().unwrap() @@ -2615,10 +2696,19 @@ where L::Target: Logger { .saturating_add(old_entry.path_penalty_msat); let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat) .saturating_add(path_penalty_msat); + let should_replace = + new_cost < old_cost + || (new_cost == old_cost && old_entry.value_contribution_msat < value_contribution_msat); + + if !old_entry.was_processed && should_replace { + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + { + assert!(!old_entry.best_path_from_hop_selected); + } - if !old_entry.was_processed && new_cost < old_cost { let new_graph_node = RouteGraphNode { node_id: src_node_id, + node_counter: src_node_counter, score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat), total_cltv_delta: hop_total_cltv_delta, value_contribution_msat, @@ -2632,10 +2722,7 @@ where L::Target: Logger { old_entry.fee_msat = 0; // This value will be later filled with hop_use_fee_msat of the following channel 
old_entry.path_htlc_minimum_msat = path_htlc_minimum_msat; old_entry.path_penalty_msat = path_penalty_msat; - #[cfg(all(not(ldk_bench), any(test, fuzzing)))] - { - old_entry.value_contribution_msat = value_contribution_msat; - } + old_entry.value_contribution_msat = value_contribution_msat; hop_contribution_amt_msat = Some(value_contribution_msat); } else if old_entry.was_processed && new_cost < old_cost { #[cfg(all(not(ldk_bench), any(test, fuzzing)))] @@ -2697,19 +2784,20 @@ where L::Target: Logger { // meaning how much will be paid in fees after this node (to the best of our knowledge). // This data can later be helpful to optimize routing (pay lower fees). macro_rules! add_entries_to_cheapest_to_target_node { - ( $node: expr, $node_id: expr, $next_hops_value_contribution: expr, + ( $node: expr, $node_counter: expr, $node_id: expr, $next_hops_value_contribution: expr, $next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => { let fee_to_target_msat; let next_hops_path_htlc_minimum_msat; let next_hops_path_penalty_msat; - let is_first_hop_target; - let skip_node = if let Some(elem) = &mut dist[$node.node_counter as usize] { + let (is_first_hop_target, is_last_hop_target); + let skip_node = if let Some(elem) = &mut dist[$node_counter as usize] { let was_processed = elem.was_processed; elem.was_processed = true; fee_to_target_msat = elem.total_fee_msat; next_hops_path_htlc_minimum_msat = elem.path_htlc_minimum_msat; next_hops_path_penalty_msat = elem.path_penalty_msat; is_first_hop_target = elem.is_first_hop_target; + is_last_hop_target = elem.is_last_hop_target; was_processed } else { // Entries are added to dist in add_entry!() when there is a channel from a node. 
@@ -2721,17 +2809,28 @@ where L::Target: Logger { next_hops_path_htlc_minimum_msat = 0; next_hops_path_penalty_msat = 0; is_first_hop_target = false; + is_last_hop_target = false; false }; if !skip_node { + if is_last_hop_target { + if let Some(candidates) = last_hop_candidates.get(&$node_counter) { + for candidate in candidates { + add_entry!(candidate, fee_to_target_msat, + $next_hops_value_contribution, + next_hops_path_htlc_minimum_msat, next_hops_path_penalty_msat, + $next_hops_cltv_delta, $next_hops_path_length); + } + } + } if is_first_hop_target { if let Some((first_channels, peer_node_counter)) = first_hop_targets.get(&$node_id) { for details in first_channels { - debug_assert_eq!(*peer_node_counter, $node.node_counter); + debug_assert_eq!(*peer_node_counter, $node_counter); let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { details, payer_node_id: &our_node_id, payer_node_counter, - target_node_counter: $node.node_counter, + target_node_counter: $node_counter, }); add_entry!(&candidate, fee_to_target_msat, $next_hops_value_contribution, @@ -2741,29 +2840,31 @@ where L::Target: Logger { } } - let features = if let Some(node_info) = $node.announcement_info.as_ref() { - node_info.features_ref() - } else { - &default_node_features - }; + if let Some(node) = $node { + let features = if let Some(node_info) = node.announcement_info.as_ref() { + node_info.features_ref() + } else { + &default_node_features + }; - if !features.requires_unknown_bits() { - for chan_id in $node.channels.iter() { - let chan = network_channels.get(chan_id).unwrap(); - if !chan.features.requires_unknown_bits() { - if let Some((directed_channel, source)) = chan.as_directed_to(&$node_id) { - if first_hops.is_none() || *source != our_node_id { - if directed_channel.direction().enabled { - let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate { - info: directed_channel, - short_channel_id: *chan_id, - }); - add_entry!(&candidate, - fee_to_target_msat, - 
$next_hops_value_contribution, - next_hops_path_htlc_minimum_msat, - next_hops_path_penalty_msat, - $next_hops_cltv_delta, $next_hops_path_length); + if !features.requires_unknown_bits() { + for chan_id in node.channels.iter() { + let chan = network_channels.get(chan_id).unwrap(); + if !chan.features.requires_unknown_bits() { + if let Some((directed_channel, source)) = chan.as_directed_to(&$node_id) { + if first_hops.is_none() || *source != our_node_id { + if directed_channel.direction().enabled { + let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate { + info: directed_channel, + short_channel_id: *chan_id, + }); + add_entry!(&candidate, + fee_to_target_msat, + $next_hops_value_contribution, + next_hops_path_htlc_minimum_msat, + next_hops_path_penalty_msat, + $next_hops_cltv_delta, $next_hops_path_length); + } } } } @@ -2784,13 +2885,23 @@ where L::Target: Logger { for e in dist.iter_mut() { *e = None; } + + // Step (2). + // Add entries for first-hop and last-hop channel hints to `dist` and add the payee node as + // the best entry via `add_entry`. + // For first- and last-hop hints we need only add dummy entries in `dist` with the relevant + // flags set. As we walk the graph in `add_entries_to_cheapest_to_target_node` we'll check + // those flags and add the channels described by the hints. + // We then either add the payee using `add_entries_to_cheapest_to_target_node` or add the + // blinded paths to the payee using `add_entry`, filling `targets` and setting us up for + // our graph walk. for (_, (chans, peer_node_counter)) in first_hop_targets.iter() { // In order to avoid looking up whether each node is a first-hop target, we store a // dummy entry in dist for each first-hop target, allowing us to do this lookup for // free since we're already looking at the `was_processed` flag. // - // Note that all the fields (except `is_first_hop_target`) will be overwritten whenever - // we find a path to the target, so are left as dummies here. 
+ // Note that all the fields (except `is_{first,last}_hop_target`) will be overwritten + // whenever we find a path to the target, so are left as dummies here. dist[*peer_node_counter as usize] = Some(PathBuildingHop { candidate: CandidateRouteHop::FirstHop(FirstHopCandidate { details: &chans[0], @@ -2806,50 +2917,66 @@ where L::Target: Logger { path_penalty_msat: u64::max_value(), was_processed: false, is_first_hop_target: true, - #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + is_last_hop_target: false, value_contribution_msat: 0, + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + best_path_from_hop_selected: false, }); } - hit_minimum_limit = false; - - // If first hop is a private channel and the only way to reach the payee, this is the only - // place where it could be added. - payee_node_id_opt.map(|payee| first_hop_targets.get(&payee).map(|(first_channels, peer_node_counter)| { - debug_assert_eq!(*peer_node_counter, payee_node_counter); - for details in first_channels { - let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, payer_node_counter, - target_node_counter: payee_node_counter, + for (target_node_counter, candidates) in last_hop_candidates.iter() { + // In order to avoid looking up whether each node is a last-hop target, we store a + // dummy entry in dist for each last-hop target, allowing us to do this lookup for + // free since we're already looking at the `was_processed` flag. + // + // Note that all the fields (except `is_{first,last}_hop_target`) will be overwritten + // whenever we find a path to the target, so are left as dummies here. 
+ debug_assert!(!candidates.is_empty()); + if candidates.is_empty() { continue } + let entry = &mut dist[**target_node_counter as usize]; + if let Some(hop) = entry { + hop.is_last_hop_target = true; + } else { + *entry = Some(PathBuildingHop { + candidate: candidates[0].clone(), + fee_msat: 0, + next_hops_fee_msat: u64::max_value(), + hop_use_fee_msat: u64::max_value(), + total_fee_msat: u64::max_value(), + path_htlc_minimum_msat: u64::max_value(), + path_penalty_msat: u64::max_value(), + was_processed: false, + is_first_hop_target: false, + is_last_hop_target: true, + value_contribution_msat: 0, + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + best_path_from_hop_selected: false, }); - let added = add_entry!(&candidate, 0, path_value_msat, - 0, 0u64, 0, 0).is_some(); - log_trace!(logger, "{} direct route to payee via {}", - if added { "Added" } else { "Skipped" }, LoggedCandidateHop(&candidate)); } - })); + } + hit_minimum_limit = false; - // Add the payee as a target, so that the payee-to-payer - // search algorithm knows what to start with. - payee_node_id_opt.map(|payee| match network_nodes.get(&payee) { - // The payee is not in our network graph, so nothing to add here. - // There is still a chance of reaching them via last_hops though, - // so don't yet fail the payment here. - // If not, targets.pop() will not even let us enter the loop in step 2. - None => {}, - Some(node) => { - add_entries_to_cheapest_to_target_node!(node, payee, path_value_msat, 0, 0); - }, - }); + if let Some(payee) = payee_node_id_opt { + if let Some(entry) = &mut dist[payee_node_counter as usize] { + // If we built a dummy entry above we need to reset the values to represent 0 fee + // from the target "to the target". 
+ entry.next_hops_fee_msat = 0; + entry.hop_use_fee_msat = 0; + entry.total_fee_msat = 0; + entry.path_htlc_minimum_msat = 0; + entry.path_penalty_msat = 0; + entry.value_contribution_msat = path_value_msat; + } + add_entries_to_cheapest_to_target_node!( + network_nodes.get(&payee), payee_node_counter, payee, path_value_msat, 0, 0 + ); + } - // Step (2). - // If a caller provided us with last hops, add them to routing targets. Since this happens - // earlier than general path finding, they will be somewhat prioritized, although currently - // it matters only if the fees are exactly the same. debug_assert_eq!( payment_params.payee.blinded_route_hints().len(), introduction_node_id_cache.len(), "introduction_node_id_cache was built by iterating the blinded_route_hints, so they should be the same len" ); + let mut blind_intros_added = hash_map_with_capacity(payment_params.payee.blinded_route_hints().len()); for (hint_idx, hint) in payment_params.payee.blinded_route_hints().iter().enumerate() { // Only add the hops in this route to our candidate set if either // we have a direct channel to the first hop or the first hop is @@ -2864,12 +2991,21 @@ where L::Target: Logger { } else { CandidateRouteHop::Blinded(BlindedPathCandidate { source_node_counter, source_node_id, hint, hint_idx }) }; - let mut path_contribution_msat = path_value_msat; if let Some(hop_used_msat) = add_entry!(&candidate, - 0, path_contribution_msat, 0, 0_u64, 0, 0) + 0, path_value_msat, 0, 0_u64, 0, 0) { - path_contribution_msat = hop_used_msat; + blind_intros_added.insert(source_node_id, (hop_used_msat, candidate)); } else { continue } + } + // If we added a blinded path from an introduction node to the destination, where the + // introduction node is one of our direct peers, we need to scan our `first_channels` + // to detect this. 
However, doing so immediately after calling `add_entry`, above, could + // result in incorrect behavior if we, in a later loop iteration, update the fee from the + // same introduction point to the destination (due to a different blinded path with the + // same introduction point having a lower score). + // Thus, we track the nodes that we added paths from in `blind_intros_added` and scan for + // introduction points we have a channel with after processing all blinded paths. + for (source_node_id, (path_contribution_msat, candidate)) in blind_intros_added { if let Some((first_channels, peer_node_counter)) = first_hop_targets.get_mut(source_node_id) { sort_first_hop_channels( first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey @@ -2890,165 +3026,6 @@ where L::Target: Logger { } } } - for route in payment_params.payee.unblinded_route_hints().iter() - .filter(|route| !route.0.is_empty()) - { - let first_hop_src_id = NodeId::from_pubkey(&route.0.first().unwrap().src_node_id); - let first_hop_src_is_reachable = - // Only add the hops in this route to our candidate set if either we are part of - // the first hop, we have a direct channel to the first hop, or the first hop is in - // the regular network graph. - our_node_id == first_hop_src_id || - first_hop_targets.get(&first_hop_src_id).is_some() || - network_nodes.get(&first_hop_src_id).is_some(); - if first_hop_src_is_reachable { - // We start building the path from reverse, i.e., from payee - // to the first RouteHintHop in the path. 
- let hop_iter = route.0.iter().rev(); - let prev_hop_iter = core::iter::once(&maybe_dummy_payee_pk).chain( - route.0.iter().skip(1).rev().map(|hop| &hop.src_node_id)); - let mut hop_used = true; - let mut aggregate_next_hops_fee_msat: u64 = 0; - let mut aggregate_next_hops_path_htlc_minimum_msat: u64 = 0; - let mut aggregate_next_hops_path_penalty_msat: u64 = 0; - let mut aggregate_next_hops_cltv_delta: u32 = 0; - let mut aggregate_next_hops_path_length: u8 = 0; - let mut aggregate_path_contribution_msat = path_value_msat; - - for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() { - let (target, private_target_node_counter) = - node_counters.private_node_counter_from_pubkey(&prev_hop_id) - .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here"); - let (_src_id, private_source_node_counter) = - node_counters.private_node_counter_from_pubkey(&hop.src_node_id) - .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here"); - - if let Some((first_channels, _)) = first_hop_targets.get(target) { - if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) { - log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.", - hop.short_channel_id); - break; - } - } - - let candidate = network_channels - .get(&hop.short_channel_id) - .and_then(|channel| channel.as_directed_to(target)) - .map(|(info, _)| CandidateRouteHop::PublicHop(PublicHopCandidate { - info, - short_channel_id: hop.short_channel_id, - })) - .unwrap_or_else(|| CandidateRouteHop::PrivateHop(PrivateHopCandidate { - hint: hop, target_node_id: target, - source_node_counter: *private_source_node_counter, - target_node_counter: *private_target_node_counter, - })); - - if let Some(hop_used_msat) = add_entry!(&candidate, - aggregate_next_hops_fee_msat, aggregate_path_contribution_msat, - 
aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat, - aggregate_next_hops_cltv_delta, aggregate_next_hops_path_length) - { - aggregate_path_contribution_msat = hop_used_msat; - } else { - // If this hop was not used then there is no use checking the preceding - // hops in the RouteHint. We can break by just searching for a direct - // channel between last checked hop and first_hop_targets. - hop_used = false; - } - - let used_liquidity_msat = used_liquidities - .get(&candidate.id()).copied() - .unwrap_or(0); - let channel_usage = ChannelUsage { - amount_msat: final_value_msat + aggregate_next_hops_fee_msat, - inflight_htlc_msat: used_liquidity_msat, - effective_capacity: candidate.effective_capacity(), - }; - let channel_penalty_msat = scorer.channel_penalty_msat( - &candidate, channel_usage, score_params - ); - aggregate_next_hops_path_penalty_msat = aggregate_next_hops_path_penalty_msat - .saturating_add(channel_penalty_msat); - - aggregate_next_hops_cltv_delta = aggregate_next_hops_cltv_delta - .saturating_add(hop.cltv_expiry_delta as u32); - - aggregate_next_hops_path_length = aggregate_next_hops_path_length - .saturating_add(1); - - // Searching for a direct channel between last checked hop and first_hop_targets - if let Some((first_channels, peer_node_counter)) = first_hop_targets.get_mut(target) { - sort_first_hop_channels( - first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey - ); - for details in first_channels { - let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, payer_node_counter, - target_node_counter: *peer_node_counter, - }); - add_entry!(&first_hop_candidate, - aggregate_next_hops_fee_msat, aggregate_path_contribution_msat, - aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat, - aggregate_next_hops_cltv_delta, aggregate_next_hops_path_length); - } - } - - if !hop_used { - break; - } - - // In the next 
values of the iterator, the aggregate fees already reflects - // the sum of value sent from payer (final_value_msat) and routing fees - // for the last node in the RouteHint. We need to just add the fees to - // route through the current node so that the preceding node (next iteration) - // can use it. - let hops_fee = compute_fees(aggregate_next_hops_fee_msat + final_value_msat, hop.fees) - .map_or(None, |inc| inc.checked_add(aggregate_next_hops_fee_msat)); - aggregate_next_hops_fee_msat = if let Some(val) = hops_fee { val } else { break; }; - - // The next channel will need to relay this channel's min_htlc *plus* the fees taken by - // this route hint's source node to forward said min over this channel. - aggregate_next_hops_path_htlc_minimum_msat = { - let curr_htlc_min = cmp::max( - candidate.htlc_minimum_msat(), aggregate_next_hops_path_htlc_minimum_msat - ); - let curr_htlc_min_fee = if let Some(val) = compute_fees(curr_htlc_min, hop.fees) { val } else { break }; - if let Some(min) = curr_htlc_min.checked_add(curr_htlc_min_fee) { min } else { break } - }; - - if idx == route.0.len() - 1 { - // The last hop in this iterator is the first hop in - // overall RouteHint. - // If this hop connects to a node with which we have a direct channel, - // ignore the network graph and, if the last hop was added, add our - // direct channel to the candidate set. - // - // Note that we *must* check if the last hop was added as `add_entry` - // always assumes that the third argument is a node to which we have a - // path. 
- if let Some((first_channels, peer_node_counter)) = first_hop_targets.get_mut(&NodeId::from_pubkey(&hop.src_node_id)) { - sort_first_hop_channels( - first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey - ); - for details in first_channels { - let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, payer_node_counter, - target_node_counter: *peer_node_counter, - }); - add_entry!(&first_hop_candidate, - aggregate_next_hops_fee_msat, - aggregate_path_contribution_msat, - aggregate_next_hops_path_htlc_minimum_msat, - aggregate_next_hops_path_penalty_msat, - aggregate_next_hops_cltv_delta, - aggregate_next_hops_path_length); - } - } - } - } - } - } log_trace!(logger, "Starting main path collection loop with {} nodes pre-filled from first/last hops.", targets.len()); @@ -3065,7 +3042,7 @@ where L::Target: Logger { // Both these cases (and other cases except reaching recommended_value_msat) mean that // paths_collection will be stopped because found_new_path==false. // This is not necessarily a routing failure. - 'path_construction: while let Some(RouteGraphNode { node_id, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { + 'path_construction: while let Some(RouteGraphNode { node_id, node_counter, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { // Since we're going payee-to-payer, hitting our node as a target means we should stop // traversing the graph and arrange the path out of what we found. @@ -3200,14 +3177,11 @@ where L::Target: Logger { // Otherwise, since the current target node is not us, // keep "unrolling" the payment graph from payee to payer by // finding a way to reach the current target from the payer side. 
- match network_nodes.get(&node_id) { - None => {}, - Some(node) => { - add_entries_to_cheapest_to_target_node!(node, node_id, - value_contribution_msat, - total_cltv_delta, path_length_to_node); - }, - } + add_entries_to_cheapest_to_target_node!( + network_nodes.get(&node_id), node_counter, node_id, + value_contribution_msat, + total_cltv_delta, path_length_to_node + ); } if !allow_mpp { @@ -5735,187 +5709,33 @@ mod tests { } #[test] - fn long_mpp_route_test() { - let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph(); - let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx); - let scorer = ln_test_utils::TestScorer::new(); - let random_seed_bytes = [42; 32]; - let config = UserConfig::default(); - let payment_params = PaymentParameters::from_node_id(nodes[3], 42) - .with_bolt11_features(channelmanager::provided_bolt11_invoice_features(&config)) - .unwrap(); - - // We need a route consisting of 3 paths: - // From our node to node3 via {node0, node2}, {node7, node2, node4} and {node7, node2}. - // Note that these paths overlap (channels 5, 12, 13). - // We will route 300 sats. - // Each path will have 100 sats capacity, those channels which - // are used twice will have 200 sats capacity. - - // Disable other potential paths. 
- update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 2, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 2, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 7, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 2, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - // Path via {node0, node2} is channels {1, 3, 5}. - update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 1, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[0], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 3, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - // Capacity of 200 sats because this channel will be used by 3rd path as well. 
- add_channel(&gossip_sync, &secp_ctx, &privkeys[2], &privkeys[3], ChannelFeatures::from_le_bytes(id_to_feature_flags(5)), 5); - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 5, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[3], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 5, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 3, // disable direction 1 - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - // Path via {node7, node2, node4} is channels {12, 13, 6, 11}. - // Add 100 sats to the capacities of {12, 13}, because these channels - // are also used for 3rd path. 100 sats for the rest. Total capacity: 100 sats. 
- update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 12, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[7], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 13, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 6, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[4], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 11, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - // Path via {node7, node2} is channels {12, 13, 5}. - // We already limited them to 200 sats (they are used twice for 100 sats). - // Nothing to do here. - + fn mpp_tests() { + let secp_ctx = Secp256k1::new(); + let (_, _, _, nodes) = get_nodes(&secp_ctx); { - // Attempt to route more than available results in a failure. 
- let route_params = RouteParameters::from_payment_params_and_value( - payment_params.clone(), 350_000); - if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route( - &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger), - &scorer, &Default::default(), &random_seed_bytes) { - assert_eq!(err, "Failed to find a sufficient route to the given destination"); - } else { panic!(); } - } + // Check that if we have two cheaper paths and a more expensive (fewer hops) path, we + // choose the two cheaper paths: + let route = do_mpp_route_tests(180_000).unwrap(); + assert_eq!(route.paths.len(), 2); + let mut total_value_transferred_msat = 0; + let mut total_paid_msat = 0; + for path in &route.paths { + assert_eq!(path.hops.last().unwrap().pubkey, nodes[3]); + total_value_transferred_msat += path.final_value_msat(); + for hop in &path.hops { + total_paid_msat += hop.fee_msat; + } + } + // If we paid fee, this would be higher. + assert_eq!(total_value_transferred_msat, 180_000); + let total_fees_paid = total_paid_msat - total_value_transferred_msat; + assert_eq!(total_fees_paid, 0); + } { - // Now, attempt to route 300 sats (exact amount we can route). - // Our algorithm should provide us with these 3 paths, 100 sats each. - let route_params = RouteParameters::from_payment_params_and_value( - payment_params, 300_000); - let route = get_route(&our_id, &route_params, &network_graph.read_only(), None, - Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap(); + // Check that if we use the same channels but need to send more than we could fit in + // the cheaper paths we select all three paths: + let route = do_mpp_route_tests(300_000).unwrap(); assert_eq!(route.paths.len(), 3); let mut total_amount_paid_msat = 0; @@ -5925,11 +5745,11 @@ mod tests { } assert_eq!(total_amount_paid_msat, 300_000); } - + // Check that trying to pay more than our available liquidity fails. 
+ assert!(do_mpp_route_tests(300_001).is_err()); } - #[test] - fn mpp_cheaper_route_test() { + fn do_mpp_route_tests(amt: u64) -> Result { let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph(); let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx); let scorer = ln_test_utils::TestScorer::new(); @@ -5939,21 +5759,17 @@ mod tests { .with_bolt11_features(channelmanager::provided_bolt11_invoice_features(&config)) .unwrap(); - // This test checks that if we have two cheaper paths and one more expensive path, - // so that liquidity-wise any 2 of 3 combination is sufficient, - // two cheaper paths will be taken. - // These paths have equal available liquidity. - - // We need a combination of 3 paths: - // From our node to node3 via {node0, node2}, {node7, node2, node4} and {node7, node2}. - // Note that these paths overlap (channels 5, 12, 13). - // Each path will have 100 sats capacity, those channels which - // are used twice will have 200 sats capacity. + // Build a setup where we have three potential paths from us to node3: + // {node0, node2, node4} (channels 1, 3, 6, 11), fee 0 msat, + // {node7, node2, node4} (channels 12, 13, 6, 11), fee 0 msat, and + // {node1} (channel 2, then a new channel 16), fee 1000 msat. + // Note that these paths overlap on channels 6 and 11. + // Each channel will have 100 sats capacity except for 6 and 11, which have 200. // Disable other potential paths. 
- update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { + update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 2, + short_channel_id: 7, timestamp: 2, message_flags: 1, // Only must_be_one channel_flags: 2, @@ -5964,9 +5780,9 @@ mod tests { fee_proportional_millionths: 0, excess_data: Vec::new() }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { + update_channel(&gossip_sync, &secp_ctx, &privkeys[1], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 7, + short_channel_id: 4, timestamp: 2, message_flags: 1, // Only must_be_one channel_flags: 2, @@ -6006,31 +5822,30 @@ mod tests { excess_data: Vec::new() }); - // Capacity of 200 sats because this channel will be used by 3rd path as well. - add_channel(&gossip_sync, &secp_ctx, &privkeys[2], &privkeys[3], ChannelFeatures::from_le_bytes(id_to_feature_flags(5)), 5); - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { + add_channel(&gossip_sync, &secp_ctx, &privkeys[1], &privkeys[3], ChannelFeatures::from_le_bytes(id_to_feature_flags(16)), 16); + update_channel(&gossip_sync, &secp_ctx, &privkeys[1], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 5, + short_channel_id: 16, timestamp: 2, message_flags: 1, // Only must_be_one channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, + htlc_maximum_msat: 100_000, + fee_base_msat: 1_000, fee_proportional_millionths: 0, excess_data: Vec::new() }); update_channel(&gossip_sync, &secp_ctx, &privkeys[3], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 5, + short_channel_id: 16, timestamp: 2, message_flags: 1, // Only must_be_one channel_flags: 3, // disable direction 1 
cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, + htlc_maximum_msat: 100_000, + fee_base_msat: 1_000, fee_proportional_millionths: 0, excess_data: Vec::new() }); @@ -6046,7 +5861,7 @@ mod tests { channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, + htlc_maximum_msat: 100_000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() @@ -6059,7 +5874,7 @@ mod tests { channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, + htlc_maximum_msat: 100_000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() @@ -6073,8 +5888,8 @@ mod tests { channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 1_000, + htlc_maximum_msat: 200_000, + fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() }); @@ -6086,7 +5901,7 @@ mod tests { channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, + htlc_maximum_msat: 200_000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() @@ -6096,29 +5911,11 @@ mod tests { // We already limited them to 200 sats (they are used twice for 100 sats). // Nothing to do here. - { - // Now, attempt to route 180 sats. - // Our algorithm should provide us with these 2 paths. 
- let route_params = RouteParameters::from_payment_params_and_value( - payment_params, 180_000); - let route = get_route(&our_id, &route_params, &network_graph.read_only(), None, - Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap(); - assert_eq!(route.paths.len(), 2); - - let mut total_value_transferred_msat = 0; - let mut total_paid_msat = 0; - for path in &route.paths { - assert_eq!(path.hops.last().unwrap().pubkey, nodes[3]); - total_value_transferred_msat += path.final_value_msat(); - for hop in &path.hops { - total_paid_msat += hop.fee_msat; - } - } - // If we paid fee, this would be higher. - assert_eq!(total_value_transferred_msat, 180_000); - let total_fees_paid = total_paid_msat - total_value_transferred_msat; - assert_eq!(total_fees_paid, 0); - } + let route_params = RouteParameters::from_payment_params_and_value( + payment_params, amt); + let res = get_route(&our_id, &route_params, &network_graph.read_only(), None, + Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes); + res } #[test] diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index 112f881abe0..8ec9aa64323 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -828,16 +828,6 @@ struct ChannelLiquidity { offset_history_last_updated: Duration, } -// Check that the liquidity HashMap's entries sit on round cache lines. -// -// Specifically, the first cache line will have the key, the liquidity offsets, and the total -// points tracked in the historical tracker. -// -// The next two cache lines will have the historical points, which we only access last during -// scoring, followed by the last_updated `Duration`s (which we do not need during scoring). 
-const _LIQUIDITY_MAP_SIZING_CHECK: usize = 192 - ::core::mem::size_of::<(u64, ChannelLiquidity)>(); -const _LIQUIDITY_MAP_SIZING_CHECK_2: usize = ::core::mem::size_of::<(u64, ChannelLiquidity)>() - 192; - /// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity. struct DirectedChannelLiquidity, HT: Deref, T: Deref> { min_liquidity_offset_msat: L, @@ -1813,13 +1803,21 @@ mod bucketed_history { self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE); } } + + /// Applies decay at the given half-life to all buckets. + fn decay(&mut self, half_lives: f64) { + let factor = (1024.0 * powf64(0.5, half_lives)) as u64; + for bucket in self.buckets.iter_mut() { + *bucket = ((*bucket as u64) * factor / 1024) as u16; + } + } } impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) }); impl_writeable_tlv_based!(LegacyHistoricalBucketRangeTracker, { (0, buckets, required) }); #[derive(Clone, Copy)] - #[repr(C)] // Force the fields in memory to be in the order we specify. + #[repr(C)]// Force the fields in memory to be in the order we specify. pub(super) struct HistoricalLiquidityTracker { // This struct sits inside a `(u64, ChannelLiquidity)` in memory, and we first read the // liquidity offsets in `ChannelLiquidity` when calculating the non-historical score. 
This @@ -1867,26 +1865,23 @@ mod bucketed_history { } pub(super) fn decay_buckets(&mut self, half_lives: f64) { - let divisor = powf64(2048.0, half_lives) as u64; - for bucket in self.min_liquidity_offset_history.buckets.iter_mut() { - *bucket = ((*bucket as u64) * 1024 / divisor) as u16; - } - for bucket in self.max_liquidity_offset_history.buckets.iter_mut() { - *bucket = ((*bucket as u64) * 1024 / divisor) as u16; - } + self.min_liquidity_offset_history.decay(half_lives); + self.max_liquidity_offset_history.decay(half_lives); self.recalculate_valid_point_count(); } fn recalculate_valid_point_count(&mut self) { - let mut total_valid_points_tracked = 0; + let mut total_valid_points_tracked = 0u128; for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() { for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(32 - min_idx) { // In testing, raising the weights of buckets to a high power led to better // scoring results. Thus, we raise the bucket weights to the 4th power here (by - // squaring the result of multiplying the weights). + // squaring the result of multiplying the weights). This results in + // bucket_weight having at max 64 bits, which means we have to do our summation + // in 128-bit math. 
let mut bucket_weight = (*min_bucket as u64) * (*max_bucket as u64); bucket_weight *= bucket_weight; - total_valid_points_tracked += bucket_weight; + total_valid_points_tracked += bucket_weight as u128; } } self.total_valid_points_tracked = total_valid_points_tracked as f64; @@ -1972,12 +1967,12 @@ mod bucketed_history { let total_valid_points_tracked = self.tracker.total_valid_points_tracked; #[cfg(debug_assertions)] { - let mut actual_valid_points_tracked = 0; + let mut actual_valid_points_tracked = 0u128; for (min_idx, min_bucket) in min_liquidity_offset_history_buckets.iter().enumerate() { for max_bucket in max_liquidity_offset_history_buckets.iter().take(32 - min_idx) { let mut bucket_weight = (*min_bucket as u64) * (*max_bucket as u64); bucket_weight *= bucket_weight; - actual_valid_points_tracked += bucket_weight; + actual_valid_points_tracked += bucket_weight as u128; } } assert_eq!(total_valid_points_tracked, actual_valid_points_tracked as f64); @@ -2004,7 +1999,7 @@ mod bucketed_history { // max-bucket with at least BUCKET_FIXED_POINT_ONE. let mut highest_max_bucket_with_points = 0; let mut highest_max_bucket_with_full_points = None; - let mut total_weight = 0; + let mut total_weight = 0u128; for (max_idx, max_bucket) in max_liquidity_offset_history_buckets.iter().enumerate() { if *max_bucket >= BUCKET_FIXED_POINT_ONE { highest_max_bucket_with_full_points = Some(cmp::max(highest_max_bucket_with_full_points.unwrap_or(0), max_idx)); @@ -2017,7 +2012,7 @@ mod bucketed_history { // squaring the result of multiplying the weights), matching the logic in // `recalculate_valid_point_count`. 
let bucket_weight = (*max_bucket as u64) * (min_liquidity_offset_history_buckets[0] as u64); - total_weight += bucket_weight * bucket_weight; + total_weight += (bucket_weight * bucket_weight) as u128; } debug_assert!(total_weight as f64 <= total_valid_points_tracked); // Use the highest max-bucket with at least BUCKET_FIXED_POINT_ONE, but if none is @@ -2067,6 +2062,50 @@ mod bucketed_history { Some((cumulative_success_prob * (1024.0 * 1024.0 * 1024.0)) as u64) } } + + #[cfg(test)] + mod tests { + use super::{HistoricalBucketRangeTracker, HistoricalLiquidityTracker, ProbabilisticScoringFeeParameters}; + + #[test] + fn historical_liquidity_bucket_decay() { + let mut bucket = HistoricalBucketRangeTracker::new(); + bucket.track_datapoint(100, 1000); + assert_eq!( + bucket.buckets, + [ + 0u16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + ] + ); + + bucket.decay(2.0); + assert_eq!( + bucket.buckets, + [ + 0u16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 + ] + ); + } + + #[test] + fn historical_heavy_buckets_operations() { + // Checks that we don't hit overflows when working with tons of data (even an + // impossible-to-reach amount of data). 
+ let mut tracker = HistoricalLiquidityTracker::new(); + tracker.min_liquidity_offset_history.buckets = [0xffff; 32]; + tracker.max_liquidity_offset_history.buckets = [0xffff; 32]; + tracker.recalculate_valid_point_count(); + + let mut directed = tracker.as_directed_mut(true); + let default_params = ProbabilisticScoringFeeParameters::default(); + directed.calculate_success_probability_times_billion(&default_params, 42, 1000); + directed.track_datapoint(42, 52, 1000); + + tracker.decay_buckets(1.0); + } + } } use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, DirectedHistoricalLiquidityTracker, HistoricalLiquidityTracker}; diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs index f142328e45c..991a71ffbe0 100644 --- a/lightning/src/sync/debug_sync.rs +++ b/lightning/src/sync/debug_sync.rs @@ -5,15 +5,16 @@ use core::time::Duration; use std::cell::RefCell; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Condvar as StdCondvar; -use std::sync::Mutex as StdMutex; -use std::sync::MutexGuard as StdMutexGuard; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::RwLock as StdRwLock; use std::sync::RwLockReadGuard as StdRwLockReadGuard; use std::sync::RwLockWriteGuard as StdRwLockWriteGuard; -pub use std::sync::WaitTimeoutResult; +use parking_lot::Condvar as StdCondvar; +use parking_lot::Mutex as StdMutex; +use parking_lot::MutexGuard as StdMutexGuard; + +pub use parking_lot::WaitTimeoutResult; use crate::prelude::*; @@ -46,10 +47,9 @@ impl Condvar { &'a self, guard: MutexGuard<'a, T>, condition: F, ) -> LockResult> { let mutex: &'a Mutex = guard.mutex; - self.inner - .wait_while(guard.into_inner(), condition) - .map(|lock| MutexGuard { mutex, lock }) - .map_err(|_| ()) + let mut lock = guard.into_inner(); + self.inner.wait_while(&mut lock, condition); + Ok(MutexGuard { mutex, lock: Some(lock) }) } #[allow(unused)] @@ -57,10 +57,9 @@ impl Condvar { &'a self, guard: 
MutexGuard<'a, T>, dur: Duration, condition: F, ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> { let mutex = guard.mutex; - self.inner - .wait_timeout_while(guard.into_inner(), dur, condition) - .map_err(|_| ()) - .map(|(lock, e)| (MutexGuard { mutex, lock }, e)) + let mut lock = guard.into_inner(); + let e = self.inner.wait_while_for(&mut lock, condition, dur); + Ok((MutexGuard { mutex, lock: Some(lock) }, e)) } pub fn notify_all(&self) { @@ -150,7 +149,7 @@ impl LockMetadata { LOCKS_INIT.call_once(|| unsafe { LOCKS = Some(StdMutex::new(new_hash_map())); }); - let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap(); + let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock(); match locks.entry(lock_constr_location) { hash_map::Entry::Occupied(e) => { assert_eq!(lock_constr_colno, @@ -185,7 +184,7 @@ impl LockMetadata { } } for (_locked_idx, locked) in held.borrow().iter() { - for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() { + for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().iter() { let is_dep_this_lock = *locked_dep_idx == this.lock_idx; let has_same_construction = *locked_dep_idx == locked.lock_idx; if is_dep_this_lock && !has_same_construction { @@ -210,7 +209,7 @@ impl LockMetadata { } } // Insert any already-held locks in our locked-before set. - let mut locked_before = this.locked_before.lock().unwrap(); + let mut locked_before = this.locked_before.lock(); if !locked_before.contains_key(&locked.lock_idx) { let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() }; locked_before.insert(lockdep.lock.lock_idx, lockdep); @@ -237,7 +236,7 @@ impl LockMetadata { // Since a try-lock will simply fail if the lock is held already, we do not // consider try-locks to ever generate lockorder inversions. However, if a try-lock // succeeds, we do consider it to have created lockorder dependencies. 
- let mut locked_before = this.locked_before.lock().unwrap(); + let mut locked_before = this.locked_before.lock(); for (locked_idx, locked) in held.borrow().iter() { if !locked_before.contains_key(locked_idx) { let lockdep = @@ -252,11 +251,17 @@ impl LockMetadata { pub struct Mutex { inner: StdMutex, + poisoned: AtomicBool, deps: Arc, } + impl Mutex { pub(crate) fn into_inner(self) -> LockResult { - self.inner.into_inner().map_err(|_| ()) + if self.poisoned.load(Ordering::Acquire) { + Err(()) + } else { + Ok(self.inner.into_inner()) + } } } @@ -278,14 +283,14 @@ impl fmt::Debug for Mutex { #[must_use = "if unused the Mutex will immediately unlock"] pub struct MutexGuard<'a, T: Sized + 'a> { mutex: &'a Mutex, - lock: StdMutexGuard<'a, T>, + lock: Option>, } impl<'a, T: Sized> MutexGuard<'a, T> { fn into_inner(self) -> StdMutexGuard<'a, T> { // Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509. unsafe { - let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock); + let v: StdMutexGuard<'a, T> = std::ptr::read(self.lock.as_ref().unwrap()); std::mem::forget(self); v } @@ -297,6 +302,10 @@ impl Drop for MutexGuard<'_, T> { LOCKS_HELD.with(|held| { held.borrow_mut().remove(&self.mutex.deps.lock_idx); }); + if std::thread::panicking() { + self.mutex.poisoned.store(true, Ordering::Release); + } + StdMutexGuard::unlock_fair(self.lock.take().unwrap()); } } @@ -304,37 +313,52 @@ impl Deref for MutexGuard<'_, T> { type Target = T; fn deref(&self) -> &T { - &self.lock.deref() + &self.lock.as_ref().unwrap().deref() } } impl DerefMut for MutexGuard<'_, T> { fn deref_mut(&mut self) -> &mut T { - self.lock.deref_mut() + self.lock.as_mut().unwrap().deref_mut() } } impl Mutex { pub fn new(inner: T) -> Mutex { - Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() } + Mutex { + inner: StdMutex::new(inner), + poisoned: AtomicBool::new(false), + deps: LockMetadata::new(), + } } pub fn lock<'a>(&'a self) -> LockResult> { 
LockMetadata::pre_lock(&self.deps, false); - self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ()) + let lock = self.inner.lock(); + if self.poisoned.load(Ordering::Acquire) { + Err(()) + } else { + Ok(MutexGuard { mutex: self, lock: Some(lock) }) + } } pub fn try_lock<'a>(&'a self) -> LockResult> { - let res = - self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ()); + let res = self.inner.try_lock().ok_or(()); if res.is_ok() { + if self.poisoned.load(Ordering::Acquire) { + return Err(()); + } LockMetadata::try_locked(&self.deps); } - res + res.map(|lock| MutexGuard { mutex: self, lock: Some(lock) }) } pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> { - self.inner.get_mut().map_err(|_| ()) + if self.poisoned.load(Ordering::Acquire) { + Err(()) + } else { + Ok(self.inner.get_mut()) + } } } @@ -345,9 +369,10 @@ impl<'a, T: 'a> LockTestExt<'a> for Mutex { } type ExclLock = MutexGuard<'a, T>; #[inline] - fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard { + fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<'a, T> { LockMetadata::pre_lock(&self.deps, true); - self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap() + let lock = self.inner.lock(); + MutexGuard { mutex: self, lock: Some(lock) } } } diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 5caafaddb34..4c6aac68600 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -333,11 +333,29 @@ impl SignerProvider for OnlyReadsKeysInterface { fn get_shutdown_scriptpubkey(&self) -> Result { Err(()) } } +#[cfg(feature = "std")] +pub trait SyncBroadcaster: chaininterface::BroadcasterInterface + Sync {} +#[cfg(feature = "std")] +pub trait SyncPersist: chainmonitor::Persist + Sync {} +#[cfg(feature = "std")] +impl SyncBroadcaster for T {} +#[cfg(feature = "std")] +impl + Sync> SyncPersist for T {} + +#[cfg(not(feature = "std"))] +pub trait 
SyncBroadcaster: chaininterface::BroadcasterInterface {} +#[cfg(not(feature = "std"))] +pub trait SyncPersist: chainmonitor::Persist {} +#[cfg(not(feature = "std"))] +impl SyncBroadcaster for T {} +#[cfg(not(feature = "std"))] +impl> SyncPersist for T {} + pub struct TestChainMonitor<'a> { pub added_monitors: Mutex)>>, pub monitor_updates: Mutex>>, pub latest_monitor_update_id: Mutex>, - pub chain_monitor: chainmonitor::ChainMonitor>, + pub chain_monitor: chainmonitor::ChainMonitor, pub keys_manager: &'a TestKeysInterface, /// If this is set to Some(), the next update_channel call (not watch_channel) must be a /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given @@ -346,9 +364,11 @@ pub struct TestChainMonitor<'a> { /// If this is set to Some(), the next round trip serialization check will not hold after an /// update_channel call (not watch_channel) for the given channel_id. pub expect_monitor_round_trip_fail: Mutex>, + #[cfg(feature = "std")] + pub write_blocker: Mutex>>, } impl<'a> TestChainMonitor<'a> { - pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist, keys_manager: &'a TestKeysInterface) -> Self { + pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn SyncBroadcaster, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn SyncPersist, keys_manager: &'a TestKeysInterface) -> Self { Self { added_monitors: Mutex::new(Vec::new()), monitor_updates: Mutex::new(new_hash_map()), @@ -357,6 +377,8 @@ impl<'a> TestChainMonitor<'a> { keys_manager, expect_channel_force_closed: Mutex::new(None), expect_monitor_round_trip_fail: Mutex::new(None), + #[cfg(feature = "std")] + write_blocker: Mutex::new(None), } } @@ -367,6 +389,11 @@ impl<'a> TestChainMonitor<'a> { } impl<'a> chain::Watch for TestChainMonitor<'a> { fn 
watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result { + #[cfg(feature = "std")] + if let Some(blocker) = &*self.write_blocker.lock().unwrap() { + blocker.recv().unwrap(); + } + // At every point where we get a monitor update, we should be able to send a useful monitor // to a watchtower and disk... let mut w = TestVecWriter(Vec::new()); @@ -381,6 +408,11 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { } fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus { + #[cfg(feature = "std")] + if let Some(blocker) = &*self.write_blocker.lock().unwrap() { + blocker.recv().unwrap(); + } + // Every monitor update should survive roundtrip let mut w = TestVecWriter(Vec::new()); update.write(&mut w).unwrap(); @@ -1446,18 +1478,19 @@ impl Drop for TestChainSource { pub struct TestScorer { /// Stores a tuple of (scid, ChannelUsage) - scorer_expectations: RefCell>>, + scorer_expectations: Mutex>>, } impl TestScorer { pub fn new() -> Self { Self { - scorer_expectations: RefCell::new(None), + scorer_expectations: Mutex::new(None), } } pub fn expect_usage(&self, scid: u64, expectation: ChannelUsage) { - self.scorer_expectations.borrow_mut().get_or_insert_with(|| VecDeque::new()).push_back((scid, expectation)); + let mut expectations = self.scorer_expectations.lock().unwrap(); + expectations.get_or_insert_with(|| VecDeque::new()).push_back((scid, expectation)); } } @@ -1476,7 +1509,7 @@ impl ScoreLookUp for TestScorer { Some(scid) => scid, None => return 0, }; - if let Some(scorer_expectations) = self.scorer_expectations.borrow_mut().as_mut() { + if let Some(scorer_expectations) = self.scorer_expectations.lock().unwrap().as_mut() { match scorer_expectations.pop_front() { Some((scid, expectation)) => { assert_eq!(expectation, usage); @@ -1510,7 +1543,7 @@ impl Drop for TestScorer { return; } - if let Some(scorer_expectations) = 
self.scorer_expectations.borrow().as_ref() { + if let Some(scorer_expectations) = self.scorer_expectations.lock().unwrap().as_ref() { if !scorer_expectations.is_empty() { panic!("Unsatisfied scorer expectations: {:?}", scorer_expectations) }