From 31a43f6836afc20994a0429442d4a63badd140ba Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 26 Aug 2022 15:10:54 +0200 Subject: [PATCH 01/20] Initial LdkLite architecture and project layout --- .github/workflows/build.yml | 27 ++ Cargo.toml | 34 ++ LICENSE-APACHE | 201 ++++++++++ LICENSE-MIT | 16 + LICENSE.md | 14 + rustfmt.toml | 5 + src/access.rs | 320 ++++++++++++++++ src/error.rs | 106 ++++++ src/event.rs | 134 +++++++ src/lib.rs | 704 ++++++++++++++++++++++++++++++++++++ src/logger.rs | 38 ++ src/util.rs | 175 +++++++++ 12 files changed, 1774 insertions(+) create mode 100644 .github/workflows/build.yml create mode 100644 Cargo.toml create mode 100644 LICENSE-APACHE create mode 100644 LICENSE-MIT create mode 100644 LICENSE.md create mode 100644 rustfmt.toml create mode 100644 src/access.rs create mode 100644 src/error.rs create mode 100644 src/event.rs create mode 100644 src/lib.rs create mode 100644 src/logger.rs create mode 100644 src/util.rs diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..1a70ea03f --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,27 @@ +name: Continuous Integration Checks + +on: [push, pull_request] + +jobs: + build: + strategy: + matrix: + toolchain: [ stable, beta ] + include: + - toolchain: stable + check-fmt: true + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v2 + - name: Install Rust ${{ matrix.toolchain }} toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ matrix.toolchain }} + override: true + profile: minimal + - name: Build on Rust ${{ matrix.toolchain }} + run: cargo build --verbose --color always + - name: Check formatting + if: matrix.check-fmt + run: rustup component add rustfmt && cargo fmt --all -- --check diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 000000000..a5769df19 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "ldk-lite" +version = "0.1.0" +authors = ["Elias Rohrer "] +license = "MIT OR Apache-2.0" +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +lightning = { version = "0.0.110", features = ["max_level_trace", "std"] } +lightning-block-sync = { version = "0.0.110", features = [ "rpc-client" ] } +lightning-invoice = { version = "0.18" } +lightning-net-tokio = { version = "0.0.110" } +lightning-persister = { version = "0.0.110" } +lightning-background-processor = { version = "0.0.110" } +lightning-rapid-gossip-sync = { version = "0.0.110" } + +#bdk = "0.20.0" +bdk = { git = "https://github.com/tnull/bdk", branch="feat/use-external-esplora-client", features = ["use-esplora-ureq"]} +bitcoin = "0.28.1" + +rand = "0.8.5" +chrono = "0.4" +futures = "0.3" +serde_json = { version = "1.0" } +tokio = { version = "1", features = [ "io-util", "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] } + + +[profile.release] +panic = "abort" + +[profile.dev] +panic = "abort" diff --git a/LICENSE-APACHE b/LICENSE-APACHE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/LICENSE-MIT b/LICENSE-MIT new file mode 100644 index 000000000..9d982a4d6 --- /dev/null +++ b/LICENSE-MIT @@ -0,0 +1,16 @@ +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 000000000..c3f44cabd --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,14 @@ +This software is licensed under [Apache 2.0](LICENSE-APACHE) or +[MIT](LICENSE-MIT), at your option. + +Some files retain their own copyright notice, however, for full authorship +information, see version control history. + +Except as otherwise noted in individual files, all files in this repository are +licensed under the Apache License, Version 2.0 or the MIT license , at your option. + +You may not use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of this software or any files in this repository except in +accordance with one or both of these licenses. 
diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 000000000..c00f65546 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,5 @@ +hard_tabs = true # use tab characters for indentation, spaces for alignment +use_field_init_shorthand = true +max_width = 100 +use_small_heuristics = "Max" +fn_args_layout = "Compressed" diff --git a/src/access.rs b/src/access.rs new file mode 100644 index 000000000..a38b60eca --- /dev/null +++ b/src/access.rs @@ -0,0 +1,320 @@ +use crate::error::LdkLiteError as Error; + +use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; +use lightning::chain::WatchedOutput; +use lightning::chain::{Confirm, Filter}; + +use bdk::blockchain::{Blockchain, EsploraBlockchain, GetBlockHash, GetHeight, GetTx}; +use bdk::database::BatchDatabase; +use bdk::wallet::AddressIndex; +use bdk::{SignOptions, SyncOptions}; + +use bitcoin::{BlockHash, Script, Transaction, Txid}; + +use std::sync::Mutex; + +pub struct LdkLiteChainAccess +where + D: BatchDatabase, +{ + blockchain: Mutex, + wallet: Mutex>, + queued_transactions: Mutex>, + watched_transactions: Mutex>, + queued_outputs: Mutex>, + watched_outputs: Mutex>, + last_sync_height: Mutex>, +} + +impl LdkLiteChainAccess +where + D: BatchDatabase, +{ + pub fn new(blockchain: EsploraBlockchain, wallet: bdk::Wallet) -> Self { + let blockchain = Mutex::new(blockchain); + let wallet = Mutex::new(wallet); + let watched_transactions = Mutex::new(Vec::new()); + let queued_transactions = Mutex::new(Vec::new()); + let watched_outputs = Mutex::new(Vec::new()); + let queued_outputs = Mutex::new(Vec::new()); + let last_sync_height = Mutex::new(None); + Self { + blockchain, + wallet, + queued_transactions, + watched_transactions, + queued_outputs, + watched_outputs, + last_sync_height, + } + } + + pub fn sync_wallet(&self) -> Result<(), Error> { + let sync_options = SyncOptions { progress: None }; + + let locked_chain = self.blockchain.lock().unwrap(); + self.wallet + .lock() + .unwrap() + .sync(&*locked_chain, sync_options) + .map_err(|e| Error::Bdk(e))?; + Ok(()) + } + + pub fn sync(&self, confirmables: Vec<&dyn Confirm>) -> Result<(), Error> { + let locked_chain = self.blockchain.lock().unwrap(); + let client = &*(*locked_chain); + + let cur_height = client.get_height()?; + + let mut locked_last_sync_height = self.last_sync_height.lock().unwrap(); + if cur_height >= locked_last_sync_height.unwrap_or(0) { + { + // First, inform the interface of the new block. + let cur_block_header = client.get_header(cur_height)?; + for c in &confirmables { + c.best_block_updated(&cur_block_header, cur_height); + } + + *locked_last_sync_height = Some(cur_height); + } + + { + // First, check the confirmation status of registered transactions as well as the + // status of dependent transactions of registered outputs. + let mut locked_queued_transactions = self.queued_transactions.lock().unwrap(); + let mut locked_queued_outputs = self.queued_outputs.lock().unwrap(); + let mut locked_watched_transactions = self.watched_transactions.lock().unwrap(); + let mut locked_watched_outputs = self.watched_outputs.lock().unwrap(); + + let mut confirmed_txs = Vec::new(); + + // Check in the current queue, as well as in registered transactions leftover from + // previous iterations. 
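+				// Both the txids carried over from previous rounds and the newly queued ones
+				// are merged, then sorted and deduplicated below so that each txid is queried
+				// against the Esplora backend at most once per round.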
+ let mut registered_txs: Vec = locked_watched_transactions + .iter() + .chain(locked_queued_transactions.iter()) + .cloned() + .collect(); + + registered_txs.sort_unstable_by(|txid1, txid2| txid1.cmp(&txid2)); + registered_txs.dedup_by(|txid1, txid2| txid1.eq(&txid2)); + + // Remember all registered but unconfirmed transactions for future processing. + let mut unconfirmed_registered_txs = Vec::new(); + + for txid in registered_txs { + if let Some(tx_status) = client.get_tx_status(&txid)? { + if tx_status.confirmed { + if let Some(tx) = client.get_tx(&txid)? { + if let Some(block_height) = tx_status.block_height { + let block_header = client.get_header(block_height)?; + if let Some(merkle_proof) = client.get_merkle_proof(&txid)? { + confirmed_txs.push(( + tx, + block_height, + block_header, + merkle_proof.pos, + )); + continue; + } + } + } + } + } + unconfirmed_registered_txs.push(txid); + } + + // Check all registered outputs for dependent spending transactions. + let registered_outputs: Vec = locked_watched_outputs + .iter() + .chain(locked_queued_outputs.iter()) + .cloned() + .collect(); + + // Remember all registered outputs that haven't been spent for future processing. + let mut unspent_registered_outputs = Vec::new(); + + for output in registered_outputs { + if let Some(output_status) = client + .get_output_status(&output.outpoint.txid, output.outpoint.index as u64)? + { + if output_status.spent { + if let Some(spending_tx_status) = output_status.status { + if spending_tx_status.confirmed { + let spending_txid = output_status.txid.unwrap(); + if let Some(spending_tx) = client.get_tx(&spending_txid)? { + let block_height = spending_tx_status.block_height.unwrap(); + let block_header = client.get_header(block_height)?; + if let Some(merkle_proof) = + client.get_merkle_proof(&spending_txid)? + { + confirmed_txs.push(( + spending_tx, + block_height, + block_header, + merkle_proof.pos, + )); + continue; + } + } + } + } + } + } + unspent_registered_outputs.push(output); + } + + // Sort all confirmed transactions by block height and feed them to the interface + // in order. + confirmed_txs.sort_unstable_by( + |(_, block_height1, _, _), (_, block_height2, _, _)| { + block_height1.cmp(&block_height2) + }, + ); + for (tx, block_height, block_header, pos) in confirmed_txs { + for c in &confirmables { + c.transactions_confirmed(&block_header, &[(pos, &tx)], block_height); + } + } + + *locked_watched_transactions = unconfirmed_registered_txs; + *locked_queued_transactions = Vec::new(); + *locked_watched_outputs = unspent_registered_outputs; + *locked_queued_outputs = Vec::new(); + } + + { + // Query the interface for relevant txids and check whether they have been + // reorged-out of the chain. + let unconfirmed_txids = confirmables + .iter() + .flat_map(|c| c.get_relevant_txids()) + .filter(|txid| { + client + .get_tx_status(txid) + .ok() + .unwrap_or(None) + .map_or(true, |status| !status.confirmed) + }) + .collect::>(); + + // Mark all relevant unconfirmed transactions as unconfirmed. 
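+				// These txids were previously reported to LDK as confirmed but are no longer
+				// seen as confirmed by the Esplora backend, i.e., they were likely reorged out
+				// of the best chain.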
+ for txid in &unconfirmed_txids { + for c in &confirmables { + c.transaction_unconfirmed(txid); + } + } + } + } + + // TODO: check whether new outputs have been registered by now and process them + Ok(()) + } + + pub fn create_funding_transaction( + &self, output_script: &Script, value_sats: u64, confirmation_target: ConfirmationTarget, + ) -> Result { + let num_blocks = num_blocks_from_conf_target(confirmation_target); + let fee_rate = self.blockchain.lock().unwrap().estimate_fee(num_blocks)?; + + let locked_wallet = self.wallet.lock().unwrap(); + let mut tx_builder = locked_wallet.build_tx(); + + tx_builder.add_recipient(output_script.clone(), value_sats).fee_rate(fee_rate).enable_rbf(); + + let (mut psbt, _) = tx_builder.finish()?; + + // We double-check that no inputs try to spend non-witness outputs. As we use a SegWit + // wallet descriptor this technically can't ever happen, but better safe than sorry. + for input in &psbt.inputs { + if input.non_witness_utxo.is_some() { + return Err(Error::FundingTxNonWitnessOuputSpend); + } + } + + let finalized = locked_wallet.sign(&mut psbt, SignOptions::default())?; + if !finalized { + return Err(Error::FundingTxNotFinalized); + } + + Ok(psbt.extract_tx()) + } + + pub fn get_new_address(&self) -> Result { + let address_info = self.wallet.lock().unwrap().get_address(AddressIndex::New)?; + Ok(address_info.address) + } +} + +impl FeeEstimator for LdkLiteChainAccess +where + D: BatchDatabase, +{ + fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 { + let num_blocks = num_blocks_from_conf_target(confirmation_target); + // TODO: make this an unwrap_or? + // TODO: double-check here https://github.com/bitcoindevkit/bdk/pull/678/commits/03a5b223800b0fafd0e7c2c82bf4943ac9d5ae58 + // TODO: switch to https://github.com/bitcoindevkit/bdk/pull/678 once that is merged + self.blockchain.lock().unwrap().estimate_fee(num_blocks).unwrap().fee_wu(1000) as u32 + } +} + +impl BroadcasterInterface for LdkLiteChainAccess +where + D: BatchDatabase, +{ + fn broadcast_transaction(&self, tx: &Transaction) { + self.blockchain.lock().unwrap().broadcast(tx).ok(); + } +} + +impl Filter for LdkLiteChainAccess +where + D: BatchDatabase, +{ + fn register_tx(&self, txid: &Txid, _script_pubkey: &Script) { + self.queued_transactions.lock().unwrap().push(*txid); + } + + fn register_output(&self, output: WatchedOutput) -> Option<(usize, Transaction)> { + self.queued_outputs.lock().unwrap().push(output); + // TODO: Remove return after rust-lightning#1663 gets merged + return None; + } +} + +impl GetHeight for LdkLiteChainAccess +where + D: BatchDatabase, +{ + fn get_height(&self) -> Result { + self.blockchain.lock().unwrap().get_height() + } +} + +impl GetBlockHash for LdkLiteChainAccess +where + D: BatchDatabase, +{ + fn get_block_hash(&self, height: u64) -> Result { + self.blockchain.lock().unwrap().get_block_hash(height) + } +} + +impl GetTx for LdkLiteChainAccess +where + D: BatchDatabase, +{ + fn get_tx(&self, txid: &Txid) -> Result, bdk::Error> { + self.blockchain.lock().unwrap().get_tx(txid) + } +} + +fn num_blocks_from_conf_target(confirmation_target: ConfirmationTarget) -> usize { + match confirmation_target { + ConfirmationTarget::Background => 6, + ConfirmationTarget::Normal => 3, + ConfirmationTarget::HighPriority => 1, + } +} diff --git a/src/error.rs b/src/error.rs new file mode 100644 index 000000000..52b639cb1 --- /dev/null +++ b/src/error.rs @@ -0,0 +1,106 @@ +use bdk::blockchain::esplora; +use lightning::ln::msgs; +use 
std::fmt; +use std::io; +use std::sync::mpsc; +use std::time; + +#[derive(Debug)] +/// An error that possibly needs to be handled by the user. +pub enum LdkLiteError { + /// Returned when trying to start LdkLite while it is already running. + AlreadyRunning, + /// Returned when trying to stop LdkLite while it is not running. + NotRunning, + /// An input of the funding transaction tried spending a non-SegWit output. This should never happen, but + /// better safe than sorry.. + FundingTxNonWitnessOuputSpend, + /// The funding transaction could not be finalized. + FundingTxNotFinalized, + /// TODO + ChainStateMismatch, + /// A network connection has been closed. + ConnectionClosed, + /// A wrapped LDK `DecodeError` + Decode(msgs::DecodeError), + /// A wrapped BDK error + Bdk(bdk::Error), + /// A wrapped `Bip32` error + Bip32(bitcoin::util::bip32::Error), + /// A wrapped `std::io::Error` + Io(io::Error), + /// A wrapped `SystemTimeError` + Time(time::SystemTimeError), + /// A wrapped `EsploraError` + Esplora(esplora::EsploraError), + /// A wrapped `mpsc::RecvError` + ChannelRecv(mpsc::RecvError), +} + +impl fmt::Display for LdkLiteError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + LdkLiteError::Decode(ref e) => write!(f, "LDK decode error: {}", e), + LdkLiteError::Bdk(ref e) => write!(f, "BDK error: {}", e), + LdkLiteError::Bip32(ref e) => write!(f, "Bitcoin error: {}", e), + LdkLiteError::Io(ref e) => write!(f, "IO error: {}", e), + LdkLiteError::Time(ref e) => write!(f, "time error: {}", e), + LdkLiteError::Esplora(ref e) => write!(f, "Esplora error: {}", e), + LdkLiteError::ChannelRecv(ref e) => write!(f, "channel recv error: {}", e), + LdkLiteError::AlreadyRunning => write!(f, "LDKLite is already running."), + LdkLiteError::NotRunning => write!(f, "LDKLite is not running."), + LdkLiteError::FundingTxNonWitnessOuputSpend => write!(f, "an input of the funding transaction tried spending a non-SegWit output, which is insecure"), + LdkLiteError::FundingTxNotFinalized => write!(f, "the funding transaction could not be finalized"), + LdkLiteError::ChainStateMismatch => write!(f, "ChainStateMismatch"), + LdkLiteError::ConnectionClosed => write!(f, "network connection closed"), + } + } +} + +impl From for LdkLiteError { + fn from(e: msgs::DecodeError) -> Self { + Self::Decode(e) + } +} + +impl From for LdkLiteError { + fn from(e: bdk::Error) -> Self { + Self::Bdk(e) + } +} + +impl From for LdkLiteError { + fn from(e: bitcoin::util::bip32::Error) -> Self { + Self::Bip32(e) + } +} + +impl From for LdkLiteError { + fn from(e: bdk::electrum_client::Error) -> Self { + Self::Bdk(bdk::Error::Electrum(e)) + } +} + +impl From for LdkLiteError { + fn from(e: io::Error) -> Self { + Self::Io(e) + } +} + +impl From for LdkLiteError { + fn from(e: time::SystemTimeError) -> Self { + Self::Time(e) + } +} + +impl From for LdkLiteError { + fn from(e: mpsc::RecvError) -> Self { + Self::ChannelRecv(e) + } +} + +impl From for LdkLiteError { + fn from(e: esplora::EsploraError) -> Self { + Self::Esplora(e) + } +} diff --git a/src/event.rs b/src/event.rs new file mode 100644 index 000000000..f353229bd --- /dev/null +++ b/src/event.rs @@ -0,0 +1,134 @@ +use crate::{ + ChannelManager, EventSender, FilesystemLogger, LdkLiteChainAccess, LdkLiteConfig, NetworkGraph, + PaymentInfoStorage, +}; + +use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; +use lightning::chain::keysinterface::KeysManager; +use lightning::util::events as ldk_events; +use 
lightning::util::logger::Logger; +use lightning::{log_error, log_given_level, log_internal}; + +use bdk::database::MemoryDatabase; + +use bitcoin::secp256k1::Secp256k1; +//use std::collections::{hash_map, VecDeque}; +//use std::iter::Iterator; +use std::sync::Arc; + +/// An LdkLiteEvent that should be handled by the user. +pub enum LdkLiteEvent { + /// asdf + Test, +} + +pub(crate) struct LdkLiteEventHandler { + chain_access: Arc>, + channel_manager: Arc, + _network_graph: Arc, + keys_manager: Arc, + _inbound_payments: Arc, + _outbound_payments: Arc, + _event_sender: EventSender, + logger: Arc, + _config: Arc, +} + +impl LdkLiteEventHandler { + pub fn new( + chain_access: Arc>, + channel_manager: Arc, _network_graph: Arc, + keys_manager: Arc, _inbound_payments: Arc, + _outbound_payments: Arc, _event_sender: EventSender, + logger: Arc, _config: Arc, + ) -> Self { + Self { + chain_access, + channel_manager, + _network_graph, + keys_manager, + _inbound_payments, + _outbound_payments, + _event_sender, + logger, + _config, + } + } +} + +impl ldk_events::EventHandler for LdkLiteEventHandler { + // TODO: implement error handling for events (i.e., get rid of any unwraps()) + fn handle_event(&self, event: &ldk_events::Event) { + match event { + ldk_events::Event::FundingGenerationReady { + temporary_channel_id, + counterparty_node_id, + channel_value_satoshis, + output_script, + .. + } => { + // Construct the raw transaction with one output, that is paid the amount of the + // channel. + + // TODO: what is a good default target here? + let confirmation_target = ConfirmationTarget::Normal; + + // Sign the final funding transaction and broadcast it. + match self.chain_access.create_funding_transaction( + &output_script, + *channel_value_satoshis, + confirmation_target, + ) { + Ok(final_tx) => { + // Give the funding transaction back to LDK for opening the channel. + if self + .channel_manager + .funding_transaction_generated( + &temporary_channel_id, + counterparty_node_id, + final_tx, + ) + .is_err() + { + log_error!(self.logger, "Channel went away before we could fund it. The peer disconnected or refused the channel"); + } + } + Err(err) => { + log_error!(self.logger, "Failed to create funding transaction: {}", err); + } + } + } + ldk_events::Event::PaymentReceived { .. } => {} + ldk_events::Event::PaymentClaimed { .. } => {} + ldk_events::Event::PaymentSent { .. } => {} + ldk_events::Event::PaymentFailed { .. } => {} + ldk_events::Event::PaymentPathSuccessful { .. } => {} + ldk_events::Event::PaymentPathFailed { .. } => {} + ldk_events::Event::ProbeSuccessful { .. } => {} + ldk_events::Event::ProbeFailed { .. } => {} + ldk_events::Event::HTLCHandlingFailed { .. } => {} + ldk_events::Event::PendingHTLCsForwardable { .. } => {} + ldk_events::Event::SpendableOutputs { outputs } => { + let destination_address = self.chain_access.get_new_address().unwrap(); + let output_descriptors = &outputs.iter().map(|a| a).collect::>(); + let tx_feerate = + self.chain_access.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + let spending_tx = self + .keys_manager + .spend_spendable_outputs( + output_descriptors, + Vec::new(), + destination_address.script_pubkey(), + tx_feerate, + &Secp256k1::new(), + ) + .unwrap(); + self.chain_access.broadcast_transaction(&spending_tx); + } + ldk_events::Event::OpenChannelRequest { .. } => {} + ldk_events::Event::PaymentForwarded { .. } => {} + ldk_events::Event::ChannelClosed { .. } => {} + ldk_events::Event::DiscardFunding { .. 
} => {} + } + } +} diff --git a/src/lib.rs b/src/lib.rs new file mode 100644 index 000000000..bc785b72c --- /dev/null +++ b/src/lib.rs @@ -0,0 +1,704 @@ +// This file is Copyright its original authors, visible in version contror +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +#![crate_name = "ldk_lite"] + +//! A library providing a simplified API for the Lightning Dev Kit. While LDK itself provides a +//! highly configurable and adaptable interface, this API champions simplicity and ease of use over +//! configurability. To this end, it provides an opionated set of design choices and ready-to-go +//! default modules, while still enabling some configurability when dearly needed by the user: +//! - Chain data is accessed through an Esplora client. +//! - Wallet and channel states are persisted to disk. +//! - Gossip is retrieved over the P2P network. + +#![deny(missing_docs)] +#![deny(broken_intra_doc_links)] +#![deny(private_intra_doc_links)] +#![allow(bare_trait_objects)] +#![allow(ellipsis_inclusive_range_patterns)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] + +mod access; +mod error; +mod event; +mod logger; +mod util; + +use access::LdkLiteChainAccess; +pub use error::LdkLiteError as Error; +pub use event::LdkLiteEvent; +use event::LdkLiteEventHandler; +use logger::FilesystemLogger; + +use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient}; +use lightning::chain::{chainmonitor, Access, BestBlock, Confirm, Filter, Watch}; +use lightning::ln::channelmanager; +use lightning::ln::channelmanager::{ + ChainParameters, ChannelManagerReadArgs, SimpleArcChannelManager, +}; +use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager}; +use lightning::ln::PaymentHash; +use lightning::routing::gossip; +use lightning::routing::gossip::P2PGossipSync; +use lightning::routing::scoring::ProbabilisticScorer; + +use lightning::util::config::UserConfig; +use lightning::util::logger::Logger; +use lightning::util::ser::ReadableArgs; +use lightning::{log_error, log_given_level, log_internal}; + +use lightning_background_processor::BackgroundProcessor; +use lightning_background_processor::GossipSync as BPGossipSync; +use lightning_persister::FilesystemPersister; + +use lightning_net_tokio::SocketDescriptor; + +use lightning_invoice::payment; +use lightning_invoice::utils::DefaultRouter; + +use bdk::blockchain::esplora::EsploraBlockchain; +use bdk::blockchain::{GetBlockHash, GetHeight}; +use bdk::database::MemoryDatabase; + +use bitcoin::secp256k1::PublicKey; +use bitcoin::BlockHash; + +use rand::RngCore; + +use std::collections::HashMap; +use std::fs; +use std::net::SocketAddr; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::mpsc; +use std::sync::{Arc, Mutex, RwLock}; +use std::thread; +use std::time::{Duration, SystemTime}; + +// TODO: Is MemoryDatabase okay to use? + +const CHANNEL_BUF_SIZE: usize = 1000; + +#[derive(Debug, Clone)] +/// Represents the configuration of an `LdkLite` instance. +pub struct LdkLiteConfig { + /// The path where the underlying LDK and BDK persist their data. + pub storage_dir_path: String, + /// The URL of the utilized Esplora server. + pub esplora_server_url: String, + /// The used Bitcoin network. + pub network: bitcoin::Network, + /// The TCP port the network node will listen on. 
+ pub listening_port: u16, +} + +/// A builder for an [`LdkLite`] instance, allowing to set some configuration and module choices from +/// the getgo. +#[derive(Debug, Clone)] +pub struct LdkLiteBuilder { + config: LdkLiteConfig, +} + +impl LdkLiteBuilder { + /// Creates a new builder instance with the default configuration. + pub fn new() -> Self { + // Set the config defaults + let storage_dir_path = "/tmp/ldk_lite/".to_string(); + let esplora_server_url = "https://blockstream.info/api".to_string(); + let network = bitcoin::Network::Testnet; + let listening_port = 9735; + + let config = + LdkLiteConfig { storage_dir_path, esplora_server_url, network, listening_port }; + + Self { config } + } + + /// Creates a new builder instance from an [`LdkLiteConfig`]. + pub fn from_config(config: LdkLiteConfig) -> Self { + Self { config } + } + + /// Sets the used storage directory path. + /// + /// Default: `/tmp/ldk_lite/` + pub fn storage_dir_path(&mut self, storage_dir_path: String) -> &mut Self { + self.config.storage_dir_path = storage_dir_path; + self + } + + /// Sets the Esplora server URL. + /// + /// Default: `https://blockstream.info/api` + pub fn esplora_server_url(&mut self, esplora_server_url: String) -> &mut Self { + self.config.esplora_server_url = esplora_server_url; + self + } + + /// Sets the Bitcoin network used. + /// + /// Options: `mainnet`/`bitcoin`, `testnet`, `regtest`, `signet` + /// + /// Default: `testnet` + pub fn network(&mut self, network: String) -> &mut Self { + self.config.network = match network.as_str() { + "mainnet" => bitcoin::Network::Bitcoin, + "bitcoin" => bitcoin::Network::Bitcoin, + "testnet" => bitcoin::Network::Testnet, + "regtest" => bitcoin::Network::Regtest, + "signet" => bitcoin::Network::Signet, + _ => bitcoin::Network::Testnet, + }; + self + } + + /// Sets the port on which [`LdkLite`] will listen for incoming network connections. + /// + /// Default: `9735` + pub fn listening_port(&mut self, listening_port: u16) -> &mut Self { + self.config.listening_port = listening_port; + self + } + + /// Builds an [`LdkLite`] instance according to the options previously configured. + pub fn build(self) -> Result { + let config = Arc::new(self.config); + + let ldk_data_dir = format!("{}/ldk", &config.storage_dir_path.clone()); + fs::create_dir_all(ldk_data_dir.clone())?; + + let bdk_data_dir = format!("{}/bdk", config.storage_dir_path.clone()); + fs::create_dir_all(bdk_data_dir.clone())?; + + // Step 0: Initialize the Logger + let logger = Arc::new(FilesystemLogger::new(config.storage_dir_path.clone())); + + // Step 1: Initialize the on-chain wallet and chain access + let seed = util::read_or_generate_seed_file(Arc::clone(&config))?; + let xprv = bitcoin::util::bip32::ExtendedPrivKey::new_master(config.network, &seed)?; + + let bdk_wallet = bdk::Wallet::new( + bdk::template::Bip84(xprv.clone(), bdk::KeychainKind::External), + Some(bdk::template::Bip84(xprv.clone(), bdk::KeychainKind::Internal)), + config.network, + MemoryDatabase::default(), + )?; + + // TODO: Check that we can be sure that the Esplora client re-connects in case of failure + // and and exits cleanly on drop. Otherwise we need to handle this/move it to the runtime? 
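+		// Note: the second argument to `EsploraBlockchain::new` is presumably BDK's address
+		// stop gap, and `with_concurrency(8)` the number of parallel Esplora requests; both
+		// are hard-coded defaults here until they become configurable.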
+ let blockchain = EsploraBlockchain::new(&config.esplora_server_url, 20).with_concurrency(8); + + let chain_access = Arc::new(LdkLiteChainAccess::new(blockchain, bdk_wallet)); + + // Step 3: Initialize Persist + let persister = Arc::new(FilesystemPersister::new(ldk_data_dir.clone())); + + // Step 4: Initialize the ChainMonitor + let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( + None, + Arc::clone(&chain_access), + Arc::clone(&logger), + Arc::clone(&chain_access), + Arc::clone(&persister), + )); + + // Step 5: Initialize the KeysManager + let cur = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?; + let keys_manager = Arc::new(KeysManager::new(&seed, cur.as_secs(), cur.subsec_nanos())); + + // Step 6: Read ChannelMonitor state from disk + let mut channel_monitors = persister.read_channelmonitors(keys_manager.clone())?; + + // Step 7: Initialize the ChannelManager + let mut user_config = UserConfig::default(); + user_config.channel_handshake_limits.force_announced_channel_preference = false; + let (_channel_manager_blockhash, channel_manager) = { + if let Ok(mut f) = fs::File::open(format!("{}/manager", ldk_data_dir.clone())) { + let mut channel_monitor_mut_references = Vec::new(); + for (_, channel_monitor) in channel_monitors.iter_mut() { + channel_monitor_mut_references.push(channel_monitor); + } + let read_args = ChannelManagerReadArgs::new( + Arc::clone(&keys_manager), + Arc::clone(&chain_access), + Arc::clone(&chain_monitor), + Arc::clone(&chain_access), + Arc::clone(&logger), + user_config, + channel_monitor_mut_references, + ); + <(BlockHash, ChannelManager)>::read(&mut f, read_args)? + } else { + // We're starting a fresh node. + let latest_block_height = chain_access.get_height()?; + let latest_block_hash = chain_access.get_block_hash(latest_block_height as u64)?; + + let chain_params = ChainParameters { + network: config.network, + best_block: BestBlock::new(latest_block_hash, latest_block_height), + }; + let fresh_channel_manager = channelmanager::ChannelManager::new( + Arc::clone(&chain_access), + Arc::clone(&chain_monitor), + Arc::clone(&chain_access), + Arc::clone(&logger), + Arc::clone(&keys_manager), + user_config, + chain_params, + ); + (latest_block_hash, fresh_channel_manager) + } + }; + + let channel_manager = Arc::new(channel_manager); + + // Step 8: Give ChannelMonitors to ChainMonitor + for (_blockhash, channel_monitor) in channel_monitors.drain(..) 
{ + let funding_outpoint = channel_monitor.get_funding_txo().0; + chain_monitor.watch_channel(funding_outpoint, channel_monitor).unwrap(); + } + + // TODO: Use RGS on first start, if configured + // Step 10: Initialize the P2PGossipSync + let network_graph = + Arc::new(util::read_network_graph(Arc::clone(&config), Arc::clone(&logger))?); + let gossip_sync = Arc::new(P2PGossipSync::new( + Arc::clone(&network_graph), + None::>, + Arc::clone(&logger), + )); + + //// Step 11: Initialize the PeerManager + let mut ephemeral_bytes = [0; 32]; + rand::thread_rng().fill_bytes(&mut ephemeral_bytes); + let lightning_msg_handler = MessageHandler { + chan_handler: Arc::clone(&channel_manager), + route_handler: Arc::clone(&gossip_sync), + }; + + let peer_manager: Arc = Arc::new(PeerManager::new( + lightning_msg_handler, + keys_manager.get_node_secret(Recipient::Node).unwrap(), + &ephemeral_bytes, + Arc::clone(&logger), + Arc::new(IgnoringMessageHandler {}), + )); + + // Step 12: Initialize routing ProbabilisticScorer + let scorer = Arc::new(Mutex::new(util::read_scorer( + Arc::clone(&config), + Arc::clone(&network_graph), + Arc::clone(&logger), + ))); + + // Step 13: Init payment info storage + // TODO: persist payment info to disk + let _inbound_payments = Arc::new(Mutex::new(HashMap::new())); + let _outbound_payments = Arc::new(Mutex::new(HashMap::new())); + + // Step 14: Handle LDK Events + let event_queue = mpsc::sync_channel(CHANNEL_BUF_SIZE); + let event_sender = event_queue.0.clone(); + let event_handler = LdkLiteEventHandler::new( + Arc::clone(&chain_access), + Arc::clone(&channel_manager), + Arc::clone(&network_graph), + Arc::clone(&keys_manager), + Arc::clone(&_inbound_payments), + Arc::clone(&_outbound_payments), + event_sender, + Arc::clone(&logger), + Arc::clone(&config), + ); + + //// Step 16: Create Router and InvoicePayer + let router = DefaultRouter::new( + Arc::clone(&network_graph), + Arc::clone(&logger), + keys_manager.get_secure_random_bytes(), + ); + + let invoice_payer = Arc::new(InvoicePayer::new( + Arc::clone(&channel_manager), + router, + Arc::clone(&scorer), + Arc::clone(&logger), + event_handler, + payment::Retry::Timeout(Duration::from_secs(10)), + )); + + let running = RwLock::new(None); + + Ok(LdkLite { + running, + config, + chain_access, + channel_manager, + chain_monitor, + peer_manager, + gossip_sync, + persister, + logger, + network_graph, + scorer, + invoice_payer, + _inbound_payments, + _outbound_payments, + event_queue, + }) + } +} + +/// Wraps all objects that need to be preserved during the run time of `LdkLite`. Will be dropped +/// upon [`LdkLite::stop()`]. +struct LdkLiteRuntime { + _tokio_runtime: tokio::runtime::Runtime, + _background_processor: BackgroundProcessor, + stop_networking: Arc, + stop_wallet_sync: Arc, +} + +/// The main interface object of the simplified API, wrapping the necessary LDK and BDK functionalities. +/// +/// Needs to be initialized and instantiated through [`LdkLiteBuilder::build`]. +pub struct LdkLite { + running: RwLock>, + config: Arc, + chain_access: Arc>, + channel_manager: Arc, + chain_monitor: Arc, + peer_manager: Arc, + gossip_sync: Arc, + persister: Arc, + logger: Arc, + scorer: Arc>, + network_graph: Arc, + invoice_payer: Arc>, + _inbound_payments: Arc, + _outbound_payments: Arc, + event_queue: (EventSender, EventReceiver), +} + +impl LdkLite { + /// Starts the necessary background tasks, such as handling events coming from user input, + /// LDK/BDK, and the peer-to-peer network. 
After this returns, the [`LdkLite`] instance can be + /// controlled via the provided API methods in a thread-safe manner. + pub fn start(&mut self) -> Result<(), Error> { + // Acquire a run lock and hold it until we're setup. + // TODO: maybe this could be done cleaner with a Condvar? + let mut run_lock = self.running.write().unwrap(); + if run_lock.is_some() { + // We're already running. + return Err(Error::AlreadyRunning); + } + + let runtime = self.setup_runtime()?; + *run_lock = Some(runtime); + Ok(()) + } + + /// Disconnects all peers, stops all running background tasks, and shuts down [`LdkLite`]. + pub fn stop(&mut self) -> Result<(), Error> { + let mut run_lock = self.running.write().unwrap(); + if run_lock.is_none() { + // We're not running. + return Err(Error::NotRunning); + } + + let runtime = run_lock.as_ref().unwrap(); + + // Stop wallet sync + runtime.stop_wallet_sync.store(true, Ordering::Release); + + // Stop networking + runtime.stop_networking.store(true, Ordering::Release); + self.peer_manager.disconnect_all_peers(); + + // Wait for 500ms to make sure shutdown can happen cleanly. + thread::sleep(Duration::from_millis(500)); + + // Drop the runtime, which stops the background processor and any possibly remaining tokio threads. + *run_lock = None; + Ok(()) + } + + fn setup_runtime(&self) -> Result { + let _tokio_runtime = + tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); + + // Setup wallet sync + let chain_access = Arc::clone(&self.chain_access); + let sync_cman = Arc::clone(&self.channel_manager); + let sync_cmon = Arc::clone(&self.chain_monitor); + let stop_wallet_sync = Arc::new(AtomicBool::new(false)); + let stop_sync = Arc::clone(&stop_wallet_sync); + + thread::spawn(move || { + let mut rounds = 0; + loop { + if stop_sync.load(Ordering::Acquire) { + return; + } + // As syncing the on-chain wallet is much more time-intesive, we only sync every + // fifth round. + // TODO: make this superfluous by integrating LDK's sync with the BDK wallet sync + // properly. + if rounds == 0 { + chain_access.sync_wallet().unwrap(); + } + rounds = (rounds + 1) % 5; + + let confirmables = vec![&*sync_cman as &dyn Confirm, &*sync_cmon as &dyn Confirm]; + chain_access.sync(confirmables).unwrap(); + thread::sleep(Duration::from_secs(5)); + } + }); + + // Setup networking + let peer_manager_connection_handler = Arc::clone(&self.peer_manager); + let listening_port = self.config.listening_port; + let stop_networking = Arc::new(AtomicBool::new(false)); + let stop_listen = Arc::clone(&stop_networking); + + _tokio_runtime.spawn(async move { + let listener = + tokio::net::TcpListener::bind(format!("0.0.0.0:{}", listening_port)).await.expect( + "Failed to bind to listen port - is something else already listening on it?", + ); + loop { + if stop_listen.load(Ordering::Acquire) { + return; + } + let peer_mgr = Arc::clone(&peer_manager_connection_handler); + let tcp_stream = listener.accept().await.unwrap().0; + tokio::spawn(async move { + lightning_net_tokio::setup_inbound( + Arc::clone(&peer_mgr), + tcp_stream.into_std().unwrap(), + ) + .await; + }); + } + }); + + // Regularly reconnect to channel peers. 
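+		// Roughly every second, compare the counterparty node ids of our channels against
+		// the currently connected peers and try to re-establish any missing connections
+		// from the persisted channel peer data.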
+ let connect_cm = Arc::clone(&self.channel_manager); + let connect_pm = Arc::clone(&self.peer_manager); + let connect_config = Arc::clone(&self.config); + let connect_logger = Arc::clone(&self.logger); + let stop_connect = Arc::clone(&stop_networking); + _tokio_runtime.spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(1)); + loop { + if stop_connect.load(Ordering::Acquire) { + return; + } + interval.tick().await; + match util::read_channel_peer_data(Arc::clone(&connect_config)) { + Ok(info) => { + let peers = connect_pm.get_peer_node_ids(); + for node_id in connect_cm + .list_channels() + .iter() + .map(|chan| chan.counterparty.node_id) + .filter(|id| !peers.contains(id)) + { + for (pubkey, peer_addr) in info.iter() { + if *pubkey == node_id { + let _ = do_connect_peer( + *pubkey, + peer_addr.clone(), + Arc::clone(&connect_pm), + ) + .await; + } + } + } + } + Err(e) => { + log_error!( + connect_logger, + "failure reading channel peer info from disk: {:?}", + e + ) + } + } + } + }); + + // Setup background processing + let _background_processor = BackgroundProcessor::start( + Arc::clone(&self.persister), + Arc::clone(&self.invoice_payer), + Arc::clone(&self.chain_monitor), + Arc::clone(&self.channel_manager), + BPGossipSync::p2p(Arc::clone(&self.gossip_sync)), + Arc::clone(&self.peer_manager), + Arc::clone(&self.logger), + Some(Arc::clone(&self.scorer)), + ); + + // TODO: frequently check back on background_processor if there was an error + + Ok(LdkLiteRuntime { + _tokio_runtime, + _background_processor, + stop_networking, + stop_wallet_sync, + }) + } + + /// Returns the next event from the event queue. Blocks until a new event is available. + pub fn next_event(&self) -> Result { + Ok(self.event_queue.1.recv()?) + } + + /// Returns our own node id + pub fn my_node_id(&self) -> Result { + if self.running.read().unwrap().is_none() { + // We're not running. + return Err(Error::NotRunning); + } + + Ok(self.channel_manager.get_our_node_id()) + } + + /// Retrieve a new on-chain/funding address. + pub fn new_funding_address(&mut self) -> Result { + if self.running.read().unwrap().is_none() { + // We're not running. + return Err(Error::NotRunning); + } + + self.chain_access.get_new_address() + } + + // Connect to a node and open a new channel. 
Disconnects and re-connects should be handled automatically + //pub fn connect_open_channel(&mut self, node_id: PublicKey, node_address: NetAddress) -> Result { + //} +} + +async fn connect_peer_if_necessary( + pubkey: PublicKey, peer_addr: SocketAddr, peer_manager: Arc, +) -> Result<(), Error> { + for node_pubkey in peer_manager.get_peer_node_ids() { + if node_pubkey == pubkey { + return Ok(()); + } + } + + do_connect_peer(pubkey, peer_addr, peer_manager).await +} + +async fn do_connect_peer( + pubkey: PublicKey, peer_addr: SocketAddr, peer_manager: Arc, +) -> Result<(), Error> { + match lightning_net_tokio::connect_outbound(Arc::clone(&peer_manager), pubkey, peer_addr).await + { + Some(connection_closed_future) => { + let mut connection_closed_future = Box::pin(connection_closed_future); + loop { + match futures::poll!(&mut connection_closed_future) { + std::task::Poll::Ready(_) => { + return Err(Error::ConnectionClosed); + } + std::task::Poll::Pending => {} + } + // Avoid blocking the tokio context by sleeping a bit + match peer_manager.get_peer_node_ids().iter().find(|id| **id == pubkey) { + Some(_) => return Ok(()), + None => tokio::time::sleep(Duration::from_millis(10)).await, + } + } + } + None => Err(Error::ConnectionClosed), + } +} + +// // Close a previously opened channel +// pub close_channel(&mut self, channel_id: u64) -> Result<()>; +// +// // Pay an invoice +// pub send_payment(&mut self, invoice: Invoice) -> Result +// +// // Send a spontaneous, aka. "keysend", payment +// pub send_spontaneous_payment(&mut self, amount: u64, node_id: PublicKey) -> Result; +// +// // Create an invoice to receive a payment +// pub receive_payment(&mut self, amount: Option) -> Invoice; +// +// // Get a new on-chain/funding address. +// pub new_funding_address(&mut self) -> Address; +// +// // Query for information about payment status. +// pub payment_info(&mut self) -> PaymentInfo; +// +// // Query for information about our channels +// pub channel_info(&mut self) -> ChannelInfo; +// +// // Query for information about our on-chain/funding status. +// pub funding_info(&mut self) -> FundingInfo; +//} +// +// Structs wrapping the particular information which should easily be +// understandable, parseable, and transformable, i.e., we'll try to avoid +// exposing too many technical detail here. 
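+// A minimal usage sketch of the API above (illustrative only; assumes a caller that
+// returns `Result<(), Error>`):
+//
+// let mut builder = LdkLiteBuilder::new();
+// builder.network("testnet".to_string());
+// let mut ldk_lite = builder.build()?;
+// ldk_lite.start()?;
+// println!("New funding address: {}", ldk_lite.new_funding_address()?);
+// ldk_lite.stop()?;
+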
+struct PaymentInfo; +//struct ChannelInfo; +//struct FundingInfo; + +/// TODO + +type ChainMonitor = chainmonitor::ChainMonitor< + InMemorySigner, + Arc, + Arc>, + Arc>, + Arc, + Arc, +>; + +type PeerManager = SimpleArcPeerManager< + SocketDescriptor, + ChainMonitor, + LdkLiteChainAccess, + LdkLiteChainAccess, + dyn Access + Send + Sync, + FilesystemLogger, +>; + +pub(crate) type ChannelManager = SimpleArcChannelManager< + ChainMonitor, + LdkLiteChainAccess, + LdkLiteChainAccess, + FilesystemLogger, +>; + +type InvoicePayer = payment::InvoicePayer< + Arc, + Router, + Arc>, + Arc, + F, +>; + +type Router = DefaultRouter, Arc>; +type Scorer = ProbabilisticScorer, Arc>; + +type GossipSync = + P2PGossipSync, Arc, Arc>; + +pub(crate) type NetworkGraph = gossip::NetworkGraph>; + +pub(crate) type PaymentInfoStorage = Mutex>; + +pub(crate) type EventSender = mpsc::SyncSender; +pub(crate) type EventReceiver = mpsc::Receiver; + +#[cfg(test)] +mod tests {} diff --git a/src/logger.rs b/src/logger.rs new file mode 100644 index 000000000..a5f534aea --- /dev/null +++ b/src/logger.rs @@ -0,0 +1,38 @@ +use lightning::util::logger::{Logger, Record}; +use lightning::util::ser::Writer; + +use chrono::Utc; + +use std::fs; + +pub(crate) struct FilesystemLogger { + data_dir: String, +} +impl FilesystemLogger { + pub(crate) fn new(data_dir: String) -> Self { + let logs_path = format!("{}/logs", data_dir); + fs::create_dir_all(logs_path.clone()).unwrap(); + Self { data_dir: logs_path } + } +} +impl Logger for FilesystemLogger { + fn log(&self, record: &Record) { + let raw_log = record.args.to_string(); + let log = format!( + "{} {:<5} [{}:{}] {}\n", + Utc::now().format("%Y-%m-%d %H:%M:%S"), + record.level.to_string(), + record.module_path, + record.line, + raw_log + ); + let logs_file_path = format!("{}/logs.txt", self.data_dir.clone()); + fs::OpenOptions::new() + .create(true) + .append(true) + .open(logs_file_path) + .unwrap() + .write_all(log.as_bytes()) + .unwrap(); + } +} diff --git a/src/util.rs b/src/util.rs new file mode 100644 index 000000000..9cf5811e2 --- /dev/null +++ b/src/util.rs @@ -0,0 +1,175 @@ +use crate::error::LdkLiteError as Error; +use crate::logger::FilesystemLogger; +use crate::{LdkLiteConfig, NetworkGraph, Scorer}; + +use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters}; +use lightning::util::ser::ReadableArgs; + +use bitcoin::secp256k1::PublicKey; + +use rand::{thread_rng, RngCore}; + +use std::collections::HashMap; +use std::fs; +use std::io::{BufRead, BufReader, Write}; +use std::net::{SocketAddr, ToSocketAddrs}; +use std::sync::Arc; + +pub(crate) fn read_or_generate_seed_file(config: Arc) -> Result<[u8; 32], Error> { + let keys_seed_path = format!("{}/keys_seed", config.storage_dir_path); + let keys_seed = if let Ok(seed) = fs::read(keys_seed_path.clone()) { + assert_eq!(seed.len(), 32); + let mut key = [0; 32]; + key.copy_from_slice(&seed); + key + } else { + let mut key = [0; 32]; + thread_rng().fill_bytes(&mut key); + + let mut f = fs::File::create(keys_seed_path.clone()).map_err(|e| Error::Io(e))?; + f.write_all(&key).expect("Failed to write node keys seed to disk"); + f.sync_all().expect("Failed to sync node keys seed to disk"); + key + }; + + Ok(keys_seed) +} + +pub(crate) fn read_network_graph( + config: Arc, logger: Arc, +) -> Result { + let ldk_data_dir = format!("{}/ldk", &config.storage_dir_path.clone()); + let network_graph_path = format!("{}/network_graph", ldk_data_dir.clone()); + + if let Ok(file) = fs::File::open(network_graph_path) { 
+ if let Ok(graph) = NetworkGraph::read(&mut BufReader::new(file), Arc::clone(&logger)) { + return Ok(graph); + } + } + + let genesis_hash = + bitcoin::blockdata::constants::genesis_block(config.network).header.block_hash(); + Ok(NetworkGraph::new(genesis_hash, logger)) +} + +pub(crate) fn read_scorer( + config: Arc, network_graph: Arc, logger: Arc, +) -> Scorer { + let ldk_data_dir = format!("{}/ldk", &config.storage_dir_path.clone()); + let scorer_path = format!("{}/scorer", ldk_data_dir.clone()); + + let params = ProbabilisticScoringParameters::default(); + if let Ok(file) = fs::File::open(scorer_path) { + let args = (params.clone(), Arc::clone(&network_graph), Arc::clone(&logger)); + if let Ok(scorer) = ProbabilisticScorer::read(&mut BufReader::new(file), args) { + return scorer; + } + } + ProbabilisticScorer::new(params, network_graph, logger) +} + +pub(crate) fn read_channel_peer_data( + config: Arc, +) -> Result, Error> { + let ldk_data_dir = format!("{}/ldk", &config.storage_dir_path.clone()); + let peer_data_path = format!("{}/channel_peer_data", ldk_data_dir.clone()); + let mut peer_data = HashMap::new(); + + if let Ok(file) = fs::File::open(peer_data_path) { + let reader = BufReader::new(file); + for line in reader.lines() { + match parse_peer_info(line.unwrap()) { + Ok((pubkey, socket_addr)) => { + peer_data.insert(pubkey, socket_addr); + } + Err(e) => return Err(Error::Io(e)), + } + } + } + Ok(peer_data) +} + +pub(crate) fn persist_channel_peer( + config: Arc, peer_info: &str, +) -> std::io::Result<()> { + let ldk_data_dir = format!("{}/ldk", &config.storage_dir_path.clone()); + let peer_data_path = format!("{}/channel_peer_data", ldk_data_dir.clone()); + let mut file = fs::OpenOptions::new().create(true).append(true).open(peer_data_path)?; + file.write_all(format!("{}\n", peer_info).as_bytes()) +} + +pub(crate) fn parse_peer_info( + peer_pubkey_and_ip_addr: String, +) -> Result<(PublicKey, SocketAddr), std::io::Error> { + let mut pubkey_and_addr = peer_pubkey_and_ip_addr.split("@"); + let pubkey = pubkey_and_addr.next(); + let peer_addr_str = pubkey_and_addr.next(); + if peer_addr_str.is_none() || peer_addr_str.is_none() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "ERROR: incorrectly formatted peer info. 
Should be formatted as: `pubkey@host:port`", + )); + } + + let peer_addr = peer_addr_str.unwrap().to_socket_addrs().map(|mut r| r.next()); + if peer_addr.is_err() || peer_addr.as_ref().unwrap().is_none() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "ERROR: couldn't parse pubkey@host:port into a socket address", + )); + } + + let pubkey = hex_to_compressed_pubkey(pubkey.unwrap()); + if pubkey.is_none() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "ERROR: unable to parse given pubkey for node", + )); + } + + Ok((pubkey.unwrap(), peer_addr.unwrap().unwrap())) +} + +pub fn hex_to_vec(hex: &str) -> Option> { + let mut out = Vec::with_capacity(hex.len() / 2); + + let mut b = 0; + for (idx, c) in hex.as_bytes().iter().enumerate() { + b <<= 4; + match *c { + b'A'..=b'F' => b |= c - b'A' + 10, + b'a'..=b'f' => b |= c - b'a' + 10, + b'0'..=b'9' => b |= c - b'0', + _ => return None, + } + if (idx & 1) == 1 { + out.push(b); + b = 0; + } + } + + Some(out) +} + +#[inline] +pub fn hex_str(value: &[u8]) -> String { + let mut res = String::with_capacity(64); + for v in value { + res += &format!("{:02x}", v); + } + res +} + +pub fn hex_to_compressed_pubkey(hex: &str) -> Option { + if hex.len() != 33 * 2 { + return None; + } + let data = match hex_to_vec(&hex[0..33 * 2]) { + Some(bytes) => bytes, + None => return None, + }; + match PublicKey::from_slice(&data) { + Ok(pk) => Some(pk), + Err(_) => None, + } +} From eb4b5fba28f5ec2d9ee5a93a848011ce980a6b2a Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 26 Aug 2022 21:31:06 +0200 Subject: [PATCH 02/20] f `String` -> `str` --- Cargo.toml | 1 - src/lib.rs | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a5769df19..a7040b6d9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,6 @@ edition = "2018" [dependencies] lightning = { version = "0.0.110", features = ["max_level_trace", "std"] } -lightning-block-sync = { version = "0.0.110", features = [ "rpc-client" ] } lightning-invoice = { version = "0.18" } lightning-net-tokio = { version = "0.0.110" } lightning-persister = { version = "0.0.110" } diff --git a/src/lib.rs b/src/lib.rs index bc785b72c..55650e75e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -145,8 +145,8 @@ impl LdkLiteBuilder { /// Options: `mainnet`/`bitcoin`, `testnet`, `regtest`, `signet` /// /// Default: `testnet` - pub fn network(&mut self, network: String) -> &mut Self { - self.config.network = match network.as_str() { + pub fn network(&mut self, network: &str) -> &mut Self { + self.config.network = match network { "mainnet" => bitcoin::Network::Bitcoin, "bitcoin" => bitcoin::Network::Bitcoin, "testnet" => bitcoin::Network::Testnet, From aed17592518401968f907f9c8f313309cf31c3c2 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 26 Aug 2022 21:32:05 +0200 Subject: [PATCH 03/20] f No sleep --- src/lib.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 55650e75e..8bb116ad1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -419,9 +419,6 @@ impl LdkLite { runtime.stop_networking.store(true, Ordering::Release); self.peer_manager.disconnect_all_peers(); - // Wait for 500ms to make sure shutdown can happen cleanly. - thread::sleep(Duration::from_millis(500)); - // Drop the runtime, which stops the background processor and any possibly remaining tokio threads. 
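// Setting the `Option` behind `run_lock` back to `None` is what performs the actual
// teardown: the `LdkLiteRuntime` it held is dropped, taking the background processor
// and the tokio runtime (and any tasks spawned on it) along with it.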
*run_lock = None; Ok(()) From 7e68355812ec18b906d95f97efc58920aecf572e Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 26 Aug 2022 21:34:10 +0200 Subject: [PATCH 04/20] f Use `tokio::spawn` for now --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 8bb116ad1..a5c9755f3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -435,7 +435,7 @@ impl LdkLite { let stop_wallet_sync = Arc::new(AtomicBool::new(false)); let stop_sync = Arc::clone(&stop_wallet_sync); - thread::spawn(move || { + _tokio_runtime.spawn(async move { let mut rounds = 0; loop { if stop_sync.load(Ordering::Acquire) { From 58b382dee44798d43ef41366034a88fb17701c47 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Sat, 27 Aug 2022 10:53:08 +0200 Subject: [PATCH 05/20] Impl `send_payment` / `send_spontaneous_payment` --- src/error.rs | 11 ++++++ src/lib.rs | 96 ++++++++++++++++++++++++++++++++++------------------ 2 files changed, 75 insertions(+), 32 deletions(-) diff --git a/src/error.rs b/src/error.rs index 52b639cb1..45b05100a 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,5 +1,6 @@ use bdk::blockchain::esplora; use lightning::ln::msgs; +use lightning_invoice::payment; use std::fmt; use std::io; use std::sync::mpsc; @@ -23,6 +24,8 @@ pub enum LdkLiteError { ConnectionClosed, /// A wrapped LDK `DecodeError` Decode(msgs::DecodeError), + /// A wrapped LDK `PaymentError` + Payment(payment::PaymentError), /// A wrapped BDK error Bdk(bdk::Error), /// A wrapped `Bip32` error @@ -41,6 +44,8 @@ impl fmt::Display for LdkLiteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { LdkLiteError::Decode(ref e) => write!(f, "LDK decode error: {}", e), + // TODO: print more sensible things based on the type of payment error + LdkLiteError::Payment(ref e) => write!(f, "LDK payment error: {:?}", e), LdkLiteError::Bdk(ref e) => write!(f, "BDK error: {}", e), LdkLiteError::Bip32(ref e) => write!(f, "Bitcoin error: {}", e), LdkLiteError::Io(ref e) => write!(f, "IO error: {}", e), @@ -63,6 +68,12 @@ impl From for LdkLiteError { } } +impl From for LdkLiteError { + fn from(e: payment::PaymentError) -> Self { + Self::Payment(e) + } +} + impl From for LdkLiteError { fn from(e: bdk::Error) -> Self { Self::Bdk(e) diff --git a/src/lib.rs b/src/lib.rs index a5c9755f3..188ea7da6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -40,10 +40,10 @@ use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager use lightning::chain::{chainmonitor, Access, BestBlock, Confirm, Filter, Watch}; use lightning::ln::channelmanager; use lightning::ln::channelmanager::{ - ChainParameters, ChannelManagerReadArgs, SimpleArcChannelManager, + ChainParameters, ChannelManagerReadArgs, PaymentId, SimpleArcChannelManager, }; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager}; -use lightning::ln::PaymentHash; +use lightning::ln::{PaymentHash, PaymentPreimage}; use lightning::routing::gossip; use lightning::routing::gossip::P2PGossipSync; use lightning::routing::scoring::ProbabilisticScorer; @@ -59,8 +59,8 @@ use lightning_persister::FilesystemPersister; use lightning_net_tokio::SocketDescriptor; -use lightning_invoice::payment; use lightning_invoice::utils::DefaultRouter; +use lightning_invoice::{payment, Invoice}; use bdk::blockchain::esplora::EsploraBlockchain; use bdk::blockchain::{GetBlockHash, GetHeight}; @@ -95,6 +95,8 @@ pub struct LdkLiteConfig { pub network: bitcoin::Network, /// The TCP port the network node will listen on. 
pub listening_port: u16, + /// The default CLTV expiry delta to be used for payments. + pub default_cltv_expiry_delta: u32, } /// A builder for an [`LdkLite`] instance, allowing to set some configuration and module choices from @@ -112,9 +114,15 @@ impl LdkLiteBuilder { let esplora_server_url = "https://blockstream.info/api".to_string(); let network = bitcoin::Network::Testnet; let listening_port = 9735; - - let config = - LdkLiteConfig { storage_dir_path, esplora_server_url, network, listening_port }; + let default_cltv_expiry_delta = 144; + + let config = LdkLiteConfig { + storage_dir_path, + esplora_server_url, + network, + listening_port, + default_cltv_expiry_delta, + }; Self { config } } @@ -341,6 +349,7 @@ impl LdkLiteBuilder { channel_manager, chain_monitor, peer_manager, + keys_manager, gossip_sync, persister, logger, @@ -373,6 +382,7 @@ pub struct LdkLite { channel_manager: Arc, chain_monitor: Arc, peer_manager: Arc, + keys_manager: Arc, gossip_sync: Arc, persister: Arc, logger: Arc, @@ -567,6 +577,7 @@ impl LdkLite { /// Retrieve a new on-chain/funding address. pub fn new_funding_address(&mut self) -> Result { + // TODO: log if self.running.read().unwrap().is_none() { // We're not running. return Err(Error::NotRunning); @@ -578,10 +589,52 @@ impl LdkLite { // Connect to a node and open a new channel. Disconnects and re-connects should be handled automatically //pub fn connect_open_channel(&mut self, node_id: PublicKey, node_address: NetAddress) -> Result { //} + // // Close a previously opened channel + // pub close_channel(&mut self, channel_id: u64) -> Result<()>; + // + /// Send a payement given an invoice. + pub fn send_payment(&self, invoice: Invoice) -> Result { + // TODO: ensure we never tried paying the given payment hash before + // TODO: log + Ok(self.invoice_payer.pay_invoice(&invoice)?) + } + + /// Send a spontaneous, aka. "keysend", payment + pub fn send_spontaneous_payment( + &self, amount_msat: u64, node_id: PublicKey, + ) -> Result { + // TODO: log + let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes()); + Ok(self.invoice_payer.pay_pubkey( + node_id, + payment_preimage, + amount_msat, + self.config.default_cltv_expiry_delta, + )?) + } + // + // // Create an invoice to receive a payment + // pub receive_payment(&mut self, amount: Option) -> Invoice; + // + // // Get a new on-chain/funding address. + // pub new_funding_address(&mut self) -> Address; + // + // // Query for information about payment status. + // pub payment_info(&mut self) -> PaymentInfo; + // + // // Query for information about our channels + // pub channel_info(&mut self) -> ChannelInfo; + // + // // Query for information about our on-chain/funding status. 
+ // pub funding_info(&mut self) -> FundingInfo; + //} } async fn connect_peer_if_necessary( - pubkey: PublicKey, peer_addr: SocketAddr, peer_manager: Arc, + // TODO: log + pubkey: PublicKey, + peer_addr: SocketAddr, + peer_manager: Arc, ) -> Result<(), Error> { for node_pubkey in peer_manager.get_peer_node_ids() { if node_pubkey == pubkey { @@ -593,7 +646,10 @@ async fn connect_peer_if_necessary( } async fn do_connect_peer( - pubkey: PublicKey, peer_addr: SocketAddr, peer_manager: Arc, + // TODO: log + pubkey: PublicKey, + peer_addr: SocketAddr, + peer_manager: Arc, ) -> Result<(), Error> { match lightning_net_tokio::connect_outbound(Arc::clone(&peer_manager), pubkey, peer_addr).await { @@ -617,30 +673,6 @@ async fn do_connect_peer( } } -// // Close a previously opened channel -// pub close_channel(&mut self, channel_id: u64) -> Result<()>; -// -// // Pay an invoice -// pub send_payment(&mut self, invoice: Invoice) -> Result -// -// // Send a spontaneous, aka. "keysend", payment -// pub send_spontaneous_payment(&mut self, amount: u64, node_id: PublicKey) -> Result; -// -// // Create an invoice to receive a payment -// pub receive_payment(&mut self, amount: Option) -> Invoice; -// -// // Get a new on-chain/funding address. -// pub new_funding_address(&mut self) -> Address; -// -// // Query for information about payment status. -// pub payment_info(&mut self) -> PaymentInfo; -// -// // Query for information about our channels -// pub channel_info(&mut self) -> ChannelInfo; -// -// // Query for information about our on-chain/funding status. -// pub funding_info(&mut self) -> FundingInfo; -//} // // Structs wrapping the particular information which should easily be // understandable, parseable, and transformable, i.e., we'll try to avoid From 8ec6cafb740f75cbaf73ef2110b82f5dacc64123 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Sat, 27 Aug 2022 11:10:21 +0200 Subject: [PATCH 06/20] f NotRunning --- src/lib.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 188ea7da6..998ac2e0a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -416,7 +416,6 @@ impl LdkLite { pub fn stop(&mut self) -> Result<(), Error> { let mut run_lock = self.running.write().unwrap(); if run_lock.is_none() { - // We're not running. return Err(Error::NotRunning); } @@ -568,7 +567,6 @@ impl LdkLite { /// Returns our own node id pub fn my_node_id(&self) -> Result { if self.running.read().unwrap().is_none() { - // We're not running. return Err(Error::NotRunning); } @@ -579,7 +577,6 @@ impl LdkLite { pub fn new_funding_address(&mut self) -> Result { // TODO: log if self.running.read().unwrap().is_none() { - // We're not running. return Err(Error::NotRunning); } @@ -594,6 +591,9 @@ impl LdkLite { // /// Send a payement given an invoice. pub fn send_payment(&self, invoice: Invoice) -> Result { + if self.running.read().unwrap().is_none() { + return Err(Error::NotRunning); + } // TODO: ensure we never tried paying the given payment hash before // TODO: log Ok(self.invoice_payer.pay_invoice(&invoice)?) 
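// A minimal usage sketch of the payment API wired up above, assuming the crate is
// consumed as `ldk_lite`, that `node` is an already-built and started `LdkLite`
// instance, and that `bolt11` holds a BOLT11 invoice string; the names and error
// handling are illustrative only, not part of the patch itself.
use std::str::FromStr;

fn pay_invoice_example(node: &ldk_lite::LdkLite, bolt11: &str) {
	// `lightning_invoice::Invoice` implements `FromStr`, so the encoded invoice can be
	// parsed directly before being handed to `send_payment`.
	match lightning_invoice::Invoice::from_str(bolt11) {
		Ok(invoice) => match node.send_payment(invoice) {
			Ok(_id) => println!("payment initiated"),
			Err(e) => eprintln!("sending failed: {}", e),
		},
		Err(e) => eprintln!("invalid invoice: {}", e),
	}
}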
@@ -603,6 +603,9 @@ impl LdkLite { pub fn send_spontaneous_payment( &self, amount_msat: u64, node_id: PublicKey, ) -> Result { + if self.running.read().unwrap().is_none() { + return Err(Error::NotRunning); + } // TODO: log let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes()); Ok(self.invoice_payer.pay_pubkey( From 765821096290af16a75d008405af85b139457ed1 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 29 Aug 2022 14:14:55 +0200 Subject: [PATCH 07/20] Copy logging macros over. --- src/event.rs | 7 +++-- src/lib.rs | 6 ++-- src/logger.rs | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 7 deletions(-) diff --git a/src/event.rs b/src/event.rs index f353229bd..2d97f1abf 100644 --- a/src/event.rs +++ b/src/event.rs @@ -1,13 +1,14 @@ use crate::{ - ChannelManager, EventSender, FilesystemLogger, LdkLiteChainAccess, LdkLiteConfig, NetworkGraph, + ChannelManager, EventSender, LdkLiteChainAccess, LdkLiteConfig, NetworkGraph, PaymentInfoStorage, }; +#[allow(unused_imports)] +use crate::logger::{Logger, FilesystemLogger, log_info, log_error, log_warn, log_trace, log_given_level, log_internal}; + use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use lightning::chain::keysinterface::KeysManager; use lightning::util::events as ldk_events; -use lightning::util::logger::Logger; -use lightning::{log_error, log_given_level, log_internal}; use bdk::database::MemoryDatabase; diff --git a/src/lib.rs b/src/lib.rs index 998ac2e0a..8748e25d4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -34,7 +34,9 @@ use access::LdkLiteChainAccess; pub use error::LdkLiteError as Error; pub use event::LdkLiteEvent; use event::LdkLiteEventHandler; -use logger::FilesystemLogger; + +#[allow(unused_imports)] +use logger::{Logger, FilesystemLogger, log_info, log_error, log_warn, log_trace, log_given_level, log_internal}; use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient}; use lightning::chain::{chainmonitor, Access, BestBlock, Confirm, Filter, Watch}; @@ -49,9 +51,7 @@ use lightning::routing::gossip::P2PGossipSync; use lightning::routing::scoring::ProbabilisticScorer; use lightning::util::config::UserConfig; -use lightning::util::logger::Logger; use lightning::util::ser::ReadableArgs; -use lightning::{log_error, log_given_level, log_internal}; use lightning_background_processor::BackgroundProcessor; use lightning_background_processor::GossipSync as BPGossipSync; diff --git a/src/logger.rs b/src/logger.rs index a5f534aea..d069df26a 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -1,4 +1,5 @@ -use lightning::util::logger::{Logger, Record}; +pub(crate) use lightning::util::logger::Logger; +use lightning::util::logger::Record; use lightning::util::ser::Writer; use chrono::Utc; @@ -8,6 +9,7 @@ use std::fs; pub(crate) struct FilesystemLogger { data_dir: String, } + impl FilesystemLogger { pub(crate) fn new(data_dir: String) -> Self { let logs_path = format!("{}/logs", data_dir); @@ -36,3 +38,77 @@ impl Logger for FilesystemLogger { .unwrap(); } } + + +// TODO: We copied the logging macros for now from `lightning::util::macro_logger`. We should +// switch back to using them from upstream after the next release, which includes their export. +macro_rules! 
log_internal { + ($logger: expr, $lvl:expr, $($arg:tt)+) => ( + $logger.log(&lightning::util::logger::Record::new($lvl, format_args!($($arg)+), module_path!(), file!(), line!())) + ); +} +pub(crate) use log_internal; + +macro_rules! log_given_level { + ($logger: expr, $lvl:expr, $($arg:tt)+) => ( + match $lvl { + #[cfg(not(any(feature = "max_level_off")))] + lightning::util::logger::Level::Error => log_internal!($logger, $lvl, $($arg)*), + #[cfg(not(any(feature = "max_level_off", feature = "max_level_error")))] + lightning::util::logger::Level::Warn => log_internal!($logger, $lvl, $($arg)*), + #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn")))] + lightning::util::logger::Level::Info => log_internal!($logger, $lvl, $($arg)*), + #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info")))] + lightning::util::logger::Level::Debug => log_internal!($logger, $lvl, $($arg)*), + #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug")))] + lightning::util::logger::Level::Trace => log_internal!($logger, $lvl, $($arg)*), + #[cfg(not(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug", feature = "max_level_trace")))] + lightning::util::logger::Level::Gossip => log_internal!($logger, $lvl, $($arg)*), + + #[cfg(any(feature = "max_level_off", feature = "max_level_error", feature = "max_level_warn", feature = "max_level_info", feature = "max_level_debug", feature = "max_level_trace"))] + _ => { + // The level is disabled at compile-time + }, + } + ); +} +pub(crate) use log_given_level; + +#[allow(unused_macros)] +macro_rules! log_error { + ($logger: expr, $($arg:tt)*) => ( + log_given_level!($logger, lightning::util::logger::Level::Error, $($arg)*) + ) +} +pub(crate) use log_error; + +#[allow(unused_macros)] +macro_rules! log_warn { + ($logger: expr, $($arg:tt)*) => ( + log_given_level!($logger, lightning::util::logger::Level::Warn, $($arg)*) + ) +} +pub(crate) use log_warn; + +#[allow(unused_macros)] +macro_rules! log_info { + ($logger: expr, $($arg:tt)*) => ( + log_given_level!($logger, lightning::util::logger::Level::Info, $($arg)*) + ) +} +pub(crate) use log_info; + +#[allow(unused_macros)] +macro_rules! log_debug { + ($logger: expr, $($arg:tt)*) => ( + log_given_level!($logger, lightning::util::logger::Level::Debug, $($arg)*) + ) +} + +#[allow(unused_macros)] +macro_rules! log_trace { + ($logger: expr, $($arg:tt)*) => ( + log_given_level!($logger, lightning::util::logger::Level::Trace, $($arg)*) + ) +} +pub(crate) use log_trace; From c032a101eb4d99752442e6a3bc540c986a51a77d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 29 Aug 2022 14:55:15 +0200 Subject: [PATCH 08/20] Impl. 
logging, `PaymentInfo` storage and querying --- src/event.rs | 5 +- src/lib.rs | 161 ++++++++++++++++++++++++++++++++++++++++++-------- src/logger.rs | 1 - 3 files changed, 139 insertions(+), 28 deletions(-) diff --git a/src/event.rs b/src/event.rs index 2d97f1abf..ed0e37411 100644 --- a/src/event.rs +++ b/src/event.rs @@ -4,7 +4,10 @@ use crate::{ }; #[allow(unused_imports)] -use crate::logger::{Logger, FilesystemLogger, log_info, log_error, log_warn, log_trace, log_given_level, log_internal}; +use crate::logger::{ + log_error, log_given_level, log_info, log_internal, log_trace, log_warn, FilesystemLogger, + Logger, +}; use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use lightning::chain::keysinterface::KeysManager; diff --git a/src/lib.rs b/src/lib.rs index 8748e25d4..cd1e5dd0c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -36,16 +36,19 @@ pub use event::LdkLiteEvent; use event::LdkLiteEventHandler; #[allow(unused_imports)] -use logger::{Logger, FilesystemLogger, log_info, log_error, log_warn, log_trace, log_given_level, log_internal}; +use logger::{ + log_error, log_given_level, log_info, log_internal, log_trace, log_warn, FilesystemLogger, + Logger, +}; use lightning::chain::keysinterface::{InMemorySigner, KeysInterface, KeysManager, Recipient}; use lightning::chain::{chainmonitor, Access, BestBlock, Confirm, Filter, Watch}; use lightning::ln::channelmanager; use lightning::ln::channelmanager::{ - ChainParameters, ChannelManagerReadArgs, PaymentId, SimpleArcChannelManager, + ChainParameters, ChannelManagerReadArgs, SimpleArcChannelManager, }; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler, SimpleArcPeerManager}; -use lightning::ln::{PaymentHash, PaymentPreimage}; +use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::routing::gossip; use lightning::routing::gossip::P2PGossipSync; use lightning::routing::scoring::ProbabilisticScorer; @@ -66,6 +69,8 @@ use bdk::blockchain::esplora::EsploraBlockchain; use bdk::blockchain::{GetBlockHash, GetHeight}; use bdk::database::MemoryDatabase; +use bitcoin::hashes::sha256::Hash as Sha256; +use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; use bitcoin::BlockHash; @@ -306,8 +311,8 @@ impl LdkLiteBuilder { // Step 13: Init payment info storage // TODO: persist payment info to disk - let _inbound_payments = Arc::new(Mutex::new(HashMap::new())); - let _outbound_payments = Arc::new(Mutex::new(HashMap::new())); + let inbound_payments = Arc::new(Mutex::new(HashMap::new())); + let outbound_payments = Arc::new(Mutex::new(HashMap::new())); // Step 14: Handle LDK Events let event_queue = mpsc::sync_channel(CHANNEL_BUF_SIZE); @@ -317,8 +322,8 @@ impl LdkLiteBuilder { Arc::clone(&channel_manager), Arc::clone(&network_graph), Arc::clone(&keys_manager), - Arc::clone(&_inbound_payments), - Arc::clone(&_outbound_payments), + Arc::clone(&inbound_payments), + Arc::clone(&outbound_payments), event_sender, Arc::clone(&logger), Arc::clone(&config), @@ -356,8 +361,8 @@ impl LdkLiteBuilder { network_graph, scorer, invoice_payer, - _inbound_payments, - _outbound_payments, + inbound_payments, + outbound_payments, event_queue, }) } @@ -389,8 +394,8 @@ pub struct LdkLite { scorer: Arc>, network_graph: Arc, invoice_payer: Arc>, - _inbound_payments: Arc, - _outbound_payments: Arc, + inbound_payments: Arc, + outbound_payments: Arc, event_queue: (EventSender, EventReceiver), } @@ -575,12 +580,13 @@ impl LdkLite { /// Retrieve a new on-chain/funding address. 
pub fn new_funding_address(&mut self) -> Result { - // TODO: log if self.running.read().unwrap().is_none() { return Err(Error::NotRunning); } - self.chain_access.get_new_address() + let funding_address = self.chain_access.get_new_address()?; + log_info!(self.logger, "generated new funding address: {}", funding_address); + Ok(funding_address) } // Connect to a node and open a new channel. Disconnects and re-connects should be handled automatically @@ -590,40 +596,120 @@ impl LdkLite { // pub close_channel(&mut self, channel_id: u64) -> Result<()>; // /// Send a payement given an invoice. - pub fn send_payment(&self, invoice: Invoice) -> Result { + pub fn send_payment(&self, invoice: Invoice) -> Result { if self.running.read().unwrap().is_none() { return Err(Error::NotRunning); } + // TODO: ensure we never tried paying the given payment hash before - // TODO: log - Ok(self.invoice_payer.pay_invoice(&invoice)?) + let status = match self.invoice_payer.pay_invoice(&invoice) { + Ok(_payment_id) => { + let payee_pubkey = invoice.recover_payee_pub_key(); + // TODO: is this unwrap safe? Would a payment to an invoice with None amount ever + // succeed? + let amt_msat = invoice.amount_milli_satoshis().unwrap(); + log_info!(self.logger, "initiated sending {} msats to {}", amt_msat, payee_pubkey); + PaymentStatus::Pending + } + Err(payment::PaymentError::Invoice(e)) => { + log_error!(self.logger, "invalid invoice: {}", e); + return Err(Error::Payment(payment::PaymentError::Invoice(e))); + } + Err(payment::PaymentError::Routing(e)) => { + log_error!(self.logger, "failed to find route: {}", e.err); + return Err(Error::Payment(payment::PaymentError::Routing(e))); + } + Err(payment::PaymentError::Sending(e)) => { + log_error!(self.logger, "failed to send payment: {:?}", e); + PaymentStatus::Failed + } + }; + + let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner()); + let payment_secret = Some(invoice.payment_secret().clone()); + + let mut outbound_payments_lock = self.outbound_payments.lock().unwrap(); + outbound_payments_lock.insert( + payment_hash, + PaymentInfo { + preimage: None, + secret: payment_secret, + status, + amount_msat: invoice.amount_milli_satoshis(), + }, + ); + + Ok(payment_hash) } /// Send a spontaneous, aka. "keysend", payment pub fn send_spontaneous_payment( &self, amount_msat: u64, node_id: PublicKey, - ) -> Result { + ) -> Result { if self.running.read().unwrap().is_none() { return Err(Error::NotRunning); } - // TODO: log + let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes()); - Ok(self.invoice_payer.pay_pubkey( + let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()); + + let status = match self.invoice_payer.pay_pubkey( node_id, payment_preimage, amount_msat, self.config.default_cltv_expiry_delta, - )?) 
+ ) { + Ok(_payment_id) => { + log_info!(self.logger, "initiated sending {} msats to {}", amount_msat, node_id); + PaymentStatus::Pending + } + Err(payment::PaymentError::Invoice(e)) => { + log_error!(self.logger, "invalid invoice: {}", e); + return Err(Error::Payment(payment::PaymentError::Invoice(e))); + } + Err(payment::PaymentError::Routing(e)) => { + log_error!(self.logger, "failed to find route: {}", e.err); + return Err(Error::Payment(payment::PaymentError::Routing(e))); + } + Err(payment::PaymentError::Sending(e)) => { + log_error!(self.logger, "failed to send payment: {:?}", e); + PaymentStatus::Failed + } + }; + + let mut outbound_payments_lock = self.outbound_payments.lock().unwrap(); + outbound_payments_lock.insert( + payment_hash, + PaymentInfo { preimage: None, secret: None, status, amount_msat: Some(amount_msat) }, + ); + + Ok(payment_hash) } // // // Create an invoice to receive a payment // pub receive_payment(&mut self, amount: Option) -> Invoice; // - // // Get a new on-chain/funding address. - // pub new_funding_address(&mut self) -> Address; - // - // // Query for information about payment status. - // pub payment_info(&mut self) -> PaymentInfo; + /// Query for information about the status of a specific payment. + pub fn payment_info(&mut self, payment_hash: &[u8; 32]) -> Option { + let payment_hash = PaymentHash(*payment_hash); + + { + let outbound_payments_lock = self.outbound_payments.lock().unwrap(); + if let Some(payment_info) = outbound_payments_lock.get(&payment_hash) { + return Some((*payment_info).clone()); + } + } + + { + let inbound_payments_lock = self.inbound_payments.lock().unwrap(); + if let Some(payment_info) = inbound_payments_lock.get(&payment_hash) { + return Some((*payment_info).clone()); + } + } + + None + } + // // // Query for information about our channels // pub channel_info(&mut self) -> ChannelInfo; @@ -680,7 +766,30 @@ async fn do_connect_peer( // Structs wrapping the particular information which should easily be // understandable, parseable, and transformable, i.e., we'll try to avoid // exposing too many technical detail here. -struct PaymentInfo; +/// Represents a payment. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct PaymentInfo { + /// The pre-image used by the payment. + pub preimage: Option, + /// The secret used by the payment. + pub secret: Option, + /// The status of the payment. + pub status: PaymentStatus, + /// The amount transferred. + pub amount_msat: Option, +} + +/// Represents the current status of a payment. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum PaymentStatus { + /// The payment is still pending. + Pending, + /// The payment suceeded. + Succeeded, + /// The payment failed. + Failed, +} + //struct ChannelInfo; //struct FundingInfo; diff --git a/src/logger.rs b/src/logger.rs index d069df26a..6f864c069 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -39,7 +39,6 @@ impl Logger for FilesystemLogger { } } - // TODO: We copied the logging macros for now from `lightning::util::macro_logger`. We should // switch back to using them from upstream after the next release, which includes their export. macro_rules! 
log_internal { From 5b9e4c40a409c1e93d3faa7418572a13fac2b1b5 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 29 Aug 2022 15:23:18 +0200 Subject: [PATCH 09/20] Impl `receive_payment` --- src/error.rs | 9 ++++++++ src/lib.rs | 63 ++++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 63 insertions(+), 9 deletions(-) diff --git a/src/error.rs b/src/error.rs index 45b05100a..c8f8032c5 100644 --- a/src/error.rs +++ b/src/error.rs @@ -26,6 +26,8 @@ pub enum LdkLiteError { Decode(msgs::DecodeError), /// A wrapped LDK `PaymentError` Payment(payment::PaymentError), + /// A wrapped LDK `SignOrCreationError` + InvoiceCreation(lightning_invoice::SignOrCreationError), /// A wrapped BDK error Bdk(bdk::Error), /// A wrapped `Bip32` error @@ -46,6 +48,7 @@ impl fmt::Display for LdkLiteError { LdkLiteError::Decode(ref e) => write!(f, "LDK decode error: {}", e), // TODO: print more sensible things based on the type of payment error LdkLiteError::Payment(ref e) => write!(f, "LDK payment error: {:?}", e), + LdkLiteError::InvoiceCreation(ref e) => write!(f, "LDK invoice sign or creation error: {:?}", e), LdkLiteError::Bdk(ref e) => write!(f, "BDK error: {}", e), LdkLiteError::Bip32(ref e) => write!(f, "Bitcoin error: {}", e), LdkLiteError::Io(ref e) => write!(f, "IO error: {}", e), @@ -74,6 +77,12 @@ impl From for LdkLiteError { } } +impl From for LdkLiteError { + fn from(e: lightning_invoice::SignOrCreationError) -> Self { + Self::InvoiceCreation(e) + } +} + impl From for LdkLiteError { fn from(e: bdk::Error) -> Self { Self::Bdk(e) diff --git a/src/lib.rs b/src/lib.rs index cd1e5dd0c..1ec0a5126 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -63,7 +63,7 @@ use lightning_persister::FilesystemPersister; use lightning_net_tokio::SocketDescriptor; use lightning_invoice::utils::DefaultRouter; -use lightning_invoice::{payment, Invoice}; +use lightning_invoice::{payment, Currency, Invoice}; use bdk::blockchain::esplora::EsploraBlockchain; use bdk::blockchain::{GetBlockHash, GetHeight}; @@ -606,7 +606,8 @@ impl LdkLite { Ok(_payment_id) => { let payee_pubkey = invoice.recover_payee_pub_key(); // TODO: is this unwrap safe? Would a payment to an invoice with None amount ever - // succeed? + // succeed? Should we allow to set the amount in the interface or via a dedicated + // method? let amt_msat = invoice.amount_milli_satoshis().unwrap(); log_info!(self.logger, "initiated sending {} msats to {}", amt_msat, payee_pubkey); PaymentStatus::Pending @@ -685,12 +686,56 @@ impl LdkLite { Ok(payment_hash) } - // - // // Create an invoice to receive a payment - // pub receive_payment(&mut self, amount: Option) -> Invoice; - // + + // TODO: Should we provide a configurable default for the expiry, or force the user to supply it on every call? + /// Returns a payable invoice that can be used to request and receive a payment. 
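/// `amount_msat` is the amount to request in millisatoshis, or `None` for an
/// amount-less invoice, `description` is embedded in the generated invoice, and
/// `expiry_secs` bounds how long the invoice remains payable.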
+ pub fn receive_payment( + &self, amount_msat: Option, description: &str, expiry_secs: u32, + ) -> Result { + let mut inbound_payments_lock = self.inbound_payments.lock().unwrap(); + + let currency = match self.config.network { + bitcoin::Network::Bitcoin => Currency::Bitcoin, + bitcoin::Network::Testnet => Currency::BitcoinTestnet, + bitcoin::Network::Regtest => Currency::Regtest, + bitcoin::Network::Signet => Currency::Signet, + }; + let keys_manager = Arc::clone(&self.keys_manager); + let invoice = match lightning_invoice::utils::create_invoice_from_channelmanager( + &self.channel_manager, + keys_manager, + currency, + amount_msat, + description.to_string(), + expiry_secs, + ) { + Ok(inv) => { + log_info!(self.logger, "generated invoice: {}", inv); + inv + } + Err(e) => { + let err_str = &e.to_string(); + log_error!(self.logger, "failed to create invoice: {:?}", err_str); + // TODO; + return Err(Error::InvoiceCreation(e)); + } + }; + + let payment_hash = PaymentHash(invoice.payment_hash().clone().into_inner()); + inbound_payments_lock.insert( + payment_hash, + PaymentInfo { + preimage: None, + secret: Some(invoice.payment_secret().clone()), + status: PaymentStatus::Pending, + amount_msat, + }, + ); + Ok(invoice) + } + /// Query for information about the status of a specific payment. - pub fn payment_info(&mut self, payment_hash: &[u8; 32]) -> Option { + pub fn payment_info(&self, payment_hash: &[u8; 32]) -> Option { let payment_hash = PaymentHash(*payment_hash); { @@ -712,10 +757,10 @@ impl LdkLite { // // // Query for information about our channels - // pub channel_info(&mut self) -> ChannelInfo; + // pub channel_info(&self) -> ChannelInfo; // // // Query for information about our on-chain/funding status. - // pub funding_info(&mut self) -> FundingInfo; + // pub funding_info(&self) -> FundingInfo; //} } From 48eecd2fbe904b2f24ff2b12d266795684504a0c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 29 Aug 2022 16:29:44 +0200 Subject: [PATCH 10/20] More logging, rename wrapped errors --- src/error.rs | 61 +++++++++++++++++++++++++++++------------------- src/lib.rs | 38 ++++++++++++++---------------- src/util.rs | 66 ++++++++++++++++++++++++---------------------------- 3 files changed, 84 insertions(+), 81 deletions(-) diff --git a/src/error.rs b/src/error.rs index c8f8032c5..e85d729a2 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,5 +1,6 @@ use bdk::blockchain::esplora; use lightning::ln::msgs; +use lightning::util::errors; use lightning_invoice::payment; use std::fmt; use std::io; @@ -22,64 +23,76 @@ pub enum LdkLiteError { ChainStateMismatch, /// A network connection has been closed. ConnectionClosed, + /// A given peer info could not be parsed. 
+ PeerInfoParse(&'static str), + /// A wrapped LDK `APIError` + LdkApi(errors::APIError), /// A wrapped LDK `DecodeError` - Decode(msgs::DecodeError), + LdkDecode(msgs::DecodeError), /// A wrapped LDK `PaymentError` - Payment(payment::PaymentError), + LdkPayment(payment::PaymentError), /// A wrapped LDK `SignOrCreationError` - InvoiceCreation(lightning_invoice::SignOrCreationError), + LdkInvoiceCreation(lightning_invoice::SignOrCreationError), /// A wrapped BDK error Bdk(bdk::Error), + /// A wrapped `EsploraError` + Esplora(esplora::EsploraError), /// A wrapped `Bip32` error Bip32(bitcoin::util::bip32::Error), /// A wrapped `std::io::Error` - Io(io::Error), + StdIo(io::Error), /// A wrapped `SystemTimeError` - Time(time::SystemTimeError), - /// A wrapped `EsploraError` - Esplora(esplora::EsploraError), + StdTime(time::SystemTimeError), /// A wrapped `mpsc::RecvError` - ChannelRecv(mpsc::RecvError), + StdChannelRecv(mpsc::RecvError), } impl fmt::Display for LdkLiteError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - LdkLiteError::Decode(ref e) => write!(f, "LDK decode error: {}", e), - // TODO: print more sensible things based on the type of payment error - LdkLiteError::Payment(ref e) => write!(f, "LDK payment error: {:?}", e), - LdkLiteError::InvoiceCreation(ref e) => write!(f, "LDK invoice sign or creation error: {:?}", e), - LdkLiteError::Bdk(ref e) => write!(f, "BDK error: {}", e), - LdkLiteError::Bip32(ref e) => write!(f, "Bitcoin error: {}", e), - LdkLiteError::Io(ref e) => write!(f, "IO error: {}", e), - LdkLiteError::Time(ref e) => write!(f, "time error: {}", e), - LdkLiteError::Esplora(ref e) => write!(f, "Esplora error: {}", e), - LdkLiteError::ChannelRecv(ref e) => write!(f, "channel recv error: {}", e), LdkLiteError::AlreadyRunning => write!(f, "LDKLite is already running."), LdkLiteError::NotRunning => write!(f, "LDKLite is not running."), LdkLiteError::FundingTxNonWitnessOuputSpend => write!(f, "an input of the funding transaction tried spending a non-SegWit output, which is insecure"), LdkLiteError::FundingTxNotFinalized => write!(f, "the funding transaction could not be finalized"), LdkLiteError::ChainStateMismatch => write!(f, "ChainStateMismatch"), LdkLiteError::ConnectionClosed => write!(f, "network connection closed"), + LdkLiteError::PeerInfoParse(ref e) => write!(f, "given peer info could not be parsed: {}", e), + LdkLiteError::LdkDecode(ref e) => write!(f, "LDK decode error: {}", e), + LdkLiteError::LdkApi(ref e) => write!(f, "LDK API error: {:?}", e), + // TODO: print more sensible things based on the type of payment error + LdkLiteError::LdkPayment(ref e) => write!(f, "LDK payment error: {:?}", e), + LdkLiteError::LdkInvoiceCreation(ref e) => write!(f, "LDK invoice sign or creation error: {:?}", e), + LdkLiteError::Bdk(ref e) => write!(f, "BDK error: {}", e), + LdkLiteError::Esplora(ref e) => write!(f, "Esplora error: {}", e), + LdkLiteError::Bip32(ref e) => write!(f, "Bitcoin error: {}", e), + LdkLiteError::StdIo(ref e) => write!(f, "IO error: {}", e), + LdkLiteError::StdTime(ref e) => write!(f, "time error: {}", e), + LdkLiteError::StdChannelRecv(ref e) => write!(f, "channel recv error: {}", e), } } } +impl From for LdkLiteError { + fn from(e: errors::APIError) -> Self { + Self::LdkApi(e) + } +} + impl From for LdkLiteError { fn from(e: msgs::DecodeError) -> Self { - Self::Decode(e) + Self::LdkDecode(e) } } impl From for LdkLiteError { fn from(e: payment::PaymentError) -> Self { - Self::Payment(e) + Self::LdkPayment(e) } } impl From for 
LdkLiteError { fn from(e: lightning_invoice::SignOrCreationError) -> Self { - Self::InvoiceCreation(e) + Self::LdkInvoiceCreation(e) } } @@ -103,19 +116,19 @@ impl From for LdkLiteError { impl From for LdkLiteError { fn from(e: io::Error) -> Self { - Self::Io(e) + Self::StdIo(e) } } impl From for LdkLiteError { fn from(e: time::SystemTimeError) -> Self { - Self::Time(e) + Self::StdTime(e) } } impl From for LdkLiteError { fn from(e: mpsc::RecvError) -> Self { - Self::ChannelRecv(e) + Self::StdChannelRecv(e) } } diff --git a/src/lib.rs b/src/lib.rs index 1ec0a5126..71f57a522 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -525,6 +525,7 @@ impl LdkLite { *pubkey, peer_addr.clone(), Arc::clone(&connect_pm), + Arc::clone(&connect_logger), ) .await; } @@ -614,11 +615,11 @@ impl LdkLite { } Err(payment::PaymentError::Invoice(e)) => { log_error!(self.logger, "invalid invoice: {}", e); - return Err(Error::Payment(payment::PaymentError::Invoice(e))); + return Err(Error::LdkPayment(payment::PaymentError::Invoice(e))); } Err(payment::PaymentError::Routing(e)) => { log_error!(self.logger, "failed to find route: {}", e.err); - return Err(Error::Payment(payment::PaymentError::Routing(e))); + return Err(Error::LdkPayment(payment::PaymentError::Routing(e))); } Err(payment::PaymentError::Sending(e)) => { log_error!(self.logger, "failed to send payment: {:?}", e); @@ -666,11 +667,11 @@ impl LdkLite { } Err(payment::PaymentError::Invoice(e)) => { log_error!(self.logger, "invalid invoice: {}", e); - return Err(Error::Payment(payment::PaymentError::Invoice(e))); + return Err(Error::LdkPayment(payment::PaymentError::Invoice(e))); } Err(payment::PaymentError::Routing(e)) => { log_error!(self.logger, "failed to find route: {}", e.err); - return Err(Error::Payment(payment::PaymentError::Routing(e))); + return Err(Error::LdkPayment(payment::PaymentError::Routing(e))); } Err(payment::PaymentError::Sending(e)) => { log_error!(self.logger, "failed to send payment: {:?}", e); @@ -716,8 +717,7 @@ impl LdkLite { Err(e) => { let err_str = &e.to_string(); log_error!(self.logger, "failed to create invoice: {:?}", err_str); - // TODO; - return Err(Error::InvoiceCreation(e)); + return Err(Error::LdkInvoiceCreation(e)); } }; @@ -765,10 +765,8 @@ impl LdkLite { } async fn connect_peer_if_necessary( - // TODO: log - pubkey: PublicKey, - peer_addr: SocketAddr, - peer_manager: Arc, + pubkey: PublicKey, peer_addr: SocketAddr, peer_manager: Arc, + logger: Arc, ) -> Result<(), Error> { for node_pubkey in peer_manager.get_peer_node_ids() { if node_pubkey == pubkey { @@ -776,15 +774,14 @@ async fn connect_peer_if_necessary( } } - do_connect_peer(pubkey, peer_addr, peer_manager).await + do_connect_peer(pubkey, peer_addr, peer_manager, logger).await } async fn do_connect_peer( - // TODO: log - pubkey: PublicKey, - peer_addr: SocketAddr, - peer_manager: Arc, + pubkey: PublicKey, peer_addr: SocketAddr, peer_manager: Arc, + logger: Arc, ) -> Result<(), Error> { + log_info!(logger, "connecting to peer: {}@{}", pubkey, peer_addr); match lightning_net_tokio::connect_outbound(Arc::clone(&peer_manager), pubkey, peer_addr).await { Some(connection_closed_future) => { @@ -792,6 +789,7 @@ async fn do_connect_peer( loop { match futures::poll!(&mut connection_closed_future) { std::task::Poll::Ready(_) => { + log_info!(logger, "peer connection closed: {}@{}", pubkey, peer_addr); return Err(Error::ConnectionClosed); } std::task::Poll::Pending => {} @@ -803,7 +801,10 @@ async fn do_connect_peer( } } } - None => Err(Error::ConnectionClosed), + None => { + 
log_error!(logger, "failed to connect to peer: {}@{}", pubkey, peer_addr); + Err(Error::ConnectionClosed) + } } } @@ -835,11 +836,6 @@ pub enum PaymentStatus { Failed, } -//struct ChannelInfo; -//struct FundingInfo; - -/// TODO - type ChainMonitor = chainmonitor::ChainMonitor< InMemorySigner, Arc, diff --git a/src/util.rs b/src/util.rs index 9cf5811e2..1fafbaa14 100644 --- a/src/util.rs +++ b/src/util.rs @@ -1,6 +1,6 @@ use crate::error::LdkLiteError as Error; -use crate::logger::FilesystemLogger; -use crate::{LdkLiteConfig, NetworkGraph, Scorer}; + +use crate::{LdkLiteConfig, NetworkGraph, Scorer, FilesystemLogger}; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters}; use lightning::util::ser::ReadableArgs; @@ -26,7 +26,7 @@ pub(crate) fn read_or_generate_seed_file(config: Arc) -> Result<[ let mut key = [0; 32]; thread_rng().fill_bytes(&mut key); - let mut f = fs::File::create(keys_seed_path.clone()).map_err(|e| Error::Io(e))?; + let mut f = fs::File::create(keys_seed_path.clone()).map_err(|e| Error::StdIo(e))?; f.write_all(&key).expect("Failed to write node keys seed to disk"); f.sync_all().expect("Failed to sync node keys seed to disk"); key @@ -82,7 +82,7 @@ pub(crate) fn read_channel_peer_data( Ok((pubkey, socket_addr)) => { peer_data.insert(pubkey, socket_addr); } - Err(e) => return Err(Error::Io(e)), + Err(e) => return Err(e), } } } @@ -98,38 +98,6 @@ pub(crate) fn persist_channel_peer( file.write_all(format!("{}\n", peer_info).as_bytes()) } -pub(crate) fn parse_peer_info( - peer_pubkey_and_ip_addr: String, -) -> Result<(PublicKey, SocketAddr), std::io::Error> { - let mut pubkey_and_addr = peer_pubkey_and_ip_addr.split("@"); - let pubkey = pubkey_and_addr.next(); - let peer_addr_str = pubkey_and_addr.next(); - if peer_addr_str.is_none() || peer_addr_str.is_none() { - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "ERROR: incorrectly formatted peer info. Should be formatted as: `pubkey@host:port`", - )); - } - - let peer_addr = peer_addr_str.unwrap().to_socket_addrs().map(|mut r| r.next()); - if peer_addr.is_err() || peer_addr.as_ref().unwrap().is_none() { - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "ERROR: couldn't parse pubkey@host:port into a socket address", - )); - } - - let pubkey = hex_to_compressed_pubkey(pubkey.unwrap()); - if pubkey.is_none() { - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - "ERROR: unable to parse given pubkey for node", - )); - } - - Ok((pubkey.unwrap(), peer_addr.unwrap().unwrap())) -} - pub fn hex_to_vec(hex: &str) -> Option> { let mut out = Vec::with_capacity(hex.len() / 2); @@ -173,3 +141,29 @@ pub fn hex_to_compressed_pubkey(hex: &str) -> Option { Err(_) => None, } } + +// TODO: handle different kinds of NetAddress, e.g., the Hostname field. +pub(crate) fn parse_peer_info( + peer_pubkey_and_ip_addr: String, +) -> Result<(PublicKey, SocketAddr), Error> { + let mut pubkey_and_addr = peer_pubkey_and_ip_addr.split("@"); + let pubkey = pubkey_and_addr.next(); + let peer_addr_str = pubkey_and_addr.next(); + if peer_addr_str.is_none() || peer_addr_str.is_none() { + return Err(Error::PeerInfoParse( + "Incorrect format. 
Should be formatted as: `pubkey@host:port`.", + )); + } + + let peer_addr = peer_addr_str.unwrap().to_socket_addrs().map(|mut r| r.next()); + if peer_addr.is_err() || peer_addr.as_ref().unwrap().is_none() { + return Err(Error::PeerInfoParse("Couldn't parse pubkey@host:port into a socket address.")); + } + + let pubkey = hex_to_compressed_pubkey(pubkey.unwrap()); + if pubkey.is_none() { + return Err(Error::PeerInfoParse("Unable to parse pubkey for node.")); + } + + Ok((pubkey.unwrap(), peer_addr.unwrap().unwrap())) +} From b6d068fffd6e763ee0a2fbc63c1ed9ec62d4b2e1 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 31 Aug 2022 09:38:28 +0200 Subject: [PATCH 11/20] Break up `util` into `io` and `hex` --- src/hex.rs | 45 +++++++++++++++++++++++++++++++++++++++++ src/{util.rs => io.rs} | 46 ++---------------------------------------- src/lib.rs | 11 +++++----- 3 files changed, 53 insertions(+), 49 deletions(-) create mode 100644 src/hex.rs rename src/{util.rs => io.rs} (82%) diff --git a/src/hex.rs b/src/hex.rs new file mode 100644 index 000000000..f1ca00c35 --- /dev/null +++ b/src/hex.rs @@ -0,0 +1,45 @@ +use bitcoin::secp256k1::PublicKey; + +pub fn to_vec(hex: &str) -> Option> { + let mut out = Vec::with_capacity(hex.len() / 2); + + let mut b = 0; + for (idx, c) in hex.as_bytes().iter().enumerate() { + b <<= 4; + match *c { + b'A'..=b'F' => b |= c - b'A' + 10, + b'a'..=b'f' => b |= c - b'a' + 10, + b'0'..=b'9' => b |= c - b'0', + _ => return None, + } + if (idx & 1) == 1 { + out.push(b); + b = 0; + } + } + + Some(out) +} + +#[inline] +pub fn to_string(value: &[u8]) -> String { + let mut res = String::with_capacity(64); + for v in value { + res += &format!("{:02x}", v); + } + res +} + +pub fn to_compressed_pubkey(hex: &str) -> Option { + if hex.len() != 33 * 2 { + return None; + } + let data = match to_vec(&hex[0..33 * 2]) { + Some(bytes) => bytes, + None => return None, + }; + match PublicKey::from_slice(&data) { + Ok(pk) => Some(pk), + Err(_) => None, + } +} diff --git a/src/util.rs b/src/io.rs similarity index 82% rename from src/util.rs rename to src/io.rs index 1fafbaa14..1e3489f68 100644 --- a/src/util.rs +++ b/src/io.rs @@ -1,6 +1,7 @@ use crate::error::LdkLiteError as Error; use crate::{LdkLiteConfig, NetworkGraph, Scorer, FilesystemLogger}; +use crate::hex; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters}; use lightning::util::ser::ReadableArgs; @@ -98,49 +99,6 @@ pub(crate) fn persist_channel_peer( file.write_all(format!("{}\n", peer_info).as_bytes()) } -pub fn hex_to_vec(hex: &str) -> Option> { - let mut out = Vec::with_capacity(hex.len() / 2); - - let mut b = 0; - for (idx, c) in hex.as_bytes().iter().enumerate() { - b <<= 4; - match *c { - b'A'..=b'F' => b |= c - b'A' + 10, - b'a'..=b'f' => b |= c - b'a' + 10, - b'0'..=b'9' => b |= c - b'0', - _ => return None, - } - if (idx & 1) == 1 { - out.push(b); - b = 0; - } - } - - Some(out) -} - -#[inline] -pub fn hex_str(value: &[u8]) -> String { - let mut res = String::with_capacity(64); - for v in value { - res += &format!("{:02x}", v); - } - res -} - -pub fn hex_to_compressed_pubkey(hex: &str) -> Option { - if hex.len() != 33 * 2 { - return None; - } - let data = match hex_to_vec(&hex[0..33 * 2]) { - Some(bytes) => bytes, - None => return None, - }; - match PublicKey::from_slice(&data) { - Ok(pk) => Some(pk), - Err(_) => None, - } -} // TODO: handle different kinds of NetAddress, e.g., the Hostname field. 
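// Expected input format: `<pubkey>@<host>:<port>`, where `<pubkey>` is the peer's
// 33-byte compressed public key encoded as 66 hex characters and `<host>:<port>` is
// anything `ToSocketAddrs` accepts (an IP literal with port requires no DNS lookup).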
pub(crate) fn parse_peer_info( @@ -160,7 +118,7 @@ pub(crate) fn parse_peer_info( return Err(Error::PeerInfoParse("Couldn't parse pubkey@host:port into a socket address.")); } - let pubkey = hex_to_compressed_pubkey(pubkey.unwrap()); + let pubkey = hex::to_compressed_pubkey(pubkey.unwrap()); if pubkey.is_none() { return Err(Error::PeerInfoParse("Unable to parse pubkey for node.")); } diff --git a/src/lib.rs b/src/lib.rs index 71f57a522..9525eeb3f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,7 +28,8 @@ mod access; mod error; mod event; mod logger; -mod util; +mod io; +mod hex; use access::LdkLiteChainAccess; pub use error::LdkLiteError as Error; @@ -192,7 +193,7 @@ impl LdkLiteBuilder { let logger = Arc::new(FilesystemLogger::new(config.storage_dir_path.clone())); // Step 1: Initialize the on-chain wallet and chain access - let seed = util::read_or_generate_seed_file(Arc::clone(&config))?; + let seed = io::read_or_generate_seed_file(Arc::clone(&config))?; let xprv = bitcoin::util::bip32::ExtendedPrivKey::new_master(config.network, &seed)?; let bdk_wallet = bdk::Wallet::new( @@ -279,7 +280,7 @@ impl LdkLiteBuilder { // TODO: Use RGS on first start, if configured // Step 10: Initialize the P2PGossipSync let network_graph = - Arc::new(util::read_network_graph(Arc::clone(&config), Arc::clone(&logger))?); + Arc::new(io::read_network_graph(Arc::clone(&config), Arc::clone(&logger))?); let gossip_sync = Arc::new(P2PGossipSync::new( Arc::clone(&network_graph), None::>, @@ -303,7 +304,7 @@ impl LdkLiteBuilder { )); // Step 12: Initialize routing ProbabilisticScorer - let scorer = Arc::new(Mutex::new(util::read_scorer( + let scorer = Arc::new(Mutex::new(io::read_scorer( Arc::clone(&config), Arc::clone(&network_graph), Arc::clone(&logger), @@ -510,7 +511,7 @@ impl LdkLite { return; } interval.tick().await; - match util::read_channel_peer_data(Arc::clone(&connect_config)) { + match io::read_channel_peer_data(Arc::clone(&connect_config)) { Ok(info) => { let peers = connect_pm.get_peer_node_ids(); for node_id in connect_cm From 5c50705fd757957b6ce632f3c2e452d78b84ad80 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 31 Aug 2022 10:26:22 +0200 Subject: [PATCH 12/20] Move magic numbers to constants --- src/lib.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 9525eeb3f..b7d19517c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -88,8 +88,19 @@ use std::time::{Duration, SystemTime}; // TODO: Is MemoryDatabase okay to use? +// The number of messages we buffer in the used channels. const CHANNEL_BUF_SIZE: usize = 1000; +// The used 'stop gap' parameter used by BDK's wallet sync. This seems to configure the threshold +// number of blocks after which BDK stops looking for scripts belonging to the wallet. +const BDK_CLIENT_STOP_GAP: usize = 20; + +// The number of concurrent requests made against the API provider. +const BDK_CLIENT_CONCURRENCY: u8 = 8; + +// The timeout after which we abandon retrying failed payments. +const LDK_PAYMENT_RETRY_TIMEOUT: Duration = Duration::from_secs(10); + #[derive(Debug, Clone)] /// Represents the configuration of an `LdkLite` instance. pub struct LdkLiteConfig { @@ -205,7 +216,7 @@ impl LdkLiteBuilder { // TODO: Check that we can be sure that the Esplora client re-connects in case of failure // and and exits cleanly on drop. Otherwise we need to handle this/move it to the runtime? 
- let blockchain = EsploraBlockchain::new(&config.esplora_server_url, 20).with_concurrency(8); + let blockchain = EsploraBlockchain::new(&config.esplora_server_url, BDK_CLIENT_STOP_GAP).with_concurrency(BDK_CLIENT_CONCURRENCY); let chain_access = Arc::new(LdkLiteChainAccess::new(blockchain, bdk_wallet)); @@ -343,7 +354,7 @@ impl LdkLiteBuilder { Arc::clone(&scorer), Arc::clone(&logger), event_handler, - payment::Retry::Timeout(Duration::from_secs(10)), + payment::Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT), )); let running = RwLock::new(None); From 28b9adc12dca45be1fdb119c968a9dedbd003ec8 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 31 Aug 2022 10:46:12 +0200 Subject: [PATCH 13/20] Let `FilesystemLogger` take a file path --- src/lib.rs | 3 ++- src/logger.rs | 15 ++++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index b7d19517c..ae05c789d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -201,7 +201,8 @@ impl LdkLiteBuilder { fs::create_dir_all(bdk_data_dir.clone())?; // Step 0: Initialize the Logger - let logger = Arc::new(FilesystemLogger::new(config.storage_dir_path.clone())); + let log_file_path = format!("{}/ldk_lite.log", config.storage_dir_path.clone()); + let logger = Arc::new(FilesystemLogger::new(log_file_path)); // Step 1: Initialize the on-chain wallet and chain access let seed = io::read_or_generate_seed_file(Arc::clone(&config))?; diff --git a/src/logger.rs b/src/logger.rs index 6f864c069..5eb3ab848 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -5,16 +5,18 @@ use lightning::util::ser::Writer; use chrono::Utc; use std::fs; +use std::path::Path; pub(crate) struct FilesystemLogger { - data_dir: String, + file_path: String, } impl FilesystemLogger { - pub(crate) fn new(data_dir: String) -> Self { - let logs_path = format!("{}/logs", data_dir); - fs::create_dir_all(logs_path.clone()).unwrap(); - Self { data_dir: logs_path } + pub(crate) fn new(file_path: String) -> Self { + if let Some(parent_dir) = Path::new(&file_path).parent() { + fs::create_dir_all(parent_dir).unwrap(); + } + Self { file_path } } } impl Logger for FilesystemLogger { @@ -28,11 +30,10 @@ impl Logger for FilesystemLogger { record.line, raw_log ); - let logs_file_path = format!("{}/logs.txt", self.data_dir.clone()); fs::OpenOptions::new() .create(true) .append(true) - .open(logs_file_path) + .open(self.file_path.clone()) .unwrap() .write_all(log.as_bytes()) .unwrap(); From 3471a3fff10e533ecb9bd73e42c3883c291bb42e Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 31 Aug 2022 10:48:13 +0200 Subject: [PATCH 14/20] Avoid `mut` for random bytes --- src/lib.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index ae05c789d..595e0e513 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -75,7 +75,7 @@ use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; use bitcoin::BlockHash; -use rand::RngCore; +use rand::Rng; use std::collections::HashMap; use std::fs; @@ -300,8 +300,7 @@ impl LdkLiteBuilder { )); //// Step 11: Initialize the PeerManager - let mut ephemeral_bytes = [0; 32]; - rand::thread_rng().fill_bytes(&mut ephemeral_bytes); + let ephemeral_bytes: [u8; 32] = rand::thread_rng().gen(); let lightning_msg_handler = MessageHandler { chan_handler: Arc::clone(&channel_manager), route_handler: Arc::clone(&gossip_sync), From 5fcb938c5827d8bda162ac2cae5e17674b2ea45e Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 31 Aug 2022 12:51:42 +0200 Subject: [PATCH 15/20] f FMT --- src/io.rs | 3 +-- src/lib.rs | 7 
++++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/io.rs b/src/io.rs index 1e3489f68..84df545eb 100644 --- a/src/io.rs +++ b/src/io.rs @@ -1,7 +1,7 @@ use crate::error::LdkLiteError as Error; -use crate::{LdkLiteConfig, NetworkGraph, Scorer, FilesystemLogger}; use crate::hex; +use crate::{FilesystemLogger, LdkLiteConfig, NetworkGraph, Scorer}; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringParameters}; use lightning::util::ser::ReadableArgs; @@ -99,7 +99,6 @@ pub(crate) fn persist_channel_peer( file.write_all(format!("{}\n", peer_info).as_bytes()) } - // TODO: handle different kinds of NetAddress, e.g., the Hostname field. pub(crate) fn parse_peer_info( peer_pubkey_and_ip_addr: String, diff --git a/src/lib.rs b/src/lib.rs index 595e0e513..f5aafad4d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,9 +27,9 @@ mod access; mod error; mod event; -mod logger; -mod io; mod hex; +mod io; +mod logger; use access::LdkLiteChainAccess; pub use error::LdkLiteError as Error; @@ -217,7 +217,8 @@ impl LdkLiteBuilder { // TODO: Check that we can be sure that the Esplora client re-connects in case of failure // and and exits cleanly on drop. Otherwise we need to handle this/move it to the runtime? - let blockchain = EsploraBlockchain::new(&config.esplora_server_url, BDK_CLIENT_STOP_GAP).with_concurrency(BDK_CLIENT_CONCURRENCY); + let blockchain = EsploraBlockchain::new(&config.esplora_server_url, BDK_CLIENT_STOP_GAP) + .with_concurrency(BDK_CLIENT_CONCURRENCY); let chain_access = Arc::new(LdkLiteChainAccess::new(blockchain, bdk_wallet)); From fc82620a9ca46a197d5b51b2e08278e43ad7c0d5 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Wed, 31 Aug 2022 19:08:10 +0200 Subject: [PATCH 16/20] Impl `connect_open_channel` --- src/error.rs | 4 +-- src/lib.rs | 82 ++++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 72 insertions(+), 14 deletions(-) diff --git a/src/error.rs b/src/error.rs index e85d729a2..c9b4afa04 100644 --- a/src/error.rs +++ b/src/error.rs @@ -22,7 +22,7 @@ pub enum LdkLiteError { /// TODO ChainStateMismatch, /// A network connection has been closed. - ConnectionClosed, + ConnectionFailed, /// A given peer info could not be parsed. 
 	PeerInfoParse(&'static str),
 	/// A wrapped LDK `APIError`
@@ -55,7 +55,7 @@ impl fmt::Display for LdkLiteError {
 			LdkLiteError::FundingTxNonWitnessOuputSpend => write!(f, "an input of the funding transaction tried spending a non-SegWit output, which is insecure"),
 			LdkLiteError::FundingTxNotFinalized => write!(f, "the funding transaction could not be finalized"),
 			LdkLiteError::ChainStateMismatch => write!(f, "ChainStateMismatch"),
-			LdkLiteError::ConnectionClosed => write!(f, "network connection closed"),
+			LdkLiteError::ConnectionFailed => write!(f, "network connection closed"),
 			LdkLiteError::PeerInfoParse(ref e) => write!(f, "given peer info could not be parsed: {}", e),
 			LdkLiteError::LdkDecode(ref e) => write!(f, "LDK decode error: {}", e),
 			LdkLiteError::LdkApi(ref e) => write!(f, "LDK API error: {:?}", e),
diff --git a/src/lib.rs b/src/lib.rs
index f5aafad4d..10ea85388 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -54,7 +54,7 @@ use lightning::routing::gossip;
 use lightning::routing::gossip::P2PGossipSync;
 use lightning::routing::scoring::ProbabilisticScorer;
 
-use lightning::util::config::UserConfig;
+use lightning::util::config::{ChannelHandshakeConfig, ChannelHandshakeLimits, UserConfig};
 use lightning::util::ser::ReadableArgs;
 
 use lightning_background_processor::BackgroundProcessor;
@@ -384,7 +384,7 @@ impl LdkLiteBuilder {
 /// Wraps all objects that need to be preserved during the run time of `LdkLite`. Will be dropped
 /// upon [`LdkLite::stop()`].
 struct LdkLiteRuntime {
-	_tokio_runtime: tokio::runtime::Runtime,
+	tokio_runtime: tokio::runtime::Runtime,
 	_background_processor: BackgroundProcessor,
 	stop_networking: Arc,
 	stop_wallet_sync: Arc,
@@ -452,7 +452,7 @@ impl LdkLite {
 	}
 
 	fn setup_runtime(&self) -> Result {
-		let _tokio_runtime =
+		let tokio_runtime =
 			tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap();
 
 		// Setup wallet sync
@@ -462,7 +462,7 @@ impl LdkLite {
 		let stop_wallet_sync = Arc::new(AtomicBool::new(false));
 		let stop_sync = Arc::clone(&stop_wallet_sync);
 
-		_tokio_runtime.spawn(async move {
+		tokio_runtime.spawn(async move {
 			let mut rounds = 0;
 			loop {
 				if stop_sync.load(Ordering::Acquire) {
@@ -489,7 +489,7 @@ impl LdkLite {
 		let stop_networking = Arc::new(AtomicBool::new(false));
 		let stop_listen = Arc::clone(&stop_networking);
 
-		_tokio_runtime.spawn(async move {
+		tokio_runtime.spawn(async move {
 			let listener =
 				tokio::net::TcpListener::bind(format!("0.0.0.0:{}", listening_port)).await.expect(
 					"Failed to bind to listen port - is something else already listening on it?",
				);
@@ -516,7 +516,7 @@ impl LdkLite {
 		let connect_config = Arc::clone(&self.config);
 		let connect_logger = Arc::clone(&self.logger);
 		let stop_connect = Arc::clone(&stop_networking);
-		_tokio_runtime.spawn(async move {
+		tokio_runtime.spawn(async move {
 			let mut interval = tokio::time::interval(Duration::from_secs(1));
 			loop {
 				if stop_connect.load(Ordering::Acquire) {
@@ -571,7 +571,7 @@ impl LdkLite {
 		// TODO: frequently check back on background_processor if there was an error
 
 		Ok(LdkLiteRuntime {
-			_tokio_runtime,
+			tokio_runtime,
 			_background_processor,
 			stop_networking,
 			stop_wallet_sync,
@@ -603,9 +603,67 @@ impl LdkLite {
 		Ok(funding_address)
 	}
 
-	// Connect to a node and open a new channel. Disconnects and re-connects should be handled automatically
-	//pub fn connect_open_channel(&mut self, node_id: PublicKey, node_address: NetAddress) -> Result {
-	//}
+	/// Connect to a node and open a new channel. Disconnects and re-connects are handled automatically
+	///
+	/// Returns a temporary channel id
+	pub fn connect_open_channel(
+		&self, node_pubkey_and_address: &str, channel_amount_sats: u64, announce_channel: bool,
+	) -> Result<[u8; 32], Error> {
+		let runtime_lock = self.running.read().unwrap();
+		if runtime_lock.is_none() {
+			return Err(Error::NotRunning);
+		}
+
+		let (peer_pubkey, peer_addr) = io::parse_peer_info(node_pubkey_and_address.to_string())?;
+
+		let runtime = runtime_lock.as_ref().unwrap();
+
+		let con_success = Arc::new(AtomicBool::new(false));
+		let con_success_cloned = Arc::clone(&con_success);
+		let con_logger = Arc::clone(&self.logger);
+		let con_pm = Arc::clone(&self.peer_manager);
+
+		runtime.tokio_runtime.block_on(async move {
+			let res = connect_peer_if_necessary(peer_pubkey, peer_addr, con_pm, con_logger).await;
+			con_success_cloned.store(res.is_ok(), Ordering::Release);
+		});
+
+		if !con_success.load(Ordering::Acquire) {
+			return Err(Error::ConnectionFailed);
+		}
+
+		// TODO: make some of the UserConfig values configurable through LdkLiteConfig
+		let user_config = UserConfig {
+			channel_handshake_limits: ChannelHandshakeLimits {
+				// lnd's max to_self_delay is 2016, so we want to be compatible.
+				their_to_self_delay: 2016,
+				..Default::default()
+			},
+			channel_handshake_config: ChannelHandshakeConfig {
+				announced_channel: announce_channel,
+				..Default::default()
+			},
+			..Default::default()
+		};
+
+		match self.channel_manager.create_channel(
+			peer_pubkey,
+			channel_amount_sats,
+			0,
+			0,
+			Some(user_config),
+		) {
+			Ok(temporary_channel_id) => {
+				log_info!(self.logger, "Initiated channel with peer {}. ", peer_pubkey);
+				return Ok(temporary_channel_id);
+			}
+			Err(e) => {
+				log_error!(self.logger, "failed to open channel: {:?}", e);
+				return Err(Error::LdkApi(e));
+			}
+		}
+	}
+
 	// // Close a previously opened channel
 	// pub close_channel(&mut self, channel_id: u64) -> Result<()>;
 	//
@@ -803,7 +861,7 @@ async fn do_connect_peer(
 			match futures::poll!(&mut connection_closed_future) {
 				std::task::Poll::Ready(_) => {
 					log_info!(logger, "peer connection closed: {}@{}", pubkey, peer_addr);
-					return Err(Error::ConnectionClosed);
+					return Err(Error::ConnectionFailed);
 				}
 				std::task::Poll::Pending => {}
 			}
@@ -816,7 +874,7 @@
 		}
 		None => {
 			log_error!(logger, "failed to connect to peer: {}@{}", pubkey, peer_addr);
-			Err(Error::ConnectionClosed)
+			Err(Error::ConnectionFailed)
 		}
 	}
 }

From ec3a338c0a3ee3c2b58f028243b4aa306455db2b Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 31 Aug 2022 19:13:51 +0200
Subject: [PATCH 17/20] Add TODO

---
 src/lib.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/lib.rs b/src/lib.rs
index 10ea85388..042a1c122 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -646,6 +646,7 @@ impl LdkLite {
 			..Default::default()
 		};
 
+		// TODO: this seems to block for quite some time. Is this a locking conflict with chain access locks in the background?
 		match self.channel_manager.create_channel(
 			peer_pubkey,
 			channel_amount_sats,
@@ -655,11 +656,11 @@ impl LdkLite {
 		) {
 			Ok(temporary_channel_id) => {
 				log_info!(self.logger, "Initiated channel with peer {}. ", peer_pubkey);
-				return Ok(temporary_channel_id);
+				Ok(temporary_channel_id)
 			}
 			Err(e) => {
 				log_error!(self.logger, "failed to open channel: {:?}", e);
-				return Err(Error::LdkApi(e));
+				Err(Error::LdkApi(e))
 			}
 		}
 	}
From 6a8a194e5b1853a59bfc3ac4acaef2d14df11c61 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 31 Aug 2022 19:28:17 +0200
Subject: [PATCH 18/20] Remove `Mutex` for blockchain

---
 src/access.rs | 21 +++++++++------------
 src/lib.rs    |  1 -
 2 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/src/access.rs b/src/access.rs
index a38b60eca..54a8df3a6 100644
--- a/src/access.rs
+++ b/src/access.rs
@@ -17,7 +17,7 @@ pub struct LdkLiteChainAccess
 where
 	D: BatchDatabase,
 {
-	blockchain: Mutex,
+	blockchain: EsploraBlockchain,
 	wallet: Mutex>,
 	queued_transactions: Mutex>,
 	watched_transactions: Mutex>,
@@ -31,7 +31,6 @@ where
 	D: BatchDatabase,
 {
 	pub fn new(blockchain: EsploraBlockchain, wallet: bdk::Wallet) -> Self {
-		let blockchain = Mutex::new(blockchain);
 		let wallet = Mutex::new(wallet);
 		let watched_transactions = Mutex::new(Vec::new());
 		let queued_transactions = Mutex::new(Vec::new());
@@ -52,18 +51,16 @@ where
 	pub fn sync_wallet(&self) -> Result<(), Error> {
 		let sync_options = SyncOptions { progress: None };
 
-		let locked_chain = self.blockchain.lock().unwrap();
 		self.wallet
 			.lock()
 			.unwrap()
-			.sync(&*locked_chain, sync_options)
+			.sync(&self.blockchain, sync_options)
 			.map_err(|e| Error::Bdk(e))?;
 
 		Ok(())
 	}
 
 	pub fn sync(&self, confirmables: Vec<&dyn Confirm>) -> Result<(), Error> {
-		let locked_chain = self.blockchain.lock().unwrap();
-		let client = &*(*locked_chain);
+		let client = &*self.blockchain;
 
 		let cur_height = client.get_height()?;
 
@@ -216,7 +213,7 @@ where
 		&self, output_script: &Script, value_sats: u64, confirmation_target: ConfirmationTarget,
 	) -> Result {
 		let num_blocks = num_blocks_from_conf_target(confirmation_target);
-		let fee_rate = self.blockchain.lock().unwrap().estimate_fee(num_blocks)?;
+		let fee_rate = self.blockchain.estimate_fee(num_blocks)?;
 
 		let locked_wallet = self.wallet.lock().unwrap();
 		let mut tx_builder = locked_wallet.build_tx();
@@ -256,7 +253,7 @@ where
 		// TODO: make this an unwrap_or?
 		// TODO: double-check here https://github.com/bitcoindevkit/bdk/pull/678/commits/03a5b223800b0fafd0e7c2c82bf4943ac9d5ae58
 		// TODO: switch to https://github.com/bitcoindevkit/bdk/pull/678 once that is merged
-		self.blockchain.lock().unwrap().estimate_fee(num_blocks).unwrap().fee_wu(1000) as u32
+		self.blockchain.estimate_fee(num_blocks).unwrap().fee_wu(1000) as u32
 	}
 }
 
@@ -265,7 +262,7 @@ where
 	D: BatchDatabase,
 {
 	fn broadcast_transaction(&self, tx: &Transaction) {
-		self.blockchain.lock().unwrap().broadcast(tx).ok();
+		self.blockchain.broadcast(tx).ok();
 	}
 }
 
@@ -289,7 +286,7 @@ where
 	D: BatchDatabase,
 {
 	fn get_height(&self) -> Result {
-		self.blockchain.lock().unwrap().get_height()
+		self.blockchain.get_height()
 	}
 }
 
@@ -298,7 +295,7 @@ where
 	D: BatchDatabase,
 {
 	fn get_block_hash(&self, height: u64) -> Result {
-		self.blockchain.lock().unwrap().get_block_hash(height)
+		self.blockchain.get_block_hash(height)
 	}
 }
 
@@ -307,7 +304,7 @@ where
 	D: BatchDatabase,
 {
 	fn get_tx(&self, txid: &Txid) -> Result, bdk::Error> {
-		self.blockchain.lock().unwrap().get_tx(txid)
+		self.blockchain.get_tx(txid)
 	}
 }
 
diff --git a/src/lib.rs b/src/lib.rs
index 042a1c122..598c8d98c 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -646,7 +646,6 @@ impl LdkLite {
 			..Default::default()
 		};
 
-		// TODO: this seems to block for quite some time. Is this a locking conflict with chain access locks in the background?
 		match self.channel_manager.create_channel(
 			peer_pubkey,
 			channel_amount_sats,

From 01c6f49c27126a695cb0a8b4679f71d3a9a45b29 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 31 Aug 2022 19:35:05 +0200
Subject: [PATCH 19/20] Impl `close_channel`

---
 src/lib.rs | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/lib.rs b/src/lib.rs
index 598c8d98c..5eb150177 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -664,9 +664,13 @@ impl LdkLite {
 	}
 
-	// // Close a previously opened channel
-	// pub close_channel(&mut self, channel_id: u64) -> Result<()>;
-	//
+	/// Close a previously opened channel.
+	pub fn close_channel(
+		&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey,
+	) -> Result<(), Error> {
+		Ok(self.channel_manager.close_channel(channel_id, counterparty_node_id)?)
+	}
+
 	/// Send a payement given an invoice.
 	pub fn send_payment(&self, invoice: Invoice) -> Result {
 		if self.running.read().unwrap().is_none() {
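Patches 16 and 19 together give `LdkLite` a public open/close API for channels. The following sketch is illustrative only and not part of the series: it assumes a started `LdkLite` instance, the peer string is a placeholder (a full `pubkey@host:port` value is required, matching what `io::parse_peer_info` splits apart), and error handling is elided.

	// Hypothetical helper; `node` stands for a running `LdkLite` instance.
	fn open_and_close_sketch(node: &LdkLite) -> Result<(), Error> {
		// Placeholder peer string; a real value carries the full counterparty pubkey.
		let peer = "<counterparty-pubkey>@192.168.1.25:9735";
		// Connects to the peer if necessary and asks the `ChannelManager` for an
		// outbound channel; the returned value is the temporary channel id.
		let temp_channel_id: [u8; 32] = node.connect_open_channel(peer, 50_000, true)?;
		// The counterparty `PublicKey` can be recovered from the same peer string.
		let (peer_pubkey, _peer_addr) = io::parse_peer_info(peer.to_string())?;
		// Closing needs only the channel id and the counterparty node id.
		node.close_channel(&temp_channel_id, &peer_pubkey)?;
		Ok(())
	}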
From d6f09adf4654554fab383d676f8964bdc1614602 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Wed, 31 Aug 2022 20:52:41 +0200
Subject: [PATCH 20/20] Log wallet sync

---
 src/access.rs | 36 ++++++++++++++++++++++++++++++------
 src/lib.rs    |  3 ++-
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/src/access.rs b/src/access.rs
index 54a8df3a6..3a6752604 100644
--- a/src/access.rs
+++ b/src/access.rs
@@ -1,4 +1,9 @@
 use crate::error::LdkLiteError as Error;
+#[allow(unused_imports)]
+use crate::logger::{
+	log_error, log_given_level, log_info, log_internal, log_trace, log_warn, FilesystemLogger,
+	Logger,
+};
 
 use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
 use lightning::chain::WatchedOutput;
@@ -11,7 +16,8 @@ use bdk::{SignOptions, SyncOptions};
 
 use bitcoin::{BlockHash, Script, Transaction, Txid};
 
-use std::sync::Mutex;
+use std::sync::{Arc, Mutex};
+use std::time::Instant;
 
 pub struct LdkLiteChainAccess
 where
 	D: BatchDatabase,
@@ -24,13 +30,16 @@ where
 	queued_outputs: Mutex>,
 	watched_outputs: Mutex>,
 	last_sync_height: Mutex>,
+	logger: Arc,
 }
 
 impl LdkLiteChainAccess
 where
 	D: BatchDatabase,
 {
-	pub fn new(blockchain: EsploraBlockchain, wallet: bdk::Wallet) -> Self {
+	pub(crate) fn new(
+		blockchain: EsploraBlockchain, wallet: bdk::Wallet, logger: Arc,
+	) -> Self {
 		let wallet = Mutex::new(wallet);
 		let watched_transactions = Mutex::new(Vec::new());
 		let queued_transactions = Mutex::new(Vec::new());
@@ -45,21 +54,31 @@ where
 			queued_outputs,
 			watched_outputs,
 			last_sync_height,
+			logger,
 		}
 	}
 
-	pub fn sync_wallet(&self) -> Result<(), Error> {
+	pub(crate) fn sync_wallet(&self) -> Result<(), Error> {
 		let sync_options = SyncOptions { progress: None };
 
+		let now = Instant::now();
 		self.wallet
 			.lock()
 			.unwrap()
 			.sync(&self.blockchain, sync_options)
 			.map_err(|e| Error::Bdk(e))?;
+
+		log_info!(
+			self.logger,
+			"On-chain wallet sync finished in {} seconds.",
+			now.elapsed().as_secs()
+		);
+
 		Ok(())
 	}
 
-	pub fn sync(&self, confirmables: Vec<&dyn Confirm>) -> Result<(), Error> {
+	pub(crate) fn sync(&self, confirmables: Vec<&dyn Confirm>) -> Result<(), Error> {
+		let now = Instant::now();
 		let client = &*self.blockchain;
 
 		let cur_height = client.get_height()?;
 
@@ -206,10 +225,15 @@
 		}
 
 		// TODO: check whether new outputs have been registered by now and process them
 
+		log_info!(
+			self.logger,
+			"Lightning wallet sync finished in {} seconds.",
+			now.elapsed().as_secs()
+		);
 		Ok(())
 	}
 
-	pub fn create_funding_transaction(
+	pub(crate) fn create_funding_transaction(
 		&self, output_script: &Script, value_sats: u64, confirmation_target: ConfirmationTarget,
 	) -> Result {
 		let num_blocks = num_blocks_from_conf_target(confirmation_target);
@@ -238,7 +262,7 @@
 		Ok(psbt.extract_tx())
 	}
 
-	pub fn get_new_address(&self) -> Result {
+	pub(crate) fn get_new_address(&self) -> Result {
 		let address_info = self.wallet.lock().unwrap().get_address(AddressIndex::New)?;
 		Ok(address_info.address)
 	}
diff --git a/src/lib.rs b/src/lib.rs
index 5eb150177..19814ab80 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -220,7 +220,8 @@ impl LdkLiteBuilder {
 		let blockchain = EsploraBlockchain::new(&config.esplora_server_url, BDK_CLIENT_STOP_GAP)
 			.with_concurrency(BDK_CLIENT_CONCURRENCY);
 
-		let chain_access = Arc::new(LdkLiteChainAccess::new(blockchain, bdk_wallet));
+		let chain_access =
+			Arc::new(LdkLiteChainAccess::new(blockchain, bdk_wallet, Arc::clone(&logger)));
 
 		// Step 3: Initialize Persist
 		let persister = Arc::new(FilesystemPersister::new(ldk_data_dir.clone()));
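Finally, the payment path that appears as context in patch 19 would be driven as sketched below. Again illustrative and hedged: the BOLT11 string is a placeholder, `Invoice` parsing via `FromStr` comes from the `lightning-invoice` dependency declared in `Cargo.toml`, and the exact return type of `send_payment` is elided in this excerpt, so it is treated here as an opaque payment handle.

	use lightning_invoice::Invoice;
	use std::str::FromStr;

	// Hypothetical helper; `node` stands for a running `LdkLite` instance and `bolt11`
	// for an invoice string handed over by the payee.
	fn pay_invoice_sketch(node: &LdkLite, bolt11: &str) -> Result<(), Error> {
		let invoice = Invoice::from_str(bolt11).expect("failed to parse invoice");
		// Hands the invoice to the invoice payer configured with the retry timeout above.
		let _payment_handle = node.send_payment(invoice)?;
		Ok(())
	}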