From 5d37cc44f9200861a278d074f8caa99e8db6be02 Mon Sep 17 00:00:00 2001 From: Patrick Kamin Date: Wed, 19 May 2021 22:08:18 -0700 Subject: [PATCH 001/112] Generate doxygen documentation for test sources --- doc/Doxyfile.in | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in index 21bf587eaf..d8fd46d1c7 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -863,9 +863,7 @@ RECURSIVE = YES EXCLUDE = src/crc32c \ src/leveldb \ - src/json \ - src/test \ - src/qt/test + src/json # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded From 8ea8c927ac05980d6a81252e40b7444e9abb74f9 Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Mon, 24 May 2021 00:46:08 +0200 Subject: [PATCH 002/112] index: Avoid unnecessary type casts in coinstatsindex --- src/index/coinstatsindex.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index e046527283..5e3f7602c8 100644 --- a/src/index/coinstatsindex.cpp +++ b/src/index/coinstatsindex.cpp @@ -143,10 +143,10 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) continue; } - for (size_t j = 0; j < tx->vout.size(); ++j) { + for (uint32_t j = 0; j < tx->vout.size(); ++j) { const CTxOut& out{tx->vout[j]}; Coin coin{out, pindex->nHeight, tx->IsCoinBase()}; - COutPoint outpoint{tx->GetHash(), static_cast(j)}; + COutPoint outpoint{tx->GetHash(), j}; // Skip unspendable coins if (coin.out.scriptPubKey.IsUnspendable()) { @@ -402,9 +402,9 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex for (size_t i = 0; i < block.vtx.size(); ++i) { const auto& tx{block.vtx.at(i)}; - for (size_t j = 0; j < tx->vout.size(); ++j) { + for (uint32_t j = 0; j < tx->vout.size(); ++j) { const CTxOut& out{tx->vout[j]}; - COutPoint outpoint{tx->GetHash(), static_cast(j)}; + COutPoint outpoint{tx->GetHash(), j}; Coin coin{out, pindex->nHeight, tx->IsCoinBase()}; // Skip unspendable coins From d09d1cf1a267b1c5563d8876aa55c4e8f70f0562 Mon Sep 17 00:00:00 2001 From: Jarol Rodriguez Date: Sun, 16 May 2021 20:36:59 -0400 Subject: [PATCH 003/112] qt, test: introduce FindInConsole function Allows for regex searching into the console output. --- src/qt/test/apptests.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp index cb3dbd2267..c5f9b570a1 100644 --- a/src/qt/test/apptests.cpp +++ b/src/qt/test/apptests.cpp @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -30,6 +31,13 @@ #include namespace { +//! Regex find a string group inside of the console output +QString FindInConsole(const QString& output, const QString& pattern) +{ + const QRegularExpression re(pattern); + return re.match(output).captured(1); +} + //! Call getblockchaininfo RPC and check first field of JSON output. void TestRpcCommand(RPCConsole* console) { From 6969b2bb98a2f44e1b51c905db92ec2e28345078 Mon Sep 17 00:00:00 2001 From: Jarol Rodriguez Date: Sun, 16 May 2021 23:43:41 -0400 Subject: [PATCH 004/112] qt, test: use regex search in apptests use the FindInConsole function to regex search for values in apptests instead of Univalue read. 
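For illustration only (not part of the patch): the same capture-group approach, expressed with Python's standard `re` module, would look roughly like this; the helper name `find_in_console` and the variable `console_text` are hypothetical.

    import re

    def find_in_console(output: str, pattern: str) -> str:
        # Return the first capture group of the first match, mirroring the
        # intent of the QRegularExpression-based FindInConsole helper above.
        match = re.search(pattern, output)
        return match.group(1) if match else ""

    # Hypothetical usage: find_in_console(console_text, r'"chain": "(\w+)"') -> "regtest"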
--- src/qt/test/apptests.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp index c5f9b570a1..318a0edf6e 100644 --- a/src/qt/test/apptests.cpp +++ b/src/qt/test/apptests.cpp @@ -12,7 +12,6 @@ #include #include #include -#include #include #if defined(HAVE_CONFIG_H) @@ -24,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -49,10 +49,9 @@ void TestRpcCommand(RPCConsole* console) QTest::keyClick(lineEdit, Qt::Key_Return); QVERIFY(mw_spy.wait(1000)); QCOMPARE(mw_spy.count(), 4); - QString output = messagesWidget->toPlainText(); - UniValue value; - value.read(output.right(output.size() - output.lastIndexOf(QChar::ObjectReplacementCharacter) - 1).toStdString()); - QCOMPARE(value["chain"].get_str(), std::string("regtest")); + const QString output = messagesWidget->toPlainText(); + const QString pattern = QStringLiteral("\"chain\": \"(\\w+)\""); + QCOMPARE(FindInConsole(output, pattern), QString("regtest")); } } // namespace From fb65dde147f63422c4148b089c2f5be0bf5ba80f Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Thu, 3 Jun 2021 01:31:47 +0200 Subject: [PATCH 005/112] scripted-diff: Fix coinstats data member names Initially these values were 'per block' in an earlier version but were then changed to total values. The names were not updated to reflect that. -BEGIN VERIFY SCRIPT- s() { git grep -l "$1" src | xargs sed -i "s/$1/$2/g"; } s 'm_block_unspendable_amount' 'm_total_unspendable_amount' s 'm_block_prevout_spent_amount' 'm_total_prevout_spent_amount' s 'm_block_new_outputs_ex_coinbase_amount' 'm_total_new_outputs_ex_coinbase_amount' s 'm_block_coinbase_amount' 'm_total_coinbase_amount' s 'block_unspendable_amount' 'total_unspendable_amount' s 'block_prevout_spent_amount' 'total_prevout_spent_amount' s 'block_new_outputs_ex_coinbase_amount' 'total_new_outputs_ex_coinbase_amount' s 'block_coinbase_amount' 'total_coinbase_amount' s 'unspendables_genesis_block' 'total_unspendables_genesis_block' s 'unspendables_bip30' 'total_unspendables_bip30' s 'unspendables_scripts' 'total_unspendables_scripts' s 'unspendables_unclaimed_rewards' 'total_unspendables_unclaimed_rewards' s 'm_unspendables_genesis_block' 'm_total_unspendables_genesis_block' s 'm_unspendables_bip30' 'm_total_unspendables_bip30' s 'm_unspendables_scripts' 'm_total_unspendables_scripts' s 'm_unspendables_unclaimed_rewards' 'm_total_unspendables_unclaimed_rewards' -END VERIFY SCRIPT- --- src/index/coinstatsindex.cpp | 136 +++++++++++++++++------------------ src/index/coinstatsindex.h | 16 ++--- src/node/coinstats.h | 16 ++--- src/rpc/blockchain.cpp | 18 ++--- 4 files changed, 93 insertions(+), 93 deletions(-) diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index 5e3f7602c8..cb940234e2 100644 --- a/src/index/coinstatsindex.cpp +++ b/src/index/coinstatsindex.cpp @@ -24,14 +24,14 @@ struct DBVal { uint64_t bogo_size; CAmount total_amount; CAmount total_subsidy; - CAmount block_unspendable_amount; - CAmount block_prevout_spent_amount; - CAmount block_new_outputs_ex_coinbase_amount; - CAmount block_coinbase_amount; - CAmount unspendables_genesis_block; - CAmount unspendables_bip30; - CAmount unspendables_scripts; - CAmount unspendables_unclaimed_rewards; + CAmount total_unspendable_amount; + CAmount total_prevout_spent_amount; + CAmount total_new_outputs_ex_coinbase_amount; + CAmount total_coinbase_amount; + CAmount total_unspendables_genesis_block; + CAmount total_unspendables_bip30; + CAmount 
total_unspendables_scripts; + CAmount total_unspendables_unclaimed_rewards; SERIALIZE_METHODS(DBVal, obj) { @@ -40,14 +40,14 @@ struct DBVal { READWRITE(obj.bogo_size); READWRITE(obj.total_amount); READWRITE(obj.total_subsidy); - READWRITE(obj.block_unspendable_amount); - READWRITE(obj.block_prevout_spent_amount); - READWRITE(obj.block_new_outputs_ex_coinbase_amount); - READWRITE(obj.block_coinbase_amount); - READWRITE(obj.unspendables_genesis_block); - READWRITE(obj.unspendables_bip30); - READWRITE(obj.unspendables_scripts); - READWRITE(obj.unspendables_unclaimed_rewards); + READWRITE(obj.total_unspendable_amount); + READWRITE(obj.total_prevout_spent_amount); + READWRITE(obj.total_new_outputs_ex_coinbase_amount); + READWRITE(obj.total_coinbase_amount); + READWRITE(obj.total_unspendables_genesis_block); + READWRITE(obj.total_unspendables_bip30); + READWRITE(obj.total_unspendables_scripts); + READWRITE(obj.total_unspendables_unclaimed_rewards); } }; @@ -138,8 +138,8 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) // Skip duplicate txid coinbase transactions (BIP30). if (is_bip30_block && tx->IsCoinBase()) { - m_block_unspendable_amount += block_subsidy; - m_unspendables_bip30 += block_subsidy; + m_total_unspendable_amount += block_subsidy; + m_total_unspendables_bip30 += block_subsidy; continue; } @@ -150,17 +150,17 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) // Skip unspendable coins if (coin.out.scriptPubKey.IsUnspendable()) { - m_block_unspendable_amount += coin.out.nValue; - m_unspendables_scripts += coin.out.nValue; + m_total_unspendable_amount += coin.out.nValue; + m_total_unspendables_scripts += coin.out.nValue; continue; } m_muhash.Insert(MakeUCharSpan(TxOutSer(outpoint, coin))); if (tx->IsCoinBase()) { - m_block_coinbase_amount += coin.out.nValue; + m_total_coinbase_amount += coin.out.nValue; } else { - m_block_new_outputs_ex_coinbase_amount += coin.out.nValue; + m_total_new_outputs_ex_coinbase_amount += coin.out.nValue; } ++m_transaction_output_count; @@ -178,7 +178,7 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) m_muhash.Remove(MakeUCharSpan(TxOutSer(outpoint, coin))); - m_block_prevout_spent_amount += coin.out.nValue; + m_total_prevout_spent_amount += coin.out.nValue; --m_transaction_output_count; m_total_amount -= coin.out.nValue; @@ -188,17 +188,17 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) } } else { // genesis block - m_block_unspendable_amount += block_subsidy; - m_unspendables_genesis_block += block_subsidy; + m_total_unspendable_amount += block_subsidy; + m_total_unspendables_genesis_block += block_subsidy; } // If spent prevouts + block subsidy are still a higher amount than // new outputs + coinbase + current unspendable amount this means // the miner did not claim the full block reward. Unclaimed block // rewards are also unspendable. 
- const CAmount unclaimed_rewards{(m_block_prevout_spent_amount + m_total_subsidy) - (m_block_new_outputs_ex_coinbase_amount + m_block_coinbase_amount + m_block_unspendable_amount)}; - m_block_unspendable_amount += unclaimed_rewards; - m_unspendables_unclaimed_rewards += unclaimed_rewards; + const CAmount unclaimed_rewards{(m_total_prevout_spent_amount + m_total_subsidy) - (m_total_new_outputs_ex_coinbase_amount + m_total_coinbase_amount + m_total_unspendable_amount)}; + m_total_unspendable_amount += unclaimed_rewards; + m_total_unspendables_unclaimed_rewards += unclaimed_rewards; std::pair value; value.first = pindex->GetBlockHash(); @@ -206,14 +206,14 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) value.second.bogo_size = m_bogo_size; value.second.total_amount = m_total_amount; value.second.total_subsidy = m_total_subsidy; - value.second.block_unspendable_amount = m_block_unspendable_amount; - value.second.block_prevout_spent_amount = m_block_prevout_spent_amount; - value.second.block_new_outputs_ex_coinbase_amount = m_block_new_outputs_ex_coinbase_amount; - value.second.block_coinbase_amount = m_block_coinbase_amount; - value.second.unspendables_genesis_block = m_unspendables_genesis_block; - value.second.unspendables_bip30 = m_unspendables_bip30; - value.second.unspendables_scripts = m_unspendables_scripts; - value.second.unspendables_unclaimed_rewards = m_unspendables_unclaimed_rewards; + value.second.total_unspendable_amount = m_total_unspendable_amount; + value.second.total_prevout_spent_amount = m_total_prevout_spent_amount; + value.second.total_new_outputs_ex_coinbase_amount = m_total_new_outputs_ex_coinbase_amount; + value.second.total_coinbase_amount = m_total_coinbase_amount; + value.second.total_unspendables_genesis_block = m_total_unspendables_genesis_block; + value.second.total_unspendables_bip30 = m_total_unspendables_bip30; + value.second.total_unspendables_scripts = m_total_unspendables_scripts; + value.second.total_unspendables_unclaimed_rewards = m_total_unspendables_unclaimed_rewards; uint256 out; m_muhash.Finalize(out); @@ -317,14 +317,14 @@ bool CoinStatsIndex::LookUpStats(const CBlockIndex* block_index, CCoinsStats& co coins_stats.nBogoSize = entry.bogo_size; coins_stats.nTotalAmount = entry.total_amount; coins_stats.total_subsidy = entry.total_subsidy; - coins_stats.block_unspendable_amount = entry.block_unspendable_amount; - coins_stats.block_prevout_spent_amount = entry.block_prevout_spent_amount; - coins_stats.block_new_outputs_ex_coinbase_amount = entry.block_new_outputs_ex_coinbase_amount; - coins_stats.block_coinbase_amount = entry.block_coinbase_amount; - coins_stats.unspendables_genesis_block = entry.unspendables_genesis_block; - coins_stats.unspendables_bip30 = entry.unspendables_bip30; - coins_stats.unspendables_scripts = entry.unspendables_scripts; - coins_stats.unspendables_unclaimed_rewards = entry.unspendables_unclaimed_rewards; + coins_stats.total_unspendable_amount = entry.total_unspendable_amount; + coins_stats.total_prevout_spent_amount = entry.total_prevout_spent_amount; + coins_stats.total_new_outputs_ex_coinbase_amount = entry.total_new_outputs_ex_coinbase_amount; + coins_stats.total_coinbase_amount = entry.total_coinbase_amount; + coins_stats.total_unspendables_genesis_block = entry.total_unspendables_genesis_block; + coins_stats.total_unspendables_bip30 = entry.total_unspendables_bip30; + coins_stats.total_unspendables_scripts = entry.total_unspendables_scripts; + 
coins_stats.total_unspendables_unclaimed_rewards = entry.total_unspendables_unclaimed_rewards; return true; } @@ -354,14 +354,14 @@ bool CoinStatsIndex::Init() m_bogo_size = entry.bogo_size; m_total_amount = entry.total_amount; m_total_subsidy = entry.total_subsidy; - m_block_unspendable_amount = entry.block_unspendable_amount; - m_block_prevout_spent_amount = entry.block_prevout_spent_amount; - m_block_new_outputs_ex_coinbase_amount = entry.block_new_outputs_ex_coinbase_amount; - m_block_coinbase_amount = entry.block_coinbase_amount; - m_unspendables_genesis_block = entry.unspendables_genesis_block; - m_unspendables_bip30 = entry.unspendables_bip30; - m_unspendables_scripts = entry.unspendables_scripts; - m_unspendables_unclaimed_rewards = entry.unspendables_unclaimed_rewards; + m_total_unspendable_amount = entry.total_unspendable_amount; + m_total_prevout_spent_amount = entry.total_prevout_spent_amount; + m_total_new_outputs_ex_coinbase_amount = entry.total_new_outputs_ex_coinbase_amount; + m_total_coinbase_amount = entry.total_coinbase_amount; + m_total_unspendables_genesis_block = entry.total_unspendables_genesis_block; + m_total_unspendables_bip30 = entry.total_unspendables_bip30; + m_total_unspendables_scripts = entry.total_unspendables_scripts; + m_total_unspendables_unclaimed_rewards = entry.total_unspendables_unclaimed_rewards; } return true; @@ -409,17 +409,17 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex // Skip unspendable coins if (coin.out.scriptPubKey.IsUnspendable()) { - m_block_unspendable_amount -= coin.out.nValue; - m_unspendables_scripts -= coin.out.nValue; + m_total_unspendable_amount -= coin.out.nValue; + m_total_unspendables_scripts -= coin.out.nValue; continue; } m_muhash.Remove(MakeUCharSpan(TxOutSer(outpoint, coin))); if (tx->IsCoinBase()) { - m_block_coinbase_amount -= coin.out.nValue; + m_total_coinbase_amount -= coin.out.nValue; } else { - m_block_new_outputs_ex_coinbase_amount -= coin.out.nValue; + m_total_new_outputs_ex_coinbase_amount -= coin.out.nValue; } --m_transaction_output_count; @@ -437,7 +437,7 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex m_muhash.Insert(MakeUCharSpan(TxOutSer(outpoint, coin))); - m_block_prevout_spent_amount -= coin.out.nValue; + m_total_prevout_spent_amount -= coin.out.nValue; m_transaction_output_count++; m_total_amount += coin.out.nValue; @@ -446,9 +446,9 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex } } - const CAmount unclaimed_rewards{(m_block_new_outputs_ex_coinbase_amount + m_block_coinbase_amount + m_block_unspendable_amount) - (m_block_prevout_spent_amount + m_total_subsidy)}; - m_block_unspendable_amount -= unclaimed_rewards; - m_unspendables_unclaimed_rewards -= unclaimed_rewards; + const CAmount unclaimed_rewards{(m_total_new_outputs_ex_coinbase_amount + m_total_coinbase_amount + m_total_unspendable_amount) - (m_total_prevout_spent_amount + m_total_subsidy)}; + m_total_unspendable_amount -= unclaimed_rewards; + m_total_unspendables_unclaimed_rewards -= unclaimed_rewards; // Check that the rolled back internal values are consistent with the DB read out uint256 out; @@ -459,14 +459,14 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex Assert(m_total_amount == read_out.second.total_amount); Assert(m_bogo_size == read_out.second.bogo_size); Assert(m_total_subsidy == read_out.second.total_subsidy); - Assert(m_block_unspendable_amount == read_out.second.block_unspendable_amount); 
- Assert(m_block_prevout_spent_amount == read_out.second.block_prevout_spent_amount); - Assert(m_block_new_outputs_ex_coinbase_amount == read_out.second.block_new_outputs_ex_coinbase_amount); - Assert(m_block_coinbase_amount == read_out.second.block_coinbase_amount); - Assert(m_unspendables_genesis_block == read_out.second.unspendables_genesis_block); - Assert(m_unspendables_bip30 == read_out.second.unspendables_bip30); - Assert(m_unspendables_scripts == read_out.second.unspendables_scripts); - Assert(m_unspendables_unclaimed_rewards == read_out.second.unspendables_unclaimed_rewards); + Assert(m_total_unspendable_amount == read_out.second.total_unspendable_amount); + Assert(m_total_prevout_spent_amount == read_out.second.total_prevout_spent_amount); + Assert(m_total_new_outputs_ex_coinbase_amount == read_out.second.total_new_outputs_ex_coinbase_amount); + Assert(m_total_coinbase_amount == read_out.second.total_coinbase_amount); + Assert(m_total_unspendables_genesis_block == read_out.second.total_unspendables_genesis_block); + Assert(m_total_unspendables_bip30 == read_out.second.total_unspendables_bip30); + Assert(m_total_unspendables_scripts == read_out.second.total_unspendables_scripts); + Assert(m_total_unspendables_unclaimed_rewards == read_out.second.total_unspendables_unclaimed_rewards); return m_db->Write(DB_MUHASH, m_muhash); } diff --git a/src/index/coinstatsindex.h b/src/index/coinstatsindex.h index 6149f9b4b3..a575b37c7c 100644 --- a/src/index/coinstatsindex.h +++ b/src/index/coinstatsindex.h @@ -25,14 +25,14 @@ class CoinStatsIndex final : public BaseIndex uint64_t m_bogo_size{0}; CAmount m_total_amount{0}; CAmount m_total_subsidy{0}; - CAmount m_block_unspendable_amount{0}; - CAmount m_block_prevout_spent_amount{0}; - CAmount m_block_new_outputs_ex_coinbase_amount{0}; - CAmount m_block_coinbase_amount{0}; - CAmount m_unspendables_genesis_block{0}; - CAmount m_unspendables_bip30{0}; - CAmount m_unspendables_scripts{0}; - CAmount m_unspendables_unclaimed_rewards{0}; + CAmount m_total_unspendable_amount{0}; + CAmount m_total_prevout_spent_amount{0}; + CAmount m_total_new_outputs_ex_coinbase_amount{0}; + CAmount m_total_coinbase_amount{0}; + CAmount m_total_unspendables_genesis_block{0}; + CAmount m_total_unspendables_bip30{0}; + CAmount m_total_unspendables_scripts{0}; + CAmount m_total_unspendables_unclaimed_rewards{0}; bool ReverseBlock(const CBlock& block, const CBlockIndex* pindex); diff --git a/src/node/coinstats.h b/src/node/coinstats.h index 8be256edc9..ae2e46e4d9 100644 --- a/src/node/coinstats.h +++ b/src/node/coinstats.h @@ -46,14 +46,14 @@ struct CCoinsStats // Following values are only available from coinstats index CAmount total_subsidy{0}; - CAmount block_unspendable_amount{0}; - CAmount block_prevout_spent_amount{0}; - CAmount block_new_outputs_ex_coinbase_amount{0}; - CAmount block_coinbase_amount{0}; - CAmount unspendables_genesis_block{0}; - CAmount unspendables_bip30{0}; - CAmount unspendables_scripts{0}; - CAmount unspendables_unclaimed_rewards{0}; + CAmount total_unspendable_amount{0}; + CAmount total_prevout_spent_amount{0}; + CAmount total_new_outputs_ex_coinbase_amount{0}; + CAmount total_coinbase_amount{0}; + CAmount total_unspendables_genesis_block{0}; + CAmount total_unspendables_bip30{0}; + CAmount total_unspendables_scripts{0}; + CAmount total_unspendables_unclaimed_rewards{0}; CCoinsStats(CoinStatsHashType hash_type) : m_hash_type(hash_type) {} }; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 03f28239ba..ee2f5a549b 100644 --- 
a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1191,7 +1191,7 @@ static RPCHelpMan gettxoutsetinfo() ret.pushKV("transactions", static_cast(stats.nTransactions)); ret.pushKV("disk_size", stats.nDiskSize); } else { - ret.pushKV("total_unspendable_amount", ValueFromAmount(stats.block_unspendable_amount)); + ret.pushKV("total_unspendable_amount", ValueFromAmount(stats.total_unspendable_amount)); CCoinsStats prev_stats{hash_type}; @@ -1200,16 +1200,16 @@ static RPCHelpMan gettxoutsetinfo() } UniValue block_info(UniValue::VOBJ); - block_info.pushKV("prevout_spent", ValueFromAmount(stats.block_prevout_spent_amount - prev_stats.block_prevout_spent_amount)); - block_info.pushKV("coinbase", ValueFromAmount(stats.block_coinbase_amount - prev_stats.block_coinbase_amount)); - block_info.pushKV("new_outputs_ex_coinbase", ValueFromAmount(stats.block_new_outputs_ex_coinbase_amount - prev_stats.block_new_outputs_ex_coinbase_amount)); - block_info.pushKV("unspendable", ValueFromAmount(stats.block_unspendable_amount - prev_stats.block_unspendable_amount)); + block_info.pushKV("prevout_spent", ValueFromAmount(stats.total_prevout_spent_amount - prev_stats.total_prevout_spent_amount)); + block_info.pushKV("coinbase", ValueFromAmount(stats.total_coinbase_amount - prev_stats.total_coinbase_amount)); + block_info.pushKV("new_outputs_ex_coinbase", ValueFromAmount(stats.total_new_outputs_ex_coinbase_amount - prev_stats.total_new_outputs_ex_coinbase_amount)); + block_info.pushKV("unspendable", ValueFromAmount(stats.total_unspendable_amount - prev_stats.total_unspendable_amount)); UniValue unspendables(UniValue::VOBJ); - unspendables.pushKV("genesis_block", ValueFromAmount(stats.unspendables_genesis_block - prev_stats.unspendables_genesis_block)); - unspendables.pushKV("bip30", ValueFromAmount(stats.unspendables_bip30 - prev_stats.unspendables_bip30)); - unspendables.pushKV("scripts", ValueFromAmount(stats.unspendables_scripts - prev_stats.unspendables_scripts)); - unspendables.pushKV("unclaimed_rewards", ValueFromAmount(stats.unspendables_unclaimed_rewards - prev_stats.unspendables_unclaimed_rewards)); + unspendables.pushKV("genesis_block", ValueFromAmount(stats.total_unspendables_genesis_block - prev_stats.total_unspendables_genesis_block)); + unspendables.pushKV("bip30", ValueFromAmount(stats.total_unspendables_bip30 - prev_stats.total_unspendables_bip30)); + unspendables.pushKV("scripts", ValueFromAmount(stats.total_unspendables_scripts - prev_stats.total_unspendables_scripts)); + unspendables.pushKV("unclaimed_rewards", ValueFromAmount(stats.total_unspendables_unclaimed_rewards - prev_stats.total_unspendables_unclaimed_rewards)); block_info.pushKV("unspendables", unspendables); ret.pushKV("block_info", block_info); From 1e3842385b8c0d15086c7cd8736f8c67e6c0c285 Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Mon, 24 May 2021 01:18:51 +0200 Subject: [PATCH 006/112] index: Use batch writing in coinstatsindex WriteBlock --- src/index/coinstatsindex.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index cb940234e2..084e6b9925 100644 --- a/src/index/coinstatsindex.cpp +++ b/src/index/coinstatsindex.cpp @@ -219,7 +219,10 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) m_muhash.Finalize(out); value.second.muhash = out; - return m_db->Write(DBHeightKey(pindex->nHeight), value) && m_db->Write(DB_MUHASH, m_muhash); + CDBBatch batch(*m_db); + batch.Write(DBHeightKey(pindex->nHeight), value); + 
batch.Write(DB_MUHASH, m_muhash); + return m_db->WriteBatch(batch); } static bool CopyHeightIndexToHashIndex(CDBIterator& db_it, CDBBatch& batch, From 01386bfd88019397237256cb16f91de346eb66f2 Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Mon, 24 May 2021 01:24:05 +0200 Subject: [PATCH 007/112] Index: Return early from failed coinstatsindex init --- src/index/coinstatsindex.cpp | 40 +++++++++++++++++------------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index 084e6b9925..b3f5d75fb3 100644 --- a/src/index/coinstatsindex.cpp +++ b/src/index/coinstatsindex.cpp @@ -344,33 +344,31 @@ bool CoinStatsIndex::Init() } } - if (BaseIndex::Init()) { - const CBlockIndex* pindex{CurrentIndex()}; + if (!BaseIndex::Init()) return false; - if (pindex) { - DBVal entry; - if (!LookUpOne(*m_db, pindex, entry)) { - return false; - } + const CBlockIndex* pindex{CurrentIndex()}; - m_transaction_output_count = entry.transaction_output_count; - m_bogo_size = entry.bogo_size; - m_total_amount = entry.total_amount; - m_total_subsidy = entry.total_subsidy; - m_total_unspendable_amount = entry.total_unspendable_amount; - m_total_prevout_spent_amount = entry.total_prevout_spent_amount; - m_total_new_outputs_ex_coinbase_amount = entry.total_new_outputs_ex_coinbase_amount; - m_total_coinbase_amount = entry.total_coinbase_amount; - m_total_unspendables_genesis_block = entry.total_unspendables_genesis_block; - m_total_unspendables_bip30 = entry.total_unspendables_bip30; - m_total_unspendables_scripts = entry.total_unspendables_scripts; - m_total_unspendables_unclaimed_rewards = entry.total_unspendables_unclaimed_rewards; + if (pindex) { + DBVal entry; + if (!LookUpOne(*m_db, pindex, entry)) { + return false; } - return true; + m_transaction_output_count = entry.transaction_output_count; + m_bogo_size = entry.bogo_size; + m_total_amount = entry.total_amount; + m_total_subsidy = entry.total_subsidy; + m_total_unspendable_amount = entry.total_unspendable_amount; + m_total_prevout_spent_amount = entry.total_prevout_spent_amount; + m_total_new_outputs_ex_coinbase_amount = entry.total_new_outputs_ex_coinbase_amount; + m_total_coinbase_amount = entry.total_coinbase_amount; + m_total_unspendables_genesis_block = entry.total_unspendables_genesis_block; + m_total_unspendables_bip30 = entry.total_unspendables_bip30; + m_total_unspendables_scripts = entry.total_unspendables_scripts; + m_total_unspendables_unclaimed_rewards = entry.total_unspendables_unclaimed_rewards; } - return false; + return true; } // Reverse a single block as part of a reorg From 2565478c813fb7278153b113de4b9338fc186872 Mon Sep 17 00:00:00 2001 From: Russell Yanofsky Date: Fri, 4 Jun 2021 17:28:46 -0400 Subject: [PATCH 008/112] wallet test refactor: add CreateSyncedWallet function No change in behavior. This just moves some code from the ListCoins test setup to a reusable util function, so it can be reused in a new test in the next commit. 
--- build_msvc/test_bitcoin/test_bitcoin.vcxproj | 1 + src/Makefile.test.include | 2 ++ src/wallet/test/util.cpp | 38 ++++++++++++++++++++ src/wallet/test/util.h | 19 ++++++++++ src/wallet/test/wallet_tests.cpp | 16 ++------- 5 files changed, 62 insertions(+), 14 deletions(-) create mode 100644 src/wallet/test/util.cpp create mode 100644 src/wallet/test/util.h diff --git a/build_msvc/test_bitcoin/test_bitcoin.vcxproj b/build_msvc/test_bitcoin/test_bitcoin.vcxproj index 5c4b777d51..bb1a780bfa 100644 --- a/build_msvc/test_bitcoin/test_bitcoin.vcxproj +++ b/build_msvc/test_bitcoin/test_bitcoin.vcxproj @@ -16,6 +16,7 @@ + diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 105d09f730..1374f3892e 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -170,6 +170,8 @@ endif BITCOIN_TEST_SUITE += \ + wallet/test/util.cpp \ + wallet/test/util.h \ wallet/test/wallet_test_fixture.cpp \ wallet/test/wallet_test_fixture.h \ wallet/test/init_test_fixture.cpp \ diff --git a/src/wallet/test/util.cpp b/src/wallet/test/util.cpp new file mode 100644 index 0000000000..c3061b93c0 --- /dev/null +++ b/src/wallet/test/util.cpp @@ -0,0 +1,38 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include + +#include +#include +#include +#include +#include + +#include + +#include + +std::unique_ptr CreateSyncedWallet(interfaces::Chain& chain, CChain& cchain, const CKey& key) +{ + auto wallet = std::make_unique(&chain, "", CreateMockWalletDatabase()); + { + LOCK2(wallet->cs_wallet, ::cs_main); + wallet->SetLastBlockProcessed(cchain.Height(), cchain.Tip()->GetBlockHash()); + } + wallet->LoadWallet(); + { + auto spk_man = wallet->GetOrCreateLegacyScriptPubKeyMan(); + LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore); + spk_man->AddKeyPubKey(key, key.GetPubKey()); + } + WalletRescanReserver reserver(*wallet); + reserver.reserve(); + CWallet::ScanResult result = wallet->ScanForWalletTransactions(cchain.Genesis()->GetBlockHash(), 0 /* start_height */, {} /* max_height */, reserver, false /* update */); + BOOST_CHECK_EQUAL(result.status, CWallet::ScanResult::SUCCESS); + BOOST_CHECK_EQUAL(result.last_scanned_block, cchain.Tip()->GetBlockHash()); + BOOST_CHECK_EQUAL(*result.last_scanned_height, cchain.Height()); + BOOST_CHECK(result.last_failed_block.IsNull()); + return wallet; +} diff --git a/src/wallet/test/util.h b/src/wallet/test/util.h new file mode 100644 index 0000000000..288c111571 --- /dev/null +++ b/src/wallet/test/util.h @@ -0,0 +1,19 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef BITCOIN_WALLET_TEST_UTIL_H +#define BITCOIN_WALLET_TEST_UTIL_H + +#include + +class CChain; +class CKey; +class CWallet; +namespace interfaces { +class Chain; +} // namespace interfaces + +std::unique_ptr CreateSyncedWallet(interfaces::Chain& chain, CChain& cchain, const CKey& key); + +#endif // BITCOIN_WALLET_TEST_UTIL_H diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index a0070b8dd3..75a08b6f74 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -480,20 +481,7 @@ class ListCoinsTestingSetup : public TestChain100Setup ListCoinsTestingSetup() { CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())); - wallet = std::make_unique(m_node.chain.get(), "", CreateMockWalletDatabase()); - { - LOCK2(wallet->cs_wallet, ::cs_main); - wallet->SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); - } - wallet->LoadWallet(); - AddKey(*wallet, coinbaseKey); - WalletRescanReserver reserver(*wallet); - reserver.reserve(); - CWallet::ScanResult result = wallet->ScanForWalletTransactions(m_node.chainman->ActiveChain().Genesis()->GetBlockHash(), 0 /* start_height */, {} /* max_height */, reserver, false /* update */); - BOOST_CHECK_EQUAL(result.status, CWallet::ScanResult::SUCCESS); - BOOST_CHECK_EQUAL(result.last_scanned_block, m_node.chainman->ActiveChain().Tip()->GetBlockHash()); - BOOST_CHECK_EQUAL(*result.last_scanned_height, m_node.chainman->ActiveChain().Height()); - BOOST_CHECK(result.last_failed_block.IsNull()); + wallet = CreateSyncedWallet(*m_node.chain, m_node.chainman->ActiveChain(), coinbaseKey); } ~ListCoinsTestingSetup() From fe6dc76b7c9c5405f37464a3b19fcf82aaf22861 Mon Sep 17 00:00:00 2001 From: Russell Yanofsky Date: Fri, 4 Jun 2021 18:38:13 -0400 Subject: [PATCH 009/112] wallet test: Add test for subtract fee from recipient behavior Behavior might have recently changed in #17331 (it is not clear) but not noticed because there is no test coverage. This adds test coverage for current subtract from recipient behavior without changing it. Co-authored-by: Andrew Chow --- src/Makefile.test.include | 1 + src/wallet/test/spend_tests.cpp | 61 +++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 src/wallet/test/spend_tests.cpp diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 1374f3892e..344590fa13 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -151,6 +151,7 @@ BITCOIN_TESTS =\ if ENABLE_WALLET BITCOIN_TESTS += \ wallet/test/psbt_wallet_tests.cpp \ + wallet/test/spend_tests.cpp \ wallet/test/wallet_tests.cpp \ wallet/test/walletdb_tests.cpp \ wallet/test/wallet_crypto_tests.cpp \ diff --git a/src/wallet/test/spend_tests.cpp b/src/wallet/test/spend_tests.cpp new file mode 100644 index 0000000000..66e7de4273 --- /dev/null +++ b/src/wallet/test/spend_tests.cpp @@ -0,0 +1,61 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include +#include +#include +#include +#include + +#include + +BOOST_FIXTURE_TEST_SUITE(spend_tests, WalletTestingSetup) + +BOOST_FIXTURE_TEST_CASE(SubtractFee, TestChain100Setup) +{ + CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())); + auto wallet = CreateSyncedWallet(*m_node.chain, m_node.chainman->ActiveChain(), coinbaseKey); + + // Check that a subtract-from-recipient transaction slightly less than the + // coinbase input amount does not create a change output (because it would + // be uneconomical to add and spend the output), and make sure it pays the + // leftover input amount which would have been change to the recipient + // instead of the miner. + auto check_tx = [&wallet](CAmount leftover_input_amount) { + CRecipient recipient{GetScriptForRawPubKey({}), 50 * COIN - leftover_input_amount, true /* subtract fee */}; + CTransactionRef tx; + CAmount fee; + int change_pos = -1; + bilingual_str error; + CCoinControl coin_control; + coin_control.m_feerate.emplace(10000); + coin_control.fOverrideFeeRate = true; + FeeCalculation fee_calc; + BOOST_CHECK(wallet->CreateTransaction({recipient}, tx, fee, change_pos, error, coin_control, fee_calc)); + BOOST_CHECK_EQUAL(tx->vout.size(), 1); + BOOST_CHECK_EQUAL(tx->vout[0].nValue, recipient.nAmount + leftover_input_amount - fee); + BOOST_CHECK_GT(fee, 0); + return fee; + }; + + // Send full input amount to recipient, check that only nonzero fee is + // subtracted (to_reduce == fee). + const CAmount fee{check_tx(0)}; + + // Send slightly less than full input amount to recipient, check leftover + // input amount is paid to recipient not the miner (to_reduce == fee - 123) + BOOST_CHECK_EQUAL(fee, check_tx(123)); + + // Send full input minus fee amount to recipient, check leftover input + // amount is paid to recipient not the miner (to_reduce == 0) + BOOST_CHECK_EQUAL(fee, check_tx(fee)); + + // Send full input minus more than the fee amount to recipient, check + // leftover input amount is paid to recipient not the miner (to_reduce == + // -123). This overpays the recipient instead of overpaying the miner more + // than double the neccesary fee. 
+ BOOST_CHECK_EQUAL(fee, check_tx(fee + 123)); +} + +BOOST_AUTO_TEST_SUITE_END() From faa670d3862783017f5cd1491f37648e1875f19f Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Fri, 18 Jun 2021 10:25:16 +0200 Subject: [PATCH 010/112] test: Properly set BIP34 height in CreateNewBlock_validity unit test --- src/test/miner_tests.cpp | 59 ++++++++++++++++------------------------ 1 file changed, 23 insertions(+), 36 deletions(-) diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index e20c5e4e8f..5b991de113 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -51,36 +51,25 @@ BlockAssembler MinerTestingSetup::AssemblerForTest(const CChainParams& params) constexpr static struct { unsigned char extranonce; unsigned int nonce; -} blockinfo[] = { - {4, 0xa4a3e223}, {2, 0x15c32f9e}, {1, 0x0375b547}, {1, 0x7004a8a5}, - {2, 0xce440296}, {2, 0x52cfe198}, {1, 0x77a72cd0}, {2, 0xbb5d6f84}, - {2, 0x83f30c2c}, {1, 0x48a73d5b}, {1, 0xef7dcd01}, {2, 0x6809c6c4}, - {2, 0x0883ab3c}, {1, 0x087bbbe2}, {2, 0x2104a814}, {2, 0xdffb6daa}, - {1, 0xee8a0a08}, {2, 0xba4237c1}, {1, 0xa70349dc}, {1, 0x344722bb}, - {3, 0xd6294733}, {2, 0xec9f5c94}, {2, 0xca2fbc28}, {1, 0x6ba4f406}, - {2, 0x015d4532}, {1, 0x6e119b7c}, {2, 0x43e8f314}, {2, 0x27962f38}, - {2, 0xb571b51b}, {2, 0xb36bee23}, {2, 0xd17924a8}, {2, 0x6bc212d9}, - {1, 0x630d4948}, {2, 0x9a4c4ebb}, {2, 0x554be537}, {1, 0xd63ddfc7}, - {2, 0xa10acc11}, {1, 0x759a8363}, {2, 0xfb73090d}, {1, 0xe82c6a34}, - {1, 0xe33e92d7}, {3, 0x658ef5cb}, {2, 0xba32ff22}, {5, 0x0227a10c}, - {1, 0xa9a70155}, {5, 0xd096d809}, {1, 0x37176174}, {1, 0x830b8d0f}, - {1, 0xc6e3910e}, {2, 0x823f3ca8}, {1, 0x99850849}, {1, 0x7521fb81}, - {1, 0xaacaabab}, {1, 0xd645a2eb}, {5, 0x7aea1781}, {5, 0x9d6e4b78}, - {1, 0x4ce90fd8}, {1, 0xabdc832d}, {6, 0x4a34f32a}, {2, 0xf2524c1c}, - {2, 0x1bbeb08a}, {1, 0xad47f480}, {1, 0x9f026aeb}, {1, 0x15a95049}, - {2, 0xd1cb95b2}, {2, 0xf84bbda5}, {1, 0x0fa62cd1}, {1, 0xe05f9169}, - {1, 0x78d194a9}, {5, 0x3e38147b}, {5, 0x737ba0d4}, {1, 0x63378e10}, - {1, 0x6d5f91cf}, {2, 0x88612eb8}, {2, 0xe9639484}, {1, 0xb7fabc9d}, - {2, 0x19b01592}, {1, 0x5a90dd31}, {2, 0x5bd7e028}, {2, 0x94d00323}, - {1, 0xa9b9c01a}, {1, 0x3a40de61}, {1, 0x56e7eec7}, {5, 0x859f7ef6}, - {1, 0xfd8e5630}, {1, 0x2b0c9f7f}, {1, 0xba700e26}, {1, 0x7170a408}, - {1, 0x70de86a8}, {1, 0x74d64cd5}, {1, 0x49e738a1}, {2, 0x6910b602}, - {0, 0x643c565f}, {1, 0x54264b3f}, {2, 0x97ea6396}, {2, 0x55174459}, - {2, 0x03e8779a}, {1, 0x98f34d8f}, {1, 0xc07b2b07}, {1, 0xdfe29668}, - {1, 0x3141c7c1}, {1, 0xb3b595f4}, {1, 0x735abf08}, {5, 0x623bfbce}, - {2, 0xd351e722}, {1, 0xf4ca48c9}, {1, 0x5b19c670}, {1, 0xa164bf0e}, - {2, 0xbbbeb305}, {2, 0xfe1c810a}, -}; +} BLOCKINFO[]{{8, 582909131}, {0, 971462344}, {2, 1169481553}, {6, 66147495}, {7, 427785981}, {8, 80538907}, + {8, 207348013}, {2, 1951240923}, {4, 215054351}, {1, 491520534}, {8, 1282281282}, {4, 639565734}, + {3, 248274685}, {8, 1160085976}, {6, 396349768}, {5, 393780549}, {5, 1096899528}, {4, 965381630}, + {0, 728758712}, {5, 318638310}, {3, 164591898}, {2, 274234550}, {2, 254411237}, {7, 561761812}, + {2, 268342573}, {0, 402816691}, {1, 221006382}, {6, 538872455}, {7, 393315655}, {4, 814555937}, + {7, 504879194}, {6, 467769648}, {3, 925972193}, {2, 200581872}, {3, 168915404}, {8, 430446262}, + {5, 773507406}, {3, 1195366164}, {0, 433361157}, {3, 297051771}, {0, 558856551}, {2, 501614039}, + {3, 528488272}, {2, 473587734}, {8, 230125274}, {2, 494084400}, {4, 357314010}, {8, 60361686}, + {7, 640624687}, {3, 480441695}, {8, 
1424447925}, {4, 752745419}, {1, 288532283}, {6, 669170574}, + {5, 1900907591}, {3, 555326037}, {3, 1121014051}, {0, 545835650}, {8, 189196651}, {5, 252371575}, + {0, 199163095}, {6, 558895874}, {6, 1656839784}, {6, 815175452}, {6, 718677851}, {5, 544000334}, + {0, 340113484}, {6, 850744437}, {4, 496721063}, {8, 524715182}, {6, 574361898}, {6, 1642305743}, + {6, 355110149}, {5, 1647379658}, {8, 1103005356}, {7, 556460625}, {3, 1139533992}, {5, 304736030}, + {2, 361539446}, {2, 143720360}, {6, 201939025}, {7, 423141476}, {4, 574633709}, {3, 1412254823}, + {4, 873254135}, {0, 341817335}, {6, 53501687}, {3, 179755410}, {5, 172209688}, {8, 516810279}, + {4, 1228391489}, {8, 325372589}, {6, 550367589}, {0, 876291812}, {7, 412454120}, {7, 717202854}, + {2, 222677843}, {6, 251778867}, {7, 842004420}, {7, 194762829}, {4, 96668841}, {1, 925485796}, + {0, 792342903}, {6, 678455063}, {6, 773251385}, {5, 186617471}, {6, 883189502}, {7, 396077336}, + {8, 254702874}, {0, 455592851}}; static CBlockIndex CreateBlockIndex(int nHeight, CBlockIndex* active_chain_tip) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { @@ -220,20 +209,18 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) // We can't make transactions until we have inputs // Therefore, load 110 blocks :) - static_assert(std::size(blockinfo) == 110, "Should have 110 blocks to import"); + static_assert(std::size(BLOCKINFO) == 110, "Should have 110 blocks to import"); int baseheight = 0; std::vector txFirst; - for (const auto& bi : blockinfo) { + for (const auto& bi : BLOCKINFO) { CBlock *pblock = &pblocktemplate->block; // pointer for convenience { LOCK(cs_main); - pblock->nVersion = 1; + pblock->nVersion = VERSIONBITS_TOP_BITS; pblock->nTime = m_node.chainman->ActiveChain().Tip()->GetMedianTimePast()+1; CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.nVersion = 1; - txCoinbase.vin[0].scriptSig = CScript(); - txCoinbase.vin[0].scriptSig.push_back(bi.extranonce); - txCoinbase.vin[0].scriptSig.push_back(m_node.chainman->ActiveChain().Height()); + txCoinbase.vin[0].scriptSig = CScript{} << (m_node.chainman->ActiveChain().Height() + 1) << bi.extranonce; txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this) txCoinbase.vout[0].scriptPubKey = CScript(); pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase)); From fac90c55be478f0323eafa1d560ea2c56f04fb23 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Wed, 3 Jul 2019 16:14:55 +0200 Subject: [PATCH 011/112] test: Create all blocks with version 4 or higher --- test/functional/test_framework/blocktools.py | 3 ++- test/functional/test_framework/messages.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index f35ea6c122..fe441a093e 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -59,6 +59,7 @@ WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed" NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]} +VERSIONBITS_LAST_OLD_BLOCK_VERSION = 4 def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None): @@ -66,7 +67,7 @@ def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl block = CBlock() if tmpl is None: tmpl = {} - block.nVersion = version or tmpl.get('version') or 1 + block.nVersion = version or tmpl.get('version') or VERSIONBITS_LAST_OLD_BLOCK_VERSION block.nTime = ntime or 
tmpl.get('curtime') or int(time.time() + 600) block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10) if tmpl and not tmpl.get('bits') is None: diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 5a9736a7a3..f3be9c4e5f 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -615,7 +615,7 @@ def __init__(self, header=None): self.calc_sha256() def set_null(self): - self.nVersion = 1 + self.nVersion = 4 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 From 222290f54388270937cb6c174195717e2214ec0d Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Wed, 3 Jul 2019 16:37:00 +0200 Subject: [PATCH 012/112] test: Set BIP34Height = 2 for regtest --- src/chainparams.cpp | 2 +- src/test/validation_block_tests.cpp | 18 +++++++++--------- test/functional/feature_block.py | 5 ++++- test/functional/rpc_blockchain.py | 2 +- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/chainparams.cpp b/src/chainparams.cpp index fdaadeed4a..1a0af6ffea 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -390,7 +390,7 @@ class CRegTestParams : public CChainParams { consensus.signet_challenge.clear(); consensus.nSubsidyHalvingInterval = 150; consensus.BIP16Exception = uint256(); - consensus.BIP34Height = 500; // BIP34 activated on regtest (Used in functional tests) + consensus.BIP34Height = 2; // BIP34 activated on regtest (Block at height 1 not enforced for testing purposes) consensus.BIP34Hash = uint256(); consensus.BIP65Height = 1351; // BIP65 activated on regtest (Used in functional tests) consensus.BIP66Height = 1251; // BIP66 activated on regtest (Used in functional tests) diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index e0bc10d660..8f4ff6815b 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -77,6 +77,8 @@ std::shared_ptr MinerTestingSetup::Block(const uint256& prev_hash) txCoinbase.vout[1].nValue = txCoinbase.vout[0].nValue; txCoinbase.vout[0].nValue = 0; txCoinbase.vin[0].scriptWitness.SetNull(); + // Always pad with OP_0 at the end to avoid bad-cb-length error + txCoinbase.vin[0].scriptSig = CScript{} << WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(prev_hash)->nHeight + 1) << OP_0; pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase)); return pblock; @@ -84,8 +86,8 @@ std::shared_ptr MinerTestingSetup::Block(const uint256& prev_hash) std::shared_ptr MinerTestingSetup::FinalizeBlock(std::shared_ptr pblock) { - LOCK(cs_main); // For m_node.chainman->m_blockman.LookupBlockIndex - GenerateCoinbaseCommitment(*pblock, m_node.chainman->m_blockman.LookupBlockIndex(pblock->hashPrevBlock), Params().GetConsensus()); + const CBlockIndex* prev_block{WITH_LOCK(::cs_main, return m_node.chainman->m_blockman.LookupBlockIndex(pblock->hashPrevBlock))}; + GenerateCoinbaseCommitment(*pblock, prev_block, Params().GetConsensus()); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); @@ -93,6 +95,11 @@ std::shared_ptr MinerTestingSetup::FinalizeBlock(std::shared_ptr ++(pblock->nNonce); } + // submit block header, so that miner can get the block height from the + // global state and the node has the topology of the chain + BlockValidationState ignored; + BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlockHeaders({pblock->GetBlockHeader()}, ignored, Params())); + return pblock; } @@ -147,13 +154,6 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering) } bool 
ignored; - BlockValidationState state; - std::vector headers; - std::transform(blocks.begin(), blocks.end(), std::back_inserter(headers), [](std::shared_ptr b) { return b->GetBlockHeader(); }); - - // Process all the headers so we understand the toplogy of the chain - BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlockHeaders(headers, state, Params())); - // Connect the genesis block and drain any outstanding events BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlock(Params(), std::make_shared(Params().GenesisBlock()), true, &ignored)); SyncWithValidationInterfaceQueue(); diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 158efb52c9..d06f030022 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -373,7 +373,9 @@ def run_test(self): # b30 has a max-sized coinbase scriptSig. self.move_tip(23) b30 = self.next_block(30) - b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 + b30.vtx[0].vin[0].scriptSig = bytes(b30.vtx[0].vin[0].scriptSig) # Convert CScript to raw bytes + b30.vtx[0].vin[0].scriptSig += b'\x00' * (100 - len(b30.vtx[0].vin[0].scriptSig)) # Fill with 0s + assert_equal(len(b30.vtx[0].vin[0].scriptSig), 100) b30.vtx[0].rehash() b30 = self.update_block(30, []) self.send_blocks([b30], True) @@ -817,6 +819,7 @@ def run_test(self): b61.vtx[0].rehash() b61 = self.update_block(61, []) assert_equal(duplicate_tx.serialize(), b61.vtx[0].serialize()) + # BIP30 is always checked on regtest, regardless of the BIP34 activation height self.send_blocks([b61], success=False, reject_reason='bad-txns-BIP30', reconnect=True) # Test BIP30 (allow duplicate if spent) diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 00324347ed..b3f0fc6e2e 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -129,7 +129,7 @@ def _test_getblockchaininfo(self): assert_greater_than(res['size_on_disk'], 0) assert_equal(res['softforks'], { - 'bip34': {'type': 'buried', 'active': False, 'height': 500}, + 'bip34': {'type': 'buried', 'active': True, 'height': 2}, 'bip66': {'type': 'buried', 'active': False, 'height': 1251}, 'bip65': {'type': 'buried', 'active': False, 'height': 1351}, 'csv': {'type': 'buried', 'active': False, 'height': 432}, From a084ebe1330bcec15715e08b0f65319142927ad1 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Wed, 30 Jun 2021 23:40:39 +0200 Subject: [PATCH 013/112] test: introduce `get_weight()` helper for CTransaction --- test/functional/feature_segwit.py | 12 ++++++------ test/functional/p2p_segwit.py | 6 ++---- test/functional/test_framework/messages.py | 9 ++++++--- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index 42910904d7..92cc260f48 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -260,8 +260,8 @@ def run_test(self): assert_equal(int(self.nodes[0].getmempoolentry(txid1)["wtxid"], 16), tx1.calc_sha256(True)) # Check that weight and vsize are properly reported in mempool entry (txid1) - assert_equal(self.nodes[0].getmempoolentry(txid1)["vsize"], (self.nodes[0].getmempoolentry(txid1)["weight"] + 3) // 4) - assert_equal(self.nodes[0].getmempoolentry(txid1)["weight"], len(tx1.serialize_without_witness())*3 + len(tx1.serialize_with_witness())) + assert_equal(self.nodes[0].getmempoolentry(txid1)["vsize"], tx1.get_vsize()) + assert_equal(self.nodes[0].getmempoolentry(txid1)["weight"], tx1.get_weight()) # Now create tx2, which 
will spend from txid1. tx = CTransaction() @@ -276,8 +276,8 @@ def run_test(self): assert_equal(int(self.nodes[0].getmempoolentry(txid2)["wtxid"], 16), tx.calc_sha256(True)) # Check that weight and vsize are properly reported in mempool entry (txid2) - assert_equal(self.nodes[0].getmempoolentry(txid2)["vsize"], (self.nodes[0].getmempoolentry(txid2)["weight"] + 3) // 4) - assert_equal(self.nodes[0].getmempoolentry(txid2)["weight"], len(tx.serialize_without_witness())*3 + len(tx.serialize_with_witness())) + assert_equal(self.nodes[0].getmempoolentry(txid2)["vsize"], tx.get_vsize()) + assert_equal(self.nodes[0].getmempoolentry(txid2)["weight"], tx.get_weight()) # Now create tx3, which will spend from txid2 tx = CTransaction() @@ -299,8 +299,8 @@ def run_test(self): assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True)) # Check that weight and vsize are properly reported in mempool entry (txid3) - assert_equal(self.nodes[0].getmempoolentry(txid3)["vsize"], (self.nodes[0].getmempoolentry(txid3)["weight"] + 3) // 4) - assert_equal(self.nodes[0].getmempoolentry(txid3)["weight"], len(tx.serialize_without_witness())*3 + len(tx.serialize_with_witness())) + assert_equal(self.nodes[0].getmempoolentry(txid3)["vsize"], tx.get_vsize()) + assert_equal(self.nodes[0].getmempoolentry(txid3)["weight"], tx.get_weight()) # Mine a block to clear the gbt cache again. self.nodes[0].generate(1) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 95c7aec318..f34908e134 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -4,7 +4,6 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test segwit transactions and blocks on P2P network.""" from decimal import Decimal -import math import random import struct import time @@ -1367,10 +1366,9 @@ def test_tx_relay_after_segwit_activation(self): raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1) assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True)) assert_equal(raw_tx["size"], len(tx3.serialize_with_witness())) - weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness()) - vsize = math.ceil(weight / 4) + vsize = tx3.get_vsize() assert_equal(raw_tx["vsize"], vsize) - assert_equal(raw_tx["weight"], weight) + assert_equal(raw_tx["weight"], tx3.get_weight()) assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1) assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex()) assert vsize != raw_tx["size"] diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 504c8c70d4..8d0bd9f69a 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -590,12 +590,15 @@ def is_valid(self): return False return True - # Calculate the virtual transaction size using witness and non-witness + # Calculate the transaction weight using witness and non-witness # serialization size (does NOT use sigops). 
- def get_vsize(self): + def get_weight(self): with_witness_size = len(self.serialize_with_witness()) without_witness_size = len(self.serialize_without_witness()) - return math.ceil(((WITNESS_SCALE_FACTOR - 1) * without_witness_size + with_witness_size) / WITNESS_SCALE_FACTOR) + return (WITNESS_SCALE_FACTOR - 1) * without_witness_size + with_witness_size + + def get_vsize(self): + return math.ceil(self.get_weight() / WITNESS_SCALE_FACTOR) def __repr__(self): return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \ From 4af97c74edcda56cd15523bf3a335adea2bad14a Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Thu, 1 Jul 2021 00:10:43 +0200 Subject: [PATCH 014/112] test: introduce `get_weight()` helper for CBlock --- test/functional/p2p_segwit.py | 3 +-- test/functional/test_framework/messages.py | 7 +++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index f34908e134..74eda6620f 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -450,8 +450,7 @@ def test_block_relay(self): rpc_details = self.nodes[0].getblock(block.hash, True) assert_equal(rpc_details["size"], len(block.serialize())) assert_equal(rpc_details["strippedsize"], len(block.serialize(False))) - weight = 3 * len(block.serialize(False)) + len(block.serialize()) - assert_equal(rpc_details["weight"], weight) + assert_equal(rpc_details["weight"], block.get_weight()) # Upgraded node should not ask for blocks from unupgraded block4 = self.build_next_block(version=4) diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 8d0bd9f69a..1abe604b28 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -746,6 +746,13 @@ def solve(self): self.nNonce += 1 self.rehash() + # Calculate the block weight using witness and non-witness + # serialization size (does NOT use sigops). + def get_weight(self): + with_witness_size = len(self.serialize(with_witness=True)) + without_witness_size = len(self.serialize(with_witness=False)) + return (WITNESS_SCALE_FACTOR - 1) * without_witness_size + with_witness_size + def __repr__(self): return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \ % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, From 607076d01bf23c69ac21950c17b01fb4e1130774 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Thu, 1 Jul 2021 01:43:45 +0200 Subject: [PATCH 015/112] test: remove confusing `MAX_BLOCK_BASE_SIZE` The constant `MAX_BLOCK_BASE_SIZE` has been removed from the core implementation years ago due to being confusing and superfluous, as it is implied by the block weight limit (see PRs #10618 and #10608). Since there is also no point in still keeping it in the functional test framework, we switch to weight-based accounting on the relevant test code parts and use `MAX_BLOCK_WEIGHT` instead for the block limit checks. 
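For illustration only (not part of the patch), a minimal Python sketch of the weight-based accounting this series switches to; the helper names `weight` and `vsize` are illustrative, while the constants match those defined in the test framework:

    import math

    WITNESS_SCALE_FACTOR = 4
    MAX_BLOCK_WEIGHT = 4_000_000

    def weight(stripped_size: int, total_size: int) -> int:
        # weight = 3 * non-witness (base) serialized size + with-witness (total) size
        return (WITNESS_SCALE_FACTOR - 1) * stripped_size + total_size

    def vsize(stripped_size: int, total_size: int) -> int:
        return math.ceil(weight(stripped_size, total_size) / WITNESS_SCALE_FACTOR)

    # A block without witness data serializes to the same size with and without
    # witnesses, so the old 1 MB base-size limit maps onto MAX_BLOCK_WEIGHT:
    assert weight(1_000_000, 1_000_000) == MAX_BLOCK_WEIGHT

Because the blocks built in these tests carry no witness data, their size checks can be expressed in weight units instead of base-size units without changing what the tests exercise.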
--- test/functional/feature_block.py | 40 +++++++++---------- test/functional/mempool_accept.py | 4 +- .../mining_prioritisetransaction.py | 8 ++-- test/functional/p2p_segwit.py | 28 ++++--------- test/functional/test_framework/messages.py | 2 +- 5 files changed, 35 insertions(+), 47 deletions(-) diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 389db73bc9..75d4823bfb 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -22,7 +22,7 @@ CTransaction, CTxIn, CTxOut, - MAX_BLOCK_BASE_SIZE, + MAX_BLOCK_WEIGHT, uint256_from_compact, uint256_from_str, ) @@ -307,33 +307,33 @@ def run_test(self): b22 = self.next_block(22, spend=out[5]) self.send_blocks([b22], success=False, reject_reason='bad-txns-premature-spend-of-coinbase', reconnect=True) - # Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected + # Create a block on either side of MAX_BLOCK_WEIGHT and make sure its accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) - self.log.info("Accept a block of size MAX_BLOCK_BASE_SIZE") + self.log.info("Accept a block of weight MAX_BLOCK_WEIGHT") self.move_tip(15) b23 = self.next_block(23, spend=out[6]) tx = CTransaction() - script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69 + script_length = (MAX_BLOCK_WEIGHT - b23.get_weight() - 276) // 4 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) b23 = self.update_block(23, [tx]) - # Make sure the math above worked out to produce a max-sized block - assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE) + # Make sure the math above worked out to produce a max-weighted block + assert_equal(b23.get_weight(), MAX_BLOCK_WEIGHT) self.send_blocks([b23], True) self.save_spendable_output() - self.log.info("Reject a block of size MAX_BLOCK_BASE_SIZE + 1") + self.log.info("Reject a block of weight MAX_BLOCK_WEIGHT + 4") self.move_tip(15) b24 = self.next_block(24, spend=out[6]) - script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69 + script_length = (MAX_BLOCK_WEIGHT - b24.get_weight() - 276) // 4 script_output = CScript([b'\x00' * (script_length + 1)]) tx.vout = [CTxOut(0, script_output)] b24 = self.update_block(24, [tx]) - assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE + 1) + assert_equal(b24.get_weight(), MAX_BLOCK_WEIGHT + 1 * 4) self.send_blocks([b24], success=False, reject_reason='bad-blk-length', reconnect=True) b25 = self.next_block(25, spend=out[7]) @@ -485,13 +485,13 @@ def run_test(self): # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE tx_new = None tx_last = tx - total_size = len(b39.serialize()) - while(total_size < MAX_BLOCK_BASE_SIZE): + total_weight = b39.get_weight() + while total_weight < MAX_BLOCK_WEIGHT: tx_new = self.create_tx(tx_last, 1, 1, p2sh_script) tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE]))) tx_new.rehash() - total_size += len(tx_new.serialize()) - if total_size >= MAX_BLOCK_BASE_SIZE: + total_weight += tx_new.get_weight() + if total_weight >= MAX_BLOCK_WEIGHT: break b39.vtx.append(tx_new) # add tx to block tx_last = tx_new @@ -502,7 +502,7 @@ def run_test(self): # Make sure we didn't accidentally make too big a block. Note that the # size of the block has non-determinism due to the ECDSA signature in # the first transaction. 
- while (len(b39.serialize()) >= MAX_BLOCK_BASE_SIZE): + while b39.get_weight() >= MAX_BLOCK_WEIGHT: del b39.vtx[-1] b39 = self.update_block(39, []) @@ -892,7 +892,7 @@ def run_test(self): self.send_blocks([b63], success=False, reject_reason='bad-txns-nonfinal', reconnect=True) # This checks that a block with a bloated VARINT between the block_header and the array of tx such that - # the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint, + # the block is > MAX_BLOCK_WEIGHT with the bloated varint, but <= MAX_BLOCK_WEIGHT without the bloated varint, # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. # @@ -917,12 +917,12 @@ def run_test(self): tx = CTransaction() # use canonical serialization to calculate size - script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69 + script_length = (MAX_BLOCK_WEIGHT - 4 * len(b64a.normal_serialize()) - 276) // 4 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) b64a = self.update_block("64a", [tx]) - assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8) + assert_equal(b64a.get_weight(), MAX_BLOCK_WEIGHT + 8 * 4) self.send_blocks([b64a], success=False, reject_reason='non-canonical ReadCompactSize()') # bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently @@ -936,7 +936,7 @@ def run_test(self): b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) - assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE) + assert_equal(b64.get_weight(), MAX_BLOCK_WEIGHT) self.blocks[64] = b64 b64 = self.update_block(64, []) self.send_blocks([b64], True) @@ -1270,12 +1270,12 @@ def run_test(self): for i in range(89, LARGE_REORG_SIZE + 89): b = self.next_block(i, spend) tx = CTransaction() - script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69 + script_length = (MAX_BLOCK_WEIGHT - b.get_weight() - 276) // 4 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) b = self.update_block(i, [tx]) - assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE) + assert_equal(b.get_weight(), MAX_BLOCK_WEIGHT) blocks.append(b) self.save_spendable_output() spend = self.get_spendable_output() diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index 1705d957aa..ac7e78b92e 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -14,7 +14,7 @@ COIN, COutPoint, CTxOut, - MAX_BLOCK_BASE_SIZE, + MAX_BLOCK_WEIGHT, MAX_MONEY, tx_from_hex, ) @@ -207,7 +207,7 @@ def run_test(self): self.log.info('A really large transaction') tx = tx_from_hex(raw_tx_reference) - tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_BASE_SIZE / len(tx.vin[0].serialize())) + tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_WEIGHT // 4 / len(tx.vin[0].serialize())) self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}], rawtxs=[tx.serialize().hex()], diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py index 1426fdaacb..9fc38ebf53 100755 --- a/test/functional/mining_prioritisetransaction.py +++ b/test/functional/mining_prioritisetransaction.py @@ -6,7 +6,7 @@ 
import time -from test_framework.messages import COIN, MAX_BLOCK_BASE_SIZE +from test_framework.messages import COIN, MAX_BLOCK_WEIGHT from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts @@ -61,15 +61,15 @@ def run_test(self): txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee) # Make sure that the size of each group of transactions exceeds - # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create - # more transactions. + # MAX_BLOCK_WEIGHT // 4 -- otherwise the test needs to be revised to + # create more transactions. mempool = self.nodes[0].getrawmempool(True) sizes = [0, 0, 0] for i in range(3): for j in txids[i]: assert j in mempool sizes[i] += mempool[j]['vsize'] - assert sizes[i] > MAX_BLOCK_BASE_SIZE # Fail => raise utxo_count + assert sizes[i] > MAX_BLOCK_WEIGHT // 4 # Fail => raise utxo_count # add a fee delta to something in the cheapest bucket and make sure it gets mined # also check that a different entry in the cheapest bucket is NOT mined diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 74eda6620f..64cd0e7b42 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -21,7 +21,7 @@ CTxInWitness, CTxOut, CTxWitness, - MAX_BLOCK_BASE_SIZE, + MAX_BLOCK_WEIGHT, MSG_BLOCK, MSG_TX, MSG_WITNESS_FLAG, @@ -110,16 +110,6 @@ def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key): tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script] tx_to.rehash() -def get_virtual_size(witness_block): - """Calculate the virtual size of a witness block. 
- - Virtual size is base + witness/4.""" - base_size = len(witness_block.serialize(with_witness=False)) - total_size = len(witness_block.serialize()) - # the "+3" is so we round up - vsize = int((3 * base_size + total_size + 3) / 4) - return vsize - def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None): """Send a transaction to the node and check that it's accepted to the mempool @@ -902,7 +892,7 @@ def test_block_malleability(self): block.solve() block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000) - assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE + assert block.get_weight() > MAX_BLOCK_WEIGHT # We can't send over the p2p network, because this is too big to relay # TODO: repeat this test with a block that can be relayed @@ -911,7 +901,7 @@ def test_block_malleability(self): assert self.nodes[0].getbestblockhash() != block.hash block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop() - assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE + assert block.get_weight() < MAX_BLOCK_WEIGHT assert_equal(None, self.nodes[0].submitblock(block.serialize().hex())) assert self.nodes[0].getbestblockhash() == block.hash @@ -974,11 +964,10 @@ def test_witness_block_size(self): child_tx.rehash() self.update_witness_block_with_transactions(block, [parent_tx, child_tx]) - vsize = get_virtual_size(block) - additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4 + additional_bytes = MAX_BLOCK_WEIGHT - block.get_weight() i = 0 while additional_bytes > 0: - # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1 + # Add some more bytes to each input until we hit MAX_BLOCK_WEIGHT+1 extra_bytes = min(additional_bytes + 1, 55) block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes) additional_bytes -= extra_bytes @@ -987,8 +976,7 @@ def test_witness_block_size(self): block.vtx[0].vout.pop() # Remove old commitment add_witness_commitment(block) block.solve() - vsize = get_virtual_size(block) - assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1) + assert_equal(block.get_weight(), MAX_BLOCK_WEIGHT + 1) # Make sure that our test case would exceed the old max-network-message # limit assert len(block.serialize()) > 2 * 1024 * 1024 @@ -1001,7 +989,7 @@ def test_witness_block_size(self): block.vtx[0].vout.pop() add_witness_commitment(block) block.solve() - assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE + assert block.get_weight() == MAX_BLOCK_WEIGHT test_witness_block(self.nodes[0], self.test_node, block, accepted=True) @@ -1727,7 +1715,7 @@ def test_signature_version_1(self): block.vtx.append(tx) # Test the block periodically, if we're close to maxblocksize - if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000): + if block.get_weight() > MAX_BLOCK_WEIGHT - 4000: self.update_witness_block_with_transactions(block, []) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) block = self.build_next_block() diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 1abe604b28..c0a1a9a8e4 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -32,7 +32,7 @@ from test_framework.util import hex_str_to_bytes, assert_equal MAX_LOCATOR_SZ = 101 -MAX_BLOCK_BASE_SIZE = 1000000 +MAX_BLOCK_WEIGHT = 4000000 MAX_BLOOM_FILTER_SIZE = 36000 MAX_BLOOM_HASH_FUNCS = 50 From 1ee6d0b01a517893967379677029fb5417978247 Mon Sep 17 00:00:00 2001 From: Russell Yanofsky Date: Thu, 8 Jul 2021 14:57:03 
-0400 Subject: [PATCH 016/112] gui: Prompt to reset settings when settings.json cannot be read Fixes bitcoin/bitcoin#21340 Co-authored-by: Jarol Rodriguez --- src/qt/bitcoin.cpp | 57 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 3 deletions(-) diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index 442c813a5a..e29d8e957b 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -144,6 +144,58 @@ static void initTranslations(QTranslator &qtTranslatorBase, QTranslator &qtTrans QApplication::installTranslator(&translator); } +static std::string JoinErrors(const std::vector& errors) +{ + return Join(errors, "\n", [](const std::string& error) { return "- " + error; }); +} + +static bool InitSettings() +{ + if (!gArgs.GetSettingsPath()) { + return true; // Do nothing if settings file disabled. + } + + std::vector errors; + if (!gArgs.ReadSettingsFile(&errors)) { + bilingual_str error = _("Settings file could not be read"); + InitError(Untranslated(strprintf("%s:\n%s\n", error.original, JoinErrors(errors)))); + + QMessageBox messagebox(QMessageBox::Critical, PACKAGE_NAME, QString::fromStdString(strprintf("%s.", error.translated)), QMessageBox::Reset | QMessageBox::Abort); + /*: Explanatory text shown on startup when the settings file cannot be read. + Prompts user to make a choice between resetting or aborting. */ + messagebox.setInformativeText(QObject::tr("Do you want to reset settings to default values, or to abort without making changes?")); + messagebox.setDetailedText(QString::fromStdString(JoinErrors(errors))); + messagebox.setTextFormat(Qt::PlainText); + messagebox.setDefaultButton(QMessageBox::Reset); + switch (messagebox.exec()) { + case QMessageBox::Reset: + break; + case QMessageBox::Abort: + return false; + default: + assert(false); + } + } + + errors.clear(); + if (!gArgs.WriteSettingsFile(&errors)) { + bilingual_str error = _("Settings file could not be written"); + InitError(Untranslated(strprintf("%s:\n%s\n", error.original, JoinErrors(errors)))); + + QMessageBox messagebox(QMessageBox::Critical, PACKAGE_NAME, QString::fromStdString(strprintf("%s.", error.translated)), QMessageBox::Ok); + /*: Explanatory text shown on startup when the settings file could not be written. + Prompts user to check that we have the ability to write to the file. + Explains that the user has the option of running without a settings file.*/ + messagebox.setInformativeText(QObject::tr("A fatal error occured. 
Check that settings file is writable, or try running with -nosettings.")); + messagebox.setDetailedText(QString::fromStdString(JoinErrors(errors))); + messagebox.setTextFormat(Qt::PlainText); + messagebox.setDefaultButton(QMessageBox::Ok); + messagebox.exec(); + return false; + } + return true; +} + /* qDebug() message handler --> debug.log */ void DebugMessageHandler(QtMsgType type, const QMessageLogContext& context, const QString &msg) { @@ -569,9 +621,8 @@ int GuiMain(int argc, char* argv[]) // Parse URIs on command line -- this can affect Params() PaymentServer::ipcParseCommandLine(argc, argv); #endif - if (!gArgs.InitSettings(error)) { - InitError(Untranslated(error)); - QMessageBox::critical(nullptr, PACKAGE_NAME, QObject::tr("Error initializing settings: %1").arg(QString::fromStdString(error))); + + if (!InitSettings()) { return EXIT_FAILURE; } From 8a2b58db9ee6a14d36b5d8e430b35f18e7c7b0c5 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Sun, 11 Jul 2021 15:51:48 +0200 Subject: [PATCH 017/112] test: fix segwit terminology (s/witness_program/witness_script/) --- test/functional/p2p_segwit.py | 180 +++++++++---------- test/functional/test_framework/blocktools.py | 4 +- 2 files changed, 92 insertions(+), 92 deletions(-) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index ead9d852fe..3ad25f183e 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -102,7 +102,7 @@ def __init__(self, sha256, n, value): self.nValue = value def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key): - """Add signature for a P2PK witness program.""" + """Add signature for a P2PK witness script.""" tx_hash = SegwitV0SignatureHash(script, tx_to, in_idx, hashtype, value) signature = key.sign_ecdsa(tx_hash) + chr(hashtype).encode('latin-1') tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [signature, script] @@ -281,7 +281,7 @@ def run_test(self): self.test_submit_block() self.test_extra_witness_data() self.test_max_witness_push_length() - self.test_max_witness_program_length() + self.test_max_witness_script_length() self.test_witness_input_length() self.test_block_relay() self.test_tx_relay_after_segwit_activation() @@ -488,8 +488,8 @@ def test_v0_outputs_arent_spendable(self): self.disconnect_nodes(0, 2) # Create two outputs, a p2wsh and p2sh-p2wsh - witness_program = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) p2sh_script_pubkey = script_to_p2sh_script(script_pubkey) value = self.utxo[0].nValue // 3 @@ -624,9 +624,9 @@ def test_standardness_v0(self): V0 segwit outputs and inputs are always standard. V0 segwit inputs may only be mined after activation, but not before.""" - witness_program = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) - p2sh_script_pubkey = script_to_p2sh_script(witness_program) + witness_script = CScript([OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) + p2sh_script_pubkey = script_to_p2sh_script(witness_script) # First prepare a p2sh output (so that spending it will pass standardness) p2sh_tx = CTransaction() @@ -642,7 +642,7 @@ def test_standardness_v0(self): # Now test standardness of v0 P2WSH outputs. # Start by creating a transaction with two outputs. 
tx = CTransaction() - tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))] + tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_script]))] tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)] tx.vout.append(CTxOut(8000, script_pubkey)) # Might burn this later tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER # Just to have the option to bump this tx from the mempool @@ -653,14 +653,14 @@ def test_standardness_v0(self): test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True) # Now create something that looks like a P2PKH output. This won't be spendable. - witness_hash = sha256(witness_program) + witness_hash = sha256(witness_script) script_pubkey = CScript([OP_0, hash160(witness_hash)]) tx2 = CTransaction() # tx was accepted, so we spend the second output. tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")] tx2.vout = [CTxOut(7000, script_pubkey)] tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script] tx2.rehash() test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True) @@ -673,7 +673,7 @@ def test_standardness_v0(self): tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))] tx3.wit.vtxinwit.append(CTxInWitness()) - tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script] tx3.rehash() if not self.segwit_active: # Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed @@ -731,8 +731,8 @@ def test_p2sh_witness(self): """Test P2SH wrapped witness programs.""" # Prepare the p2sh-wrapped witness output - witness_program = CScript([OP_DROP, OP_TRUE]) - p2wsh_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_DROP, OP_TRUE]) + p2wsh_pubkey = script_to_p2wsh_script(witness_script) script_pubkey = script_to_p2sh_script(p2wsh_pubkey) script_sig = CScript([p2wsh_pubkey]) # a push of the redeem script @@ -776,7 +776,7 @@ def test_p2sh_witness(self): spend_tx.vin[0].scriptSig = script_sig spend_tx.rehash() spend_tx.wit.vtxinwit.append(CTxInWitness()) - spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program] + spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_script] # Verify mempool acceptance test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True) @@ -825,18 +825,18 @@ def test_witness_commitments(self): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - # Let's construct a witness program - witness_program = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) + # Let's construct a witness script + witness_script = CScript([OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey)) tx.rehash() # tx2 will spend tx1, and send back to a regular anyone-can-spend address tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) - tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program)) + tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_script)) tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script] 
tx2.rehash() block_3 = self.build_next_block() @@ -871,7 +871,7 @@ def test_witness_commitments(self): block_4 = self.build_next_block() tx3 = CTransaction() tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) - tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program)) + tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_script)) tx3.rehash() block_4.vtx.append(tx3) block_4.hashMerkleRoot = block_4.calc_merkle_root() @@ -933,14 +933,14 @@ def test_witness_block_size(self): assert len(self.utxo) > 0 # Create a P2WSH transaction. - # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE. + # The witness script will be a bunch of OP_2DROP's, followed by OP_TRUE. # This should give us plenty of room to tweak the spending tx's # virtual size. NUM_DROPS = 200 # 201 max ops per script! NUM_OUTPUTS = 50 - witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n) value = self.utxo[0].nValue @@ -960,7 +960,7 @@ def test_witness_block_size(self): child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] for _ in range(NUM_OUTPUTS): child_tx.wit.vtxinwit.append(CTxInWitness()) - child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program] + child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_script] child_tx.rehash() self.update_witness_block_with_transactions(block, [parent_tx, child_tx]) @@ -1041,8 +1041,8 @@ def test_extra_witness_data(self): block = self.build_next_block() - witness_program = CScript([OP_DROP, OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_DROP, OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) # First try extra witness data on a tx that doesn't require a witness tx = CTransaction() @@ -1073,7 +1073,7 @@ def test_extra_witness_data(self): tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()]) - tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program] + tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_script] tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])] block = self.build_next_block() @@ -1113,8 +1113,8 @@ def test_max_witness_push_length(self): block = self.build_next_block() - witness_program = CScript([OP_DROP, OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_DROP, OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) @@ -1126,7 +1126,7 @@ def test_max_witness_push_length(self): tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) # First try a 521-byte stack element - tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program] + tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_script] tx2.rehash() self.update_witness_block_with_transactions(block, [tx, tx2]) @@ -1144,15 +1144,15 @@ def test_max_witness_push_length(self): 
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) @subtest # type: ignore - def test_max_witness_program_length(self): + def test_max_witness_script_length(self): """Test that witness outputs greater than 10kB can't be spent.""" - MAX_PROGRAM_LENGTH = 10000 + MAX_WITNESS_SCRIPT_LENGTH = 10000 - # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes. - long_witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE]) - assert len(long_witness_program) == MAX_PROGRAM_LENGTH + 1 - long_script_pubkey = script_to_p2wsh_script(long_witness_program) + # This script is 19 max pushes (9937 bytes), then 64 more opcode-bytes. + long_witness_script = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE]) + assert len(long_witness_script) == MAX_WITNESS_SCRIPT_LENGTH + 1 + long_script_pubkey = script_to_p2wsh_script(long_witness_script) block = self.build_next_block() @@ -1165,22 +1165,22 @@ def test_max_witness_program_length(self): tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE]))) tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program] + tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_script] tx2.rehash() self.update_witness_block_with_transactions(block, [tx, tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) - # Try again with one less byte in the witness program - witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE]) - assert len(witness_program) == MAX_PROGRAM_LENGTH - script_pubkey = script_to_p2wsh_script(witness_program) + # Try again with one less byte in the witness script + witness_script = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE]) + assert len(witness_script) == MAX_WITNESS_SCRIPT_LENGTH + script_pubkey = script_to_p2wsh_script(witness_script) tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey) tx.rehash() tx2.vin[0].prevout.hash = tx.sha256 - tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program] + tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_script] tx2.rehash() block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx, tx2]) @@ -1193,8 +1193,8 @@ def test_max_witness_program_length(self): def test_witness_input_length(self): """Test that vin length must match vtxinwit length.""" - witness_program = CScript([OP_DROP, OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_DROP, OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) # Create a transaction that splits our utxo into many outputs tx = CTransaction() @@ -1238,7 +1238,7 @@ def serialize_with_witness(self): # First try using a too long vtxinwit for i in range(11): tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program] + tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_script] block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) @@ -1254,15 +1254,15 @@ def serialize_with_witness(self): # Now make one of the intermediate witnesses be incorrect tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program] - tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program] + tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_script] + 
tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_script] block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Fix the broken witness and the block should be accepted. - tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program] + tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_script] block.vtx = [block.vtx[0]] self.update_witness_block_with_transactions(block, [tx2]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) @@ -1300,8 +1300,8 @@ def test_tx_relay_after_segwit_activation(self): test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True) # Now try to add extra witness data to a valid witness tx. - witness_program = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey)) @@ -1312,10 +1312,10 @@ def test_tx_relay_after_segwit_activation(self): tx3.wit.vtxinwit.append(CTxInWitness()) # Add too-large for IsStandard witness and check that it does not enter reject filter - p2sh_program = CScript([OP_TRUE]) - witness_program2 = CScript([b'a' * 400000]) - tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_to_p2sh_script(p2sh_program))) - tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2] + p2sh_script = CScript([OP_TRUE]) + witness_script2 = CScript([b'a' * 400000]) + tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_to_p2sh_script(p2sh_script))) + tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script2] tx3.rehash() # Node will not be blinded to the transaction, requesting it any number of times @@ -1329,14 +1329,14 @@ def test_tx_relay_after_segwit_activation(self): # Remove witness stuffing, instead add extra witness push on stack tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])) - tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program] + tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_script] tx3.rehash() test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True) test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False) # Get rid of the extra witness, and verify acceptance. - tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script] # Also check that old_node gets a tx announcement, even though this is # a witness transaction. 
self.old_node.wait_for_inv([CInv(MSG_TX, tx2.sha256)]) # wait until tx2 was inv'ed @@ -1353,7 +1353,7 @@ def test_tx_relay_after_segwit_activation(self): assert_equal(raw_tx["vsize"], vsize) assert_equal(raw_tx["weight"], weight) assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1) - assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex()) + assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_script.hex()) assert vsize != raw_tx["size"] # Cleanup: mine the transactions and update utxo for next test @@ -1389,8 +1389,8 @@ def test_segwit_versions(self): self.sync_blocks() temp_utxo = [] tx = CTransaction() - witness_program = CScript([OP_TRUE]) - witness_hash = sha256(witness_program) + witness_script = CScript([OP_TRUE]) + witness_hash = sha256(witness_script) assert_equal(len(self.nodes[1].getrawmempool()), 0) for version in list(range(OP_1, OP_16 + 1)) + [OP_0]: # First try to spend to a future version segwit script_pubkey. @@ -1418,7 +1418,7 @@ def test_segwit_versions(self): tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")] tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)] tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script] tx2.rehash() # Gets accepted to both policy-enforcing nodes and others. test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True) @@ -1433,7 +1433,7 @@ def test_segwit_versions(self): tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) tx3.wit.vtxinwit.append(CTxInWitness()) total_value += i.nValue - tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] + tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_script] tx3.vout.append(CTxOut(total_value - 1000, script_pubkey)) tx3.rehash() @@ -1462,8 +1462,8 @@ def test_premature_coinbase_witness_spend(self): block = self.build_next_block() # Change the output of the block to be a witness output. - witness_program = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) block.vtx[0].vout[0].scriptPubKey = script_pubkey # This next line will rehash the coinbase and update the merkle # root, and solve. @@ -1472,9 +1472,9 @@ def test_premature_coinbase_witness_spend(self): spend_tx = CTransaction() spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")] - spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)] + spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_script)] spend_tx.wit.vtxinwit.append(CTxInWitness()) - spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_script] spend_tx.rehash() # Now test a premature spend. @@ -1523,8 +1523,8 @@ def test_uncompressed_pubkey(self): # Now try to spend it. Send it to a P2WSH output, which we'll # use in the next test. 
- witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) - script_wsh = script_to_p2wsh_script(witness_program) + witness_script = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) + script_wsh = script_to_p2wsh_script(witness_script) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) @@ -1553,7 +1553,7 @@ def test_uncompressed_pubkey(self): tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b"")) tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh)) tx3.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) + sign_p2pk_witness_input(witness_script, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) # Should fail policy test. test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') @@ -1570,7 +1570,7 @@ def test_uncompressed_pubkey(self): tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig)) tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey)) tx4.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) + sign_p2pk_witness_input(witness_script, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) # Should fail policy test. test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') @@ -1601,8 +1601,8 @@ def test_signature_version_1(self): key.generate() pubkey = key.get_pubkey().get_bytes() - witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([pubkey, CScriptOp(OP_CHECKSIG)]) + script_pubkey = script_to_p2wsh_script(witness_script) # First create a witness output for use in the tests. 
tx = CTransaction() @@ -1629,18 +1629,18 @@ def test_signature_version_1(self): tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey)) tx.wit.vtxinwit.append(CTxInWitness()) # Too-large input value - sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key) + sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue + 1, key) self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Too-small input value - sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key) + sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue - 1, key) block.vtx.pop() # remove last tx self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=False) # Now try correct value - sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key) + sign_p2pk_witness_input(witness_script, tx, 0, hashtype, prev_utxo.nValue, key) block.vtx.pop() self.update_witness_block_with_transactions(block, [tx]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) @@ -1661,7 +1661,7 @@ def test_signature_version_1(self): for _ in range(NUM_SIGHASH_TESTS): tx.vout.append(CTxOut(split_value, script_pubkey)) tx.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) + sign_p2pk_witness_input(witness_script, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) for i in range(NUM_SIGHASH_TESTS): temp_utxos.append(UTXO(tx.sha256, i, split_value)) @@ -1696,7 +1696,7 @@ def test_signature_version_1(self): if random.randint(0, 1): anyonecanpay = SIGHASH_ANYONECANPAY hashtype = random.randint(1, 3) | anyonecanpay - sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key) + sign_p2pk_witness_input(witness_script, tx, i, hashtype, temp_utxos[i].nValue, key) if (hashtype == SIGHASH_SINGLE and i >= num_outputs): used_sighash_single_out_of_bounds = True tx.rehash() @@ -1726,7 +1726,7 @@ def test_signature_version_1(self): tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b"")) tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh)) tx.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) + sign_p2pk_witness_input(witness_script, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key) tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE]))) @@ -1766,7 +1766,7 @@ def test_signature_version_1(self): # the signatures as we go. 
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b"")) tx.wit.vtxinwit.append(CTxInWitness()) - sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key) + sign_p2pk_witness_input(witness_script, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key) index += 1 block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx]) @@ -1965,8 +1965,8 @@ def test_witness_sigops(self): """Test sigop counting is correct inside witnesses.""" # Keep this under MAX_OPS_PER_SCRIPT (201) - witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF]) + script_pubkey = script_to_p2wsh_script(witness_script) sigops_per_script = 20 * 5 + 193 * 1 # We'll produce 2 extra outputs, one with a program that would take us @@ -1981,13 +1981,13 @@ def test_witness_sigops(self): # This script, when spent with the first # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction, # would push us just over the block sigop limit. - witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF]) - script_pubkey_toomany = script_to_p2wsh_script(witness_program_toomany) + witness_script_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF]) + script_pubkey_toomany = script_to_p2wsh_script(witness_script_toomany) # If we spend this script instead, we would exactly reach our sigop # limit (for witness sigops). - witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF]) - script_pubkey_justright = script_to_p2wsh_script(witness_program_justright) + witness_script_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF]) + script_pubkey_justright = script_to_p2wsh_script(witness_script_justright) # First split our available utxo into a bunch of outputs split_value = self.utxo[0].nValue // outputs @@ -2010,9 +2010,9 @@ def test_witness_sigops(self): for i in range(outputs - 1): tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] + tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script] total_value += tx.vout[i].nValue - tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany] + tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script_toomany] tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE]))) tx2.rehash() @@ -2051,7 +2051,7 @@ def test_witness_sigops(self): tx2.vout.pop() tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b"")) tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright] + tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_script_justright] tx2.rehash() self.update_witness_block_with_transactions(block_5, [tx2]) test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True) @@ -2119,8 +2119,8 @@ def received_wtxidrelay(): # Create a Segwit output from the latest UTXO # and announce it to the network - witness_program = CScript([OP_TRUE]) - script_pubkey = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_TRUE]) + script_pubkey = 
script_to_p2wsh_script(witness_script) tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) @@ -2132,7 +2132,7 @@ def received_wtxidrelay(): tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey)) tx2.wit.vtxinwit.append(CTxInWitness()) - tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program] + tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_script] tx2.rehash() # Announce Segwit transaction with wtxid diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 833a215993..9c4d9c46f0 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -210,8 +210,8 @@ def witness_script(use_p2wsh, pubkey): pkscript = key_to_p2wpkh_script(pubkey) else: # 1-of-1 multisig - witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG]) - pkscript = script_to_p2wsh_script(witness_program) + witness_script = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG]) + pkscript = script_to_p2wsh_script(witness_script) return pkscript.hex() def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount): From c020cbaa5c8e9e61b2b8efd8dc09be743fcd4273 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Wed, 14 Jul 2021 10:02:02 -0700 Subject: [PATCH 018/112] Squashed 'src/secp256k1/' changes from efad3506a8..be8d9c262f be8d9c262f Merge bitcoin-core/secp256k1#965: gen_context: Don't use any ASM aeece44599 gen_context: Don't use any ASM 7688a4f13a Merge bitcoin-core/secp256k1#963: "Schnorrsig API overhaul" fixups 90e83449b2 ci: Add C++ test f698caaff6 Use unsigned char consistently for byte arrays b5b8e7b719 Don't declare constants twice 769528f307 Don't use string literals for char arrays without NUL termination 2cc3cfa583 Fix -Wmissing-braces warning in clang 0440945fb5 Merge #844: schnorrsig API overhaul ec3aaa5014 Merge #960: tests_exhaustive: check the result of secp256k1_ecdsa_sign a1ee83c654 tests_exhaustive: check the result of secp256k1_ecdsa_sign 253f90cdeb Merge bitcoin-core/secp256k1#951: configure: replace AC_PATH_PROG to AC_CHECK_PROG 446d28d9de Merge bitcoin-core/secp256k1#944: Various improvements related to CFLAGS 0302138f75 ci: Make compiler warning into errors on CI b924e1e605 build: Ensure that configure's compile checks default to -O2 7939cd571c build: List *CPPFLAGS before *CFLAGS like on the compiler command line 595e8a35d8 build: Enable -Wcast-align=strict warning 07256267ff build: Use own variable SECP_CFLAGS instead of touching user CFLAGS 4866178dfc Merge bitcoin-core/secp256k1#955: Add random field multiply/square tests 75ce488c2a Merge bitcoin-core/secp256k1#959: tests: really test the non-var scalar inverse 41ed13942b tests: really test the non-var scalar inverse 5f6ceafcfa schnorrsig: allow setting MSGLEN != 32 in benchmark fdd06b7967 schnorrsig: add tests for sign_custom and varlen msg verification d8d806aaf3 schnorrsig: add extra parameter struct for sign_custom a0c3fc177f schnorrsig: allow signing and verification of variable length msgs 5a8e4991ad Add secp256k1_tagged_sha256 as defined in BIP-340 b6c0b72fb0 schnorrsig: remove noncefp args from sign; add sign_custom function bdf19f105c Add random field multiply/square tests 8ae56e33e7 Merge #879: Avoid passing out-of-bound pointers to 0-size memcpy a4642fa15e configure: replace AC_PATH_PROG to AC_CHECK_PROG 1758a92ffd Merge #950: ci: Add ppc64le build c58c4ea470 ci: Add ppc64le 
build 7973576f6e Merge #662: Add ecmult_gen, ecmult_const and ecmult to benchmark 8f879c2887 Fix array size in bench_ecmult 2fe1b50df1 Add ecmult_gen, ecmult_const and ecmult to benchmark 593e6bad9c Clean up ecmult_bench to make space for more benchmarks 50f3367712 Merge #947: ci: Run PRs on merge result even for i686 a35fdd3478 ci: Run PRs on merge result even for i686 442cee5baf schnorrsig: add algolen argument to nonce_function_hardened df3bfa12c3 schnorrsig: clarify result of calling nonce_function_bip340 without data 99e8614812 README: mention schnorrsig module 3dc8c072b6 Merge #846: ci: Run ASan/LSan and reorganize sanitizer and Valgrind jobs 02dcea1ad9 ci: Make test iterations configurable and tweak for sanitizer builds 489ff5c20a tests: Treat empty SECP2561_TEST_ITERS as if it was unset fcfcb97e74 ci: Simplify to use generic wrapper for QEMU, Valgrind, etc de4157f13a ci: Run ASan/LSan and reorganize sanitizer and Valgrind jobs 399722a63a Merge #941: Clean up git tree 09b3bb8648 Clean up git tree bf0ac46066 Merge #930: Add ARM32/ARM64 CI 202a030f7d Merge #850: add `secp256k1_ec_pubkey_cmp` method 1e78c18d5b Merge bitcoin-core/secp256k1#940: contrib: Explain explicit header guards 69394879b6 Merge #926: secp256k1.h: clarify that by default arguments must be != NULL 6eceec6d56 add `secp256k1_xonly_pubkey_cmp` method 0d9561ae87 add `secp256k1_ec_pubkey_cmp` method 22a9ea154a contrib: Explain explicit header guards 6c52ae8724 Merge #937: Have ge_set_gej_var, gej_double_var and ge_set_all_gej_var initialize all fields of their outputs. 185a6af227 Merge #925: changed include statements without prefix 'include/' 14c9739a1f tests: Improve secp256k1_ge_set_all_gej_var for some infinity inputs 4a19668c37 tests: Test secp256k1_ge_set_all_gej_var for all infinity inputs 3c90bdda95 change local lib headers to be relative for those pointing at "include/" dir 45b6468d7e Have secp256k1_ge_set_all_gej_var initialize all fields. Previous behaviour would not initialize r->y values in the case where infinity is passed in. Furthermore, the previous behaviour wouldn't initialize anything in the case where all inputs were infinity. 31c0f6de41 Have secp256k1_gej_double_var initialize all fields. Previous behaviour would not initialize r->x and r->y values in the case where infinity is passed in. dd6c3de322 Have secp256k1_ge_set_gej_var initialize all fields. Previous behaviour would not initialize r->x and r->y values in the case where infinity is passed in. d0bd2693e3 Merge bitcoin-core/secp256k1#936: Fix gen_context/ASM build on ARM 8bbad7a18e Add asm build to ARM32 CI 7d65ed5214 Add ARM32/ARM64 CI c8483520c9 Makefile.am: Don't pass a variable twice 2161f31785 Makefile.am: Honor config when building gen_context 99f47c20ec gen_context: Don't use external ASM because it complicates the build 98e0358d29 Merge #933: Avoids a missing brace warning in schnorrsig/tests_impl.h on old compilers 99e2d5be0d Avoids a missing brace warning in schnorrsig/tests_impl.h on old compilers. 34388af6b6 Merge #922: Add mingw32-w64/wine CI build 7012a188e6 Merge #928: Define SECP256K1_BUILD in secp256k1.c directly. ed5a199bed tests: fopen /dev/urandom in binary mode ae9e648526 Define SECP256K1_BUILD in secp256k1.c directly. 
4dc37bf81b Add mingw32-w64/wine CI build 0881633dfd secp256k1.h: clarify that by default arguments must be != NULL 9570f674cc Avoid passing out-of-bound pointers to 0-size memcpy git-subtree-dir: src/secp256k1 git-subtree-split: be8d9c262f46309d9b4165b0498b71d704aba8fe --- .cirrus.yml | 202 ++++++++++++--- .gitignore | 9 + Makefile.am | 22 +- README.md | 4 +- build-aux/m4/bitcoin_secp.m4 | 16 ++ ci/cirrus.sh | 33 +-- ci/linux-debian.Dockerfile | 18 +- configure.ac | 120 ++++----- contrib/lax_der_parsing.c | 5 +- contrib/lax_der_parsing.h | 6 + contrib/lax_der_privatekey_parsing.c | 3 +- contrib/lax_der_privatekey_parsing.h | 6 + include/secp256k1.h | 60 ++++- include/secp256k1_extrakeys.h | 21 +- include/secp256k1_schnorrsig.h | 115 ++++++--- obj/.gitignore | 0 src/bench_ecdh.c | 4 +- src/bench_ecmult.c | 224 ++++++++++++++--- src/bench_internal.c | 4 +- src/bench_recover.c | 4 +- src/bench_schnorrsig.c | 19 +- src/bench_sign.c | 2 +- src/bench_verify.c | 2 +- src/ecdsa_impl.h | 2 +- src/ecmult.h | 1 - src/ecmult_gen.h | 1 - src/gen_context.c | 8 +- src/group_impl.h | 12 +- src/modules/ecdh/main_impl.h | 4 +- src/modules/extrakeys/main_impl.h | 30 ++- src/modules/extrakeys/tests_exhaustive_impl.h | 2 +- src/modules/extrakeys/tests_impl.h | 40 ++- src/modules/recovery/main_impl.h | 2 +- src/modules/recovery/tests_exhaustive_impl.h | 2 +- src/modules/schnorrsig/main_impl.h | 69 +++-- .../schnorrsig/tests_exhaustive_impl.h | 26 +- src/modules/schnorrsig/tests_impl.h | 235 ++++++++++++------ src/secp256k1.c | 49 +++- src/testrand_impl.h | 2 +- src/tests.c | 182 +++++++++++++- src/tests_exhaustive.c | 9 +- src/valgrind_ctime_test.c | 12 +- 42 files changed, 1222 insertions(+), 365 deletions(-) delete mode 100644 obj/.gitignore diff --git a/.cirrus.yml b/.cirrus.yml index 506a860336..bf71a70839 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -1,21 +1,28 @@ env: - WIDEMUL: auto + ### compiler options + HOST: + # Specific warnings can be disabled with -Wno-error=foo. + # -pedantic-errors is not equivalent to -Werror=pedantic and thus not implied by -Werror according to the GCC manual. 
+ WERROR_CFLAGS: -Werror -pedantic-errors + MAKEFLAGS: -j2 + BUILD: check + ### secp256k1 config STATICPRECOMPUTATION: yes ECMULTGENPRECISION: auto ASM: no - BUILD: check + WIDEMUL: auto WITH_VALGRIND: yes - RUN_VALGRIND: no EXTRAFLAGS: - HOST: + ### secp256k1 modules + EXPERIMENTAL: no ECDH: no RECOVERY: no SCHNORRSIG: no - EXPERIMENTAL: no - CTIMETEST: yes + ### test options + TEST_ITERS: BENCH: yes - ITERS: 2 - MAKEFLAGS: -j2 + BENCH_ITERS: 2 + CTIMETEST: yes cat_logs_snippet: &CAT_LOGS always: @@ -63,27 +70,8 @@ task: - env: {BUILD: distcheck, WITH_VALGRIND: no, CTIMETEST: no, BENCH: no} - env: {CPPFLAGS: -DDETERMINISTIC} - env: {CFLAGS: -O0, CTIMETEST: no} - - env: - CFLAGS: "-fsanitize=undefined -fno-omit-frame-pointer" - LDFLAGS: "-fsanitize=undefined -fno-omit-frame-pointer" - UBSAN_OPTIONS: "print_stacktrace=1:halt_on_error=1" - ASM: x86_64 - ECDH: yes - RECOVERY: yes - EXPERIMENTAL: yes - SCHNORRSIG: yes - CTIMETEST: no - env: { ECMULTGENPRECISION: 2 } - env: { ECMULTGENPRECISION: 8 } - - env: - RUN_VALGRIND: yes - ASM: x86_64 - ECDH: yes - RECOVERY: yes - EXPERIMENTAL: yes - SCHNORRSIG: yes - EXTRAFLAGS: "--disable-openssl-tests" - BUILD: matrix: - env: CC: gcc @@ -111,6 +99,7 @@ task: CC: i686-linux-gnu-gcc - env: CC: clang --target=i686-pc-linux-gnu -isystem /usr/i686-linux-gnu/include + << : *MERGE_BASE test_script: - ./ci/cirrus.sh << : *CAT_LOGS @@ -181,9 +170,9 @@ task: cpu: 1 memory: 1G env: - QEMU_CMD: qemu-s390x + WRAPPER_CMD: qemu-s390x + TEST_ITERS: 16 HOST: s390x-linux-gnu - BUILD: WITH_VALGRIND: no ECDH: yes RECOVERY: yes @@ -196,3 +185,158 @@ task: - rm /etc/ld.so.cache - ./ci/cirrus.sh << : *CAT_LOGS + +task: + name: "ARM32: Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-arm + TEST_ITERS: 16 + HOST: arm-linux-gnueabihf + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + matrix: + - env: {} + - env: {ASM: arm} + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "ARM64: Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-aarch64 + TEST_ITERS: 16 + HOST: aarch64-linux-gnu + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "ppc64le: Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-ppc64le + TEST_ITERS: 16 + HOST: powerpc64le-linux-gnu + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "x86_64 (mingw32-w64): Windows (Debian stable, Wine)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: wine64-stable + TEST_ITERS: 16 + HOST: x86_64-w64-mingw32 + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +# Sanitizers +task: + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + EXTRAFLAGS: "--disable-openssl-tests" + matrix: + - name: "Valgrind (memcheck)" + env: + # The `--error-exitcode` is required 
to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html) + WRAPPER_CMD: "valgrind --error-exitcode=42" + TEST_ITERS: 16 + - name: "UBSan, ASan, LSan" + env: + CFLAGS: "-fsanitize=undefined,address" + CFLAGS_FOR_BUILD: "-fsanitize=undefined,address" + UBSAN_OPTIONS: "print_stacktrace=1:halt_on_error=1" + ASAN_OPTIONS: "strict_string_checks=1:detect_stack_use_after_return=1:detect_leaks=1" + LSAN_OPTIONS: "use_unaligned=1" + TEST_ITERS: 32 + # Try to cover many configurations with just a tiny matrix. + matrix: + - env: + ASM: auto + STATICPRECOMPUTATION: yes + - env: + ASM: no + STATICPRECOMPUTATION: no + ECMULTGENPRECISION: 2 + matrix: + - env: + CC: clang + - env: + HOST: i686-linux-gnu + CC: i686-linux-gnu-gcc + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "C++ -fpermissive" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + # ./configure correctly errors out when given CC=g++. + # We hack around this by passing CC=g++ only to make. + CC: gcc + MAKEFLAGS: -j2 CC=g++ CFLAGS=-fpermissive + WERROR_CFLAGS: + EXPERIMENTAL: yes + ECDH: yes + RECOVERY: yes + SCHNORRSIG: yes + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS diff --git a/.gitignore b/.gitignore index ccdef02b29..79b740db8a 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,7 @@ aclocal.m4 autom4te.cache/ config.log config.status +conftest* *.tar.gz *.la libtool @@ -33,6 +34,14 @@ libtool *~ *.log *.trs + +coverage/ +coverage.html +coverage.*.html +*.gcda +*.gcno +*.gcov + src/libsecp256k1-config.h src/libsecp256k1-config.h.in src/ecmult_static_context.h diff --git a/Makefile.am b/Makefile.am index 58c9635e53..1e03560884 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,5 +1,9 @@ ACLOCAL_AMFLAGS = -I build-aux/m4 +# AM_CFLAGS will be automatically prepended to CFLAGS by Automake when compiling some foo +# which does not have an explicit foo_CFLAGS variable set. 
+AM_CFLAGS = $(SECP_CFLAGS) + lib_LTLIBRARIES = libsecp256k1.la include_HEADERS = include/secp256k1.h include_HEADERS += include/secp256k1_preallocated.h @@ -68,7 +72,7 @@ endif endif libsecp256k1_la_SOURCES = src/secp256k1.c -libsecp256k1_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES) +libsecp256k1_la_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES) libsecp256k1_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB) if VALGRIND_ENABLED @@ -81,27 +85,27 @@ noinst_PROGRAMS += bench_verify bench_sign bench_internal bench_ecmult bench_verify_SOURCES = src/bench_verify.c bench_verify_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) # SECP_TEST_INCLUDES are only used here for CRYPTO_CPPFLAGS -bench_verify_CPPFLAGS = -DSECP256K1_BUILD $(SECP_TEST_INCLUDES) +bench_verify_CPPFLAGS = $(SECP_TEST_INCLUDES) bench_sign_SOURCES = src/bench_sign.c bench_sign_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) bench_internal_SOURCES = src/bench_internal.c bench_internal_LDADD = $(SECP_LIBS) $(COMMON_LIB) -bench_internal_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES) +bench_internal_CPPFLAGS = $(SECP_INCLUDES) bench_ecmult_SOURCES = src/bench_ecmult.c bench_ecmult_LDADD = $(SECP_LIBS) $(COMMON_LIB) -bench_ecmult_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES) +bench_ecmult_CPPFLAGS = $(SECP_INCLUDES) endif TESTS = if USE_TESTS noinst_PROGRAMS += tests tests_SOURCES = src/tests.c -tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) +tests_CPPFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) if VALGRIND_ENABLED tests_CPPFLAGS += -DVALGRIND noinst_PROGRAMS += valgrind_ctime_test valgrind_ctime_test_SOURCES = src/valgrind_ctime_test.c -valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_LIBS) $(COMMON_LIB) +valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB) endif if !ENABLE_COVERAGE tests_CPPFLAGS += -DVERIFY @@ -114,7 +118,7 @@ endif if USE_EXHAUSTIVE_TESTS noinst_PROGRAMS += exhaustive_tests exhaustive_tests_SOURCES = src/tests_exhaustive.c -exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src $(SECP_INCLUDES) +exhaustive_tests_CPPFLAGS = -I$(top_srcdir)/src $(SECP_INCLUDES) if !ENABLE_COVERAGE exhaustive_tests_CPPFLAGS += -DVERIFY endif @@ -129,10 +133,10 @@ CPPFLAGS_FOR_BUILD +=-I$(top_srcdir) -I$(builddir)/src gen_context_OBJECTS = gen_context.o gen_context_BIN = gen_context$(BUILD_EXEEXT) gen_%.o: src/gen_%.c src/libsecp256k1-config.h - $(CC_FOR_BUILD) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@ + $(CC_FOR_BUILD) $(DEFS) $(CPPFLAGS_FOR_BUILD) $(SECP_CFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@ $(gen_context_BIN): $(gen_context_OBJECTS) - $(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@ + $(CC_FOR_BUILD) $(SECP_CFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@ $(libsecp256k1_la_OBJECTS): src/ecmult_static_context.h $(tests_OBJECTS): src/ecmult_static_context.h diff --git a/README.md b/README.md index 197a56fff8..182c29d9ce 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,7 @@ Features: * Suitable for embedded systems. * Optional module for public key recovery. * Optional module for ECDH key exchange. +* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki) (experimental). 
Experimental features have not received enough scrutiny to satisfy the standard of quality of this library but are made available for testing and review by the community. The APIs of these features should not be considered stable. @@ -96,7 +97,8 @@ To create a report, `gcovr` is recommended, as it includes branch coverage repor To create a HTML report with coloured and annotated source code: - $ gcovr --exclude 'src/bench*' --html --html-details -o coverage.html + $ mkdir -p coverage + $ gcovr --exclude 'src/bench*' --html --html-details -o coverage/coverage.html Reporting a vulnerability ------------ diff --git a/build-aux/m4/bitcoin_secp.m4 b/build-aux/m4/bitcoin_secp.m4 index e57888ca18..8245b2b863 100644 --- a/build-aux/m4/bitcoin_secp.m4 +++ b/build-aux/m4/bitcoin_secp.m4 @@ -82,3 +82,19 @@ if test x"$has_valgrind" != x"yes"; then AC_CHECK_HEADER([valgrind/memcheck.h], [has_valgrind=yes; AC_DEFINE(HAVE_VALGRIND,1,[Define this symbol if valgrind is installed])]) fi ]) + +dnl SECP_TRY_APPEND_CFLAGS(flags, VAR) +dnl Append flags to VAR if CC accepts them. +AC_DEFUN([SECP_TRY_APPEND_CFLAGS], [ + AC_MSG_CHECKING([if ${CC} supports $1]) + SECP_TRY_APPEND_CFLAGS_saved_CFLAGS="$CFLAGS" + CFLAGS="$1 $CFLAGS" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], [flag_works=yes], [flag_works=no]) + AC_MSG_RESULT($flag_works) + CFLAGS="$SECP_TRY_APPEND_CFLAGS_saved_CFLAGS" + if test x"$flag_works" = x"yes"; then + $2="$$2 $1" + fi + unset flag_works + AC_SUBST($2) +]) diff --git a/ci/cirrus.sh b/ci/cirrus.sh index f26ca98d1d..27db1e6779 100755 --- a/ci/cirrus.sh +++ b/ci/cirrus.sh @@ -25,42 +25,27 @@ valgrind --version || true make # Print information about binaries so that we can see that the architecture is correct -file *tests || true +file *tests* || true file bench_* || true file .libs/* || true -if [ -n "$BUILD" ] -then - make "$BUILD" -fi +# This tells `make check` to wrap test invocations. +export LOG_COMPILER="$WRAPPER_CMD" -if [ "$RUN_VALGRIND" = "yes" ] -then - # the `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html) - valgrind --error-exitcode=42 ./tests 16 - valgrind --error-exitcode=42 ./exhaustive_tests -fi +# This limits the iterations in the tests and benchmarks. +export SECP256K1_TEST_ITERS="$TEST_ITERS" +export SECP256K1_BENCH_ITERS="$BENCH_ITERS" -if [ -n "$QEMU_CMD" ] -then - $QEMU_CMD ./tests 16 - $QEMU_CMD ./exhaustive_tests -fi +make "$BUILD" if [ "$BENCH" = "yes" ] then # Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool EXEC='./libtool --mode=execute' - if [ -n "$QEMU_CMD" ] - then - EXEC="$EXEC $QEMU_CMD" - fi - if [ "$RUN_VALGRIND" = "yes" ] + if [ -n "$WRAPPER_CMD" ] then - EXEC="$EXEC valgrind --error-exitcode=42" + EXEC="$EXEC $WRAPPER_CMD" fi - # This limits the iterations in the benchmarks below to ITER iterations. 
- export SECP256K1_BENCH_ITERS="$ITERS" { $EXEC ./bench_ecmult $EXEC ./bench_internal diff --git a/ci/linux-debian.Dockerfile b/ci/linux-debian.Dockerfile index 5967cf8b31..2c02ed69d0 100644 --- a/ci/linux-debian.Dockerfile +++ b/ci/linux-debian.Dockerfile @@ -2,12 +2,24 @@ FROM debian:stable RUN dpkg --add-architecture i386 RUN dpkg --add-architecture s390x +RUN dpkg --add-architecture armhf +RUN dpkg --add-architecture arm64 +RUN dpkg --add-architecture ppc64el RUN apt-get update # dkpg-dev: to make pkg-config work in cross-builds +# llvm: for llvm-symbolizer, which is used by clang's UBSan for symbolized stack traces RUN apt-get install --no-install-recommends --no-upgrade -y \ git ca-certificates \ make automake libtool pkg-config dpkg-dev valgrind qemu-user \ - gcc clang libc6-dbg \ - gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 \ - gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x + gcc clang llvm libc6-dbg \ + g++ \ + gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libubsan1:i386 libasan5:i386 \ + gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x \ + gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libc6-dbg:armhf \ + gcc-aarch64-linux-gnu libc6-dev-arm64-cross libc6-dbg:arm64 \ + gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross libc6-dbg:ppc64el \ + wine gcc-mingw-w64-x86-64 + +# Run a dummy command in wine to make it set up configuration +RUN wine64-stable xcopy || true diff --git a/configure.ac b/configure.ac index 1ed991afa7..9969cfa343 100644 --- a/configure.ac +++ b/configure.ac @@ -8,10 +8,6 @@ AH_TOP([#define LIBSECP256K1_CONFIG_H]) AH_BOTTOM([#endif /*LIBSECP256K1_CONFIG_H*/]) AM_INIT_AUTOMAKE([foreign subdir-objects]) -# Set -g if CFLAGS are not already set, which matches the default autoconf -# behavior (see PROG_CC in the Autoconf manual) with the exception that we don't -# set -O2 here because we set it in any case (see further down). -: ${CFLAGS="-g"} LT_INIT # Make the compilation flags quiet unless V=1 is used. @@ -42,8 +38,8 @@ AM_PROG_AS case $host_os in *darwin*) if test x$cross_compiling != xyes; then - AC_PATH_PROG([BREW],brew,) - if test x$BREW != x; then + AC_CHECK_PROG([BREW], brew, brew) + if test x$BREW = xbrew; then # These Homebrew packages may be keg-only, meaning that they won't be found # in expected paths because they may conflict with system files. Ask # Homebrew where each one is located, then adjust paths accordingly. @@ -58,10 +54,10 @@ case $host_os in VALGRIND_CPPFLAGS="-I$valgrind_prefix/include" fi else - AC_PATH_PROG([PORT],port,) + AC_CHECK_PROG([PORT], port, port) # If homebrew isn't installed and macports is, add the macports default paths # as a last resort. 
- if test x$PORT != x; then + if test x$PORT = xport; then CPPFLAGS="$CPPFLAGS -isystem /opt/local/include" LDFLAGS="$LDFLAGS -L/opt/local/lib" fi @@ -70,35 +66,41 @@ case $host_os in ;; esac -CFLAGS="-W $CFLAGS" - -warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef -Wno-unused-function -Wno-long-long -Wno-overlength-strings" -saved_CFLAGS="$CFLAGS" -CFLAGS="$warn_CFLAGS $CFLAGS" -AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}]) -AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) - -saved_CFLAGS="$CFLAGS" -CFLAGS="-Wconditional-uninitialized $CFLAGS" -AC_MSG_CHECKING([if ${CC} supports -Wconditional-uninitialized]) -AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) - -saved_CFLAGS="$CFLAGS" -CFLAGS="-fvisibility=hidden $CFLAGS" -AC_MSG_CHECKING([if ${CC} supports -fvisibility=hidden]) -AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) +# Try if some desirable compiler flags are supported and append them to SECP_CFLAGS. +# +# These are our own flags, so we append them to our own SECP_CFLAGS variable (instead of CFLAGS) as +# recommended in the automake manual (Section "Flag Variables Ordering"). CFLAGS belongs to the user +# and we are not supposed to touch it. In the Makefile, we will need to ensure that SECP_CFLAGS +# is prepended to CFLAGS when invoking the compiler so that the user always has the last word (flag). +# +# Another advantage of not touching CFLAGS is that the contents of CFLAGS will be picked up by +# libtool for compiling helper executables. For example, when compiling for Windows, libtool will +# generate entire wrapper executables (instead of simple wrapper scripts as on Unix) to ensure +# proper operation of uninstalled programs linked by libtool against the uninstalled shared library. +# These executables are compiled from C source file for which our flags may not be appropriate, +# e.g., -std=c89 flag has lead to undesirable warnings in the past. +# +# TODO We should analogously not touch CPPFLAGS and LDFLAGS but currently there are no issues. +AC_DEFUN([SECP_TRY_APPEND_DEFAULT_CFLAGS], [ + # Try to append -Werror=unknown-warning-option to CFLAGS temporarily. Otherwise clang will + # not error out if it gets unknown warning flags and the checks here will always succeed + # no matter if clang knows the flag or not. + SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS="$CFLAGS" + SECP_TRY_APPEND_CFLAGS([-Werror=unknown-warning-option], CFLAGS) + + SECP_TRY_APPEND_CFLAGS([-std=c89 -pedantic -Wno-long-long -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef], $1) # GCC >= 3.0, -Wlong-long is implied by -pedantic. + SECP_TRY_APPEND_CFLAGS([-Wno-overlength-strings], $1) # GCC >= 4.2, -Woverlength-strings is implied by -pedantic. + SECP_TRY_APPEND_CFLAGS([-Wall], $1) # GCC >= 2.95 and probably many other compilers + SECP_TRY_APPEND_CFLAGS([-Wno-unused-function], $1) # GCC >= 3.0, -Wunused-function is implied by -Wall. + SECP_TRY_APPEND_CFLAGS([-Wextra], $1) # GCC >= 3.4, this is the newer name of -W, which we don't use because older GCCs will warn about unused functions. 
+ SECP_TRY_APPEND_CFLAGS([-Wcast-align], $1) # GCC >= 2.95 + SECP_TRY_APPEND_CFLAGS([-Wcast-align=strict], $1) # GCC >= 8.0 + SECP_TRY_APPEND_CFLAGS([-Wconditional-uninitialized], $1) # Clang >= 3.0 only + SECP_TRY_APPEND_CFLAGS([-fvisibility=hidden], $1) # GCC >= 4.0 + + CFLAGS="$SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS" +]) +SECP_TRY_APPEND_DEFAULT_CFLAGS(SECP_CFLAGS) ### ### Define config arguments @@ -213,10 +215,14 @@ AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"]) if test x"$enable_coverage" = x"yes"; then AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code]) - CFLAGS="-O0 --coverage $CFLAGS" + SECP_CFLAGS="-O0 --coverage $SECP_CFLAGS" LDFLAGS="--coverage $LDFLAGS" else - CFLAGS="-O2 $CFLAGS" + # Most likely the CFLAGS already contain -O2 because that is autoconf's default. + # We still add it here because passing it twice is not an issue, and handling + # this case would just add unnecessary complexity (see #896). + SECP_CFLAGS="-O2 $SECP_CFLAGS" + SECP_CFLAGS_FOR_BUILD="-O2 $SECP_CFLAGS_FOR_BUILD" fi if test x"$req_asm" = x"auto"; then @@ -351,6 +357,9 @@ if test x"$enable_valgrind" = x"yes"; then SECP_INCLUDES="$SECP_INCLUDES $VALGRIND_CPPFLAGS" fi +# Add -Werror and similar flags passed from the outside (for testing, e.g., in CI) +SECP_CFLAGS="$SECP_CFLAGS $WERROR_CFLAGS" + # Handle static precomputation (after everything which modifies CFLAGS and friends) if test x"$use_ecmult_static_precomputation" != x"no"; then if test x"$cross_compiling" = x"no"; then @@ -360,8 +369,9 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then fi # If we're not cross-compiling, simply use the same compiler for building the static precompation code. CC_FOR_BUILD="$CC" - CFLAGS_FOR_BUILD="$CFLAGS" CPPFLAGS_FOR_BUILD="$CPPFLAGS" + SECP_CFLAGS_FOR_BUILD="$SECP_CFLAGS" + CFLAGS_FOR_BUILD="$CFLAGS" LDFLAGS_FOR_BUILD="$LDFLAGS" else AX_PROG_CC_FOR_BUILD @@ -371,22 +381,14 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then cross_compiling=no SAVE_CC="$CC" CC="$CC_FOR_BUILD" - SAVE_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS_FOR_BUILD" SAVE_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS_FOR_BUILD" + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS_FOR_BUILD" SAVE_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS_FOR_BUILD" - warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function" - saved_CFLAGS="$CFLAGS" - CFLAGS="$warn_CFLAGS_FOR_BUILD $CFLAGS" - AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}]) - AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) + SECP_TRY_APPEND_DEFAULT_CFLAGS(SECP_CFLAGS_FOR_BUILD) AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}]) AC_RUN_IFELSE( @@ -394,19 +396,17 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then [working_native_cc=yes], [working_native_cc=no],[:]) - CFLAGS_FOR_BUILD="$CFLAGS" - # Restore the environment cross_compiling=$save_cross_compiling CC="$SAVE_CC" - CFLAGS="$SAVE_CFLAGS" CPPFLAGS="$SAVE_CPPFLAGS" + CFLAGS="$SAVE_CFLAGS" LDFLAGS="$SAVE_LDFLAGS" if test x"$working_native_cc" = x"no"; then AC_MSG_RESULT([no]) set_precomp=no - m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.]) + m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CPPFLAGS_FOR_BUILD, CFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.]) if test x"$use_ecmult_static_precomputation" = x"yes"; then AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce 
working binaries. please_set_for_build]) else @@ -419,8 +419,9 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then fi AC_SUBST(CC_FOR_BUILD) - AC_SUBST(CFLAGS_FOR_BUILD) AC_SUBST(CPPFLAGS_FOR_BUILD) + AC_SUBST(SECP_CFLAGS_FOR_BUILD) + AC_SUBST(CFLAGS_FOR_BUILD) AC_SUBST(LDFLAGS_FOR_BUILD) else set_precomp=no @@ -490,6 +491,7 @@ AC_SUBST(SECP_INCLUDES) AC_SUBST(SECP_LIBS) AC_SUBST(SECP_TEST_LIBS) AC_SUBST(SECP_TEST_INCLUDES) +AC_SUBST(SECP_CFLAGS) AM_CONDITIONAL([ENABLE_COVERAGE], [test x"$enable_coverage" = x"yes"]) AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"]) AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"]) @@ -532,13 +534,15 @@ fi echo echo " valgrind = $enable_valgrind" echo " CC = $CC" -echo " CFLAGS = $CFLAGS" echo " CPPFLAGS = $CPPFLAGS" +echo " SECP_CFLAGS = $SECP_CFLAGS" +echo " CFLAGS = $CFLAGS" echo " LDFLAGS = $LDFLAGS" echo if test x"$set_precomp" = x"yes"; then echo " CC_FOR_BUILD = $CC_FOR_BUILD" -echo " CFLAGS_FOR_BUILD = $CFLAGS_FOR_BUILD" echo " CPPFLAGS_FOR_BUILD = $CPPFLAGS_FOR_BUILD" +echo " SECP_CFLAGS_FOR_BUILD = $SECP_CFLAGS_FOR_BUILD" +echo " CFLAGS_FOR_BUILD = $CFLAGS_FOR_BUILD" echo " LDFLAGS_FOR_BUILD = $LDFLAGS_FOR_BUILD" fi diff --git a/contrib/lax_der_parsing.c b/contrib/lax_der_parsing.c index c1627e37e9..bf562303ed 100644 --- a/contrib/lax_der_parsing.c +++ b/contrib/lax_der_parsing.c @@ -5,7 +5,6 @@ ***********************************************************************/ #include -#include #include "lax_der_parsing.h" @@ -121,7 +120,7 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_ /* Copy R value */ if (rlen > 32) { overflow = 1; - } else { + } else if (rlen) { memcpy(tmpsig + 32 - rlen, input + rpos, rlen); } @@ -133,7 +132,7 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_ /* Copy S value */ if (slen > 32) { overflow = 1; - } else { + } else if (slen) { memcpy(tmpsig + 64 - slen, input + spos, slen); } diff --git a/contrib/lax_der_parsing.h b/contrib/lax_der_parsing.h index 6b7255e28f..034a38e6a0 100644 --- a/contrib/lax_der_parsing.h +++ b/contrib/lax_der_parsing.h @@ -51,7 +51,13 @@ #ifndef SECP256K1_CONTRIB_LAX_DER_PARSING_H #define SECP256K1_CONTRIB_LAX_DER_PARSING_H +/* #include secp256k1.h only when it hasn't been included yet. + This enables this file to be #included directly in other project + files (such as tests.c) without the need to set an explicit -I flag, + which would be necessary to locate secp256k1.h. 
*/ +#ifndef SECP256K1_H #include +#endif #ifdef __cplusplus extern "C" { diff --git a/contrib/lax_der_privatekey_parsing.c b/contrib/lax_der_privatekey_parsing.c index 429760fbb6..a1b8200079 100644 --- a/contrib/lax_der_privatekey_parsing.c +++ b/contrib/lax_der_privatekey_parsing.c @@ -5,7 +5,6 @@ ***********************************************************************/ #include -#include #include "lax_der_privatekey_parsing.h" @@ -45,7 +44,7 @@ int ec_privkey_import_der(const secp256k1_context* ctx, unsigned char *out32, co if (end < privkey+2 || privkey[0] != 0x04 || privkey[1] > 0x20 || end < privkey+2+privkey[1]) { return 0; } - memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]); + if (privkey[1]) memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]); if (!secp256k1_ec_seckey_verify(ctx, out32)) { memset(out32, 0, 32); return 0; diff --git a/contrib/lax_der_privatekey_parsing.h b/contrib/lax_der_privatekey_parsing.h index 602c7c556a..1a8ad8ae0c 100644 --- a/contrib/lax_der_privatekey_parsing.h +++ b/contrib/lax_der_privatekey_parsing.h @@ -28,7 +28,13 @@ #ifndef SECP256K1_CONTRIB_BER_PRIVATEKEY_H #define SECP256K1_CONTRIB_BER_PRIVATEKEY_H +/* #include secp256k1.h only when it hasn't been included yet. + This enables this file to be #included directly in other project + files (such as tests.c) without the need to set an explicit -I flag, + which would be necessary to locate secp256k1.h. */ +#ifndef SECP256K1_H #include +#endif #ifdef __cplusplus extern "C" { diff --git a/include/secp256k1.h b/include/secp256k1.h index d368488af2..7be7fd5723 100644 --- a/include/secp256k1.h +++ b/include/secp256k1.h @@ -7,7 +7,9 @@ extern "C" { #include -/* These rules specify the order of arguments in API calls: +/* Unless explicitly stated all pointer arguments must not be NULL. + * + * The following rules specify the order of arguments in API calls: * * 1. Context pointers go first, followed by output arguments, combined * output/input arguments, and finally input-only arguments. @@ -61,8 +63,9 @@ typedef struct secp256k1_scratch_space_struct secp256k1_scratch_space; * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage, transmission, or - * comparison, use secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse. + * If you need to convert to a format suitable for storage or transmission, + * use secp256k1_ec_pubkey_serialize and secp256k1_ec_pubkey_parse. To + * compare keys, use secp256k1_ec_pubkey_cmp. */ typedef struct { unsigned char data[64]; @@ -127,6 +130,17 @@ typedef int (*secp256k1_nonce_function)( # define SECP256K1_INLINE inline # endif +/** When this header is used at build-time the SECP256K1_BUILD define needs to be set + * to correctly setup export attributes and nullness checks. This is normally done + * by secp256k1.c but to guard against this header being included before secp256k1.c + * has had a chance to set the define (e.g. via test harnesses that just includes + * secp256k1.c) we set SECP256K1_NO_BUILD when this header is processed without the + * BUILD define so this condition can be caught. 
+ */ +#ifndef SECP256K1_BUILD +# define SECP256K1_NO_BUILD +#endif + #ifndef SECP256K1_API # if defined(_WIN32) # ifdef SECP256K1_BUILD @@ -370,6 +384,21 @@ SECP256K1_API int secp256k1_ec_pubkey_serialize( unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +/** Compare two public keys using lexicographic (of compressed serialization) order + * + * Returns: <0 if the first public key is less than the second + * >0 if the first public key is greater than the second + * 0 if the two public keys are equal + * Args: ctx: a secp256k1 context object. + * In: pubkey1: first public key to compare + * pubkey2: second public key to compare + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_cmp( + const secp256k1_context* ctx, + const secp256k1_pubkey* pubkey1, + const secp256k1_pubkey* pubkey2 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + /** Parse an ECDSA signature in compact (64 bytes) format. * * Returns: 1 when the signature could be parsed, 0 otherwise. @@ -764,6 +793,31 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_combine( size_t n ) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +/** Compute a tagged hash as defined in BIP-340. + * + * This is useful for creating a message hash and achieving domain separation + * through an application-specific tag. This function returns + * SHA256(SHA256(tag)||SHA256(tag)||msg). Therefore, tagged hash + * implementations optimized for a specific tag can precompute the SHA256 state + * after hashing the tag hashes. + * + * Returns 0 if the arguments are invalid and 1 otherwise. + * Args: ctx: pointer to a context object + * Out: hash32: pointer to a 32-byte array to store the resulting hash + * In: tag: pointer to an array containing the tag + * taglen: length of the tag array + * msg: pointer to an array containing the message + * msglen: length of the message array + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_tagged_sha256( + const secp256k1_context* ctx, + unsigned char *hash32, + const unsigned char *tag, + size_t taglen, + const unsigned char *msg, + size_t msglen +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5); + #ifdef __cplusplus } #endif diff --git a/include/secp256k1_extrakeys.h b/include/secp256k1_extrakeys.h index 6fc7b290f8..0a37fb6b9d 100644 --- a/include/secp256k1_extrakeys.h +++ b/include/secp256k1_extrakeys.h @@ -15,9 +15,9 @@ extern "C" { * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage, transmission, or - * comparison, use secp256k1_xonly_pubkey_serialize and - * secp256k1_xonly_pubkey_parse. + * If you need to convert to a format suitable for storage, transmission, use + * use secp256k1_xonly_pubkey_serialize and secp256k1_xonly_pubkey_parse. To + * compare keys, use secp256k1_xonly_pubkey_cmp. 
*/ typedef struct { unsigned char data[64]; @@ -67,6 +67,21 @@ SECP256K1_API int secp256k1_xonly_pubkey_serialize( const secp256k1_xonly_pubkey* pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +/** Compare two x-only public keys using lexicographic order + * + * Returns: <0 if the first public key is less than the second + * >0 if the first public key is greater than the second + * 0 if the two public keys are equal + * Args: ctx: a secp256k1 context object. + * In: pubkey1: first public key to compare + * pubkey2: second public key to compare + */ +SECP256K1_API int secp256k1_xonly_pubkey_cmp( + const secp256k1_context* ctx, + const secp256k1_xonly_pubkey* pk1, + const secp256k1_xonly_pubkey* pk2 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + /** Converts a secp256k1_pubkey into a secp256k1_xonly_pubkey. * * Returns: 1 if the public key was successfully converted diff --git a/include/secp256k1_schnorrsig.h b/include/secp256k1_schnorrsig.h index 0150cd3395..74cbcac45e 100644 --- a/include/secp256k1_schnorrsig.h +++ b/include/secp256k1_schnorrsig.h @@ -23,24 +23,29 @@ extern "C" { * * Returns: 1 if a nonce was successfully generated. 0 will cause signing to * return an error. - * Out: nonce32: pointer to a 32-byte array to be filled by the function. - * In: msg32: the 32-byte message hash being verified (will not be NULL) - * key32: pointer to a 32-byte secret key (will not be NULL) - * xonly_pk32: the 32-byte serialized xonly pubkey corresponding to key32 - * (will not be NULL) - * algo16: pointer to a 16-byte array describing the signature - * algorithm (will not be NULL). - * data: Arbitrary data pointer that is passed through. + * Out: nonce32: pointer to a 32-byte array to be filled by the function + * In: msg: the message being verified. Is NULL if and only if msglen + * is 0. + * msglen: the length of the message + * key32: pointer to a 32-byte secret key (will not be NULL) + * xonly_pk32: the 32-byte serialized xonly pubkey corresponding to key32 + * (will not be NULL) + * algo: pointer to an array describing the signature + * algorithm (will not be NULL) + * algolen: the length of the algo array + * data: arbitrary data pointer that is passed through * * Except for test cases, this function should compute some cryptographic hash of * the message, the key, the pubkey, the algorithm description, and data. */ typedef int (*secp256k1_nonce_function_hardened)( unsigned char *nonce32, - const unsigned char *msg32, + const unsigned char *msg, + size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, - const unsigned char *algo16, + const unsigned char *algo, + size_t algolen, void *data ); @@ -50,59 +55,113 @@ typedef int (*secp256k1_nonce_function_hardened)( * * If a data pointer is passed, it is assumed to be a pointer to 32 bytes of * auxiliary random data as defined in BIP-340. If the data pointer is NULL, - * schnorrsig_sign does not produce BIP-340 compliant signatures. The algo16 - * argument must be non-NULL, otherwise the function will fail and return 0. - * The hash will be tagged with algo16 after removing all terminating null - * bytes. Therefore, to create BIP-340 compliant signatures, algo16 must be set - * to "BIP0340/nonce\0\0\0" + * the nonce derivation procedure follows BIP-340 by setting the auxiliary + * random data to zero. The algo argument must be non-NULL, otherwise the + * function will fail and return 0. The hash will be tagged with algo. 
+ * Therefore, to create BIP-340 compliant signatures, algo must be set to + * "BIP0340/nonce" and algolen to 13. */ SECP256K1_API extern const secp256k1_nonce_function_hardened secp256k1_nonce_function_bip340; +/** Data structure that contains additional arguments for schnorrsig_sign_custom. + * + * A schnorrsig_extraparams structure object can be initialized correctly by + * setting it to SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT. + * + * Members: + * magic: set to SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC at initialization + * and has no other function than making sure the object is + * initialized. + * noncefp: pointer to a nonce generation function. If NULL, + * secp256k1_nonce_function_bip340 is used + * ndata: pointer to arbitrary data used by the nonce generation function + * (can be NULL). If it is non-NULL and + * secp256k1_nonce_function_bip340 is used, then ndata must be a + * pointer to 32-byte auxiliary randomness as per BIP-340. + */ +typedef struct { + unsigned char magic[4]; + secp256k1_nonce_function_hardened noncefp; + void* ndata; +} secp256k1_schnorrsig_extraparams; + +#define SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC { 0xda, 0x6f, 0xb3, 0x8c } +#define SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT {\ + SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC,\ + NULL,\ + NULL\ +} + /** Create a Schnorr signature. * * Does _not_ strictly follow BIP-340 because it does not verify the resulting * signature. Instead, you can manually use secp256k1_schnorrsig_verify and * abort if it fails. * - * Otherwise BIP-340 compliant if the noncefp argument is NULL or - * secp256k1_nonce_function_bip340 and the ndata argument is 32-byte auxiliary - * randomness. + * This function only signs 32-byte messages. If you have messages of a + * different size (or the same size but without a context-specific tag + * prefix), it is recommended to create a 32-byte message hash with + * secp256k1_tagged_sha256 and then sign the hash. Tagged hashing allows + * providing an context-specific tag for domain separation. This prevents + * signatures from being valid in multiple contexts by accident. * * Returns 1 on success, 0 on failure. * Args: ctx: pointer to a context object, initialized for signing (cannot be NULL) * Out: sig64: pointer to a 64-byte array to store the serialized signature (cannot be NULL) * In: msg32: the 32-byte message being signed (cannot be NULL) * keypair: pointer to an initialized keypair (cannot be NULL) - * noncefp: pointer to a nonce generation function. If NULL, secp256k1_nonce_function_bip340 is used - * ndata: pointer to arbitrary data used by the nonce generation - * function (can be NULL). If it is non-NULL and - * secp256k1_nonce_function_bip340 is used, then ndata must be a - * pointer to 32-byte auxiliary randomness as per BIP-340. + * aux_rand32: 32 bytes of fresh randomness. While recommended to provide + * this, it is only supplemental to security and can be NULL. See + * BIP-340 "Default Signing" for a full explanation of this + * argument and for guidance if randomness is expensive. */ SECP256K1_API int secp256k1_schnorrsig_sign( const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const secp256k1_keypair *keypair, - secp256k1_nonce_function_hardened noncefp, - void *ndata + unsigned char *aux_rand32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +/** Create a Schnorr signature with a more flexible API. 
+ * + * Same arguments as secp256k1_schnorrsig_sign except that it allows signing + * variable length messages and accepts a pointer to an extraparams object that + * allows customizing signing by passing additional arguments. + * + * Creates the same signatures as schnorrsig_sign if msglen is 32 and the + * extraparams.ndata is the same as aux_rand32. + * + * In: msg: the message being signed. Can only be NULL if msglen is 0. + * msglen: length of the message + * extraparams: pointer to a extraparams object (can be NULL) + */ +SECP256K1_API int secp256k1_schnorrsig_sign_custom( + const secp256k1_context* ctx, + unsigned char *sig64, + const unsigned char *msg, + size_t msglen, + const secp256k1_keypair *keypair, + secp256k1_schnorrsig_extraparams *extraparams +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5); + /** Verify a Schnorr signature. * * Returns: 1: correct signature * 0: incorrect signature * Args: ctx: a secp256k1 context object, initialized for verification. * In: sig64: pointer to the 64-byte signature to verify (cannot be NULL) - * msg32: the 32-byte message being verified (cannot be NULL) + * msg: the message being verified. Can only be NULL if msglen is 0. + * msglen: length of the message * pubkey: pointer to an x-only public key to verify with (cannot be NULL) */ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorrsig_verify( const secp256k1_context* ctx, const unsigned char *sig64, - const unsigned char *msg32, + const unsigned char *msg, + size_t msglen, const secp256k1_xonly_pubkey *pubkey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5); #ifdef __cplusplus } diff --git a/obj/.gitignore b/obj/.gitignore deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/src/bench_ecdh.c b/src/bench_ecdh.c index ab4b8f4244..cb020d26b4 100644 --- a/src/bench_ecdh.c +++ b/src/bench_ecdh.c @@ -6,8 +6,8 @@ #include -#include "include/secp256k1.h" -#include "include/secp256k1_ecdh.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_ecdh.h" #include "util.h" #include "bench.h" diff --git a/src/bench_ecmult.c b/src/bench_ecmult.c index 204e85a5dd..1d463f92d0 100644 --- a/src/bench_ecmult.c +++ b/src/bench_ecmult.c @@ -5,7 +5,8 @@ ***********************************************************************/ #include -#include "include/secp256k1.h" +#include "secp256k1.c" +#include "../include/secp256k1.h" #include "util.h" #include "hash_impl.h" @@ -14,33 +15,177 @@ #include "scalar_impl.h" #include "ecmult_impl.h" #include "bench.h" -#include "secp256k1.c" #define POINTS 32768 +void help(char **argv) { + printf("Benchmark EC multiplication algorithms\n"); + printf("\n"); + printf("Usage: %s \n", argv[0]); + printf("The output shows the number of multiplied and summed points right after the\n"); + printf("function name. 
The letter 'g' indicates that one of the points is the generator.\n"); + printf("The benchmarks are divided by the number of points.\n"); + printf("\n"); + printf("default (ecmult_multi): picks pippenger_wnaf or strauss_wnaf depending on the\n"); + printf(" batch size\n"); + printf("pippenger_wnaf: for all batch sizes\n"); + printf("strauss_wnaf: for all batch sizes\n"); + printf("simple: multiply and sum each point individually\n"); +} + typedef struct { /* Setup once in advance */ secp256k1_context* ctx; secp256k1_scratch_space* scratch; secp256k1_scalar* scalars; secp256k1_ge* pubkeys; + secp256k1_gej* pubkeys_gej; secp256k1_scalar* seckeys; secp256k1_gej* expected_output; secp256k1_ecmult_multi_func ecmult_multi; - /* Changes per test */ + /* Changes per benchmark */ size_t count; int includes_g; - /* Changes per test iteration */ + /* Changes per benchmark iteration, used to pick different scalars and pubkeys + * in each run. */ size_t offset1; size_t offset2; - /* Test output. */ + /* Benchmark output. */ secp256k1_gej* output; } bench_data; -static int bench_callback(secp256k1_scalar* sc, secp256k1_ge* ge, size_t idx, void* arg) { +/* Hashes x into [0, POINTS) twice and store the result in offset1 and offset2. */ +static void hash_into_offset(bench_data* data, size_t x) { + data->offset1 = (x * 0x537b7f6f + 0x8f66a481) % POINTS; + data->offset2 = (x * 0x7f6f537b + 0x6a1a8f49) % POINTS; +} + +/* Check correctness of the benchmark by computing + * sum(outputs) ?= (sum(scalars_gen) + sum(seckeys)*sum(scalars))*G */ +static void bench_ecmult_teardown_helper(bench_data* data, size_t* seckey_offset, size_t* scalar_offset, size_t* scalar_gen_offset, int iters) { + int i; + secp256k1_gej sum_output, tmp; + secp256k1_scalar sum_scalars; + + secp256k1_gej_set_infinity(&sum_output); + secp256k1_scalar_clear(&sum_scalars); + for (i = 0; i < iters; ++i) { + secp256k1_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL); + if (scalar_gen_offset != NULL) { + secp256k1_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]); + } + if (seckey_offset != NULL) { + secp256k1_scalar s = data->seckeys[(*seckey_offset+i) % POINTS]; + secp256k1_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]); + secp256k1_scalar_add(&sum_scalars, &sum_scalars, &s); + } + } + secp256k1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars); + secp256k1_gej_neg(&tmp, &tmp); + secp256k1_gej_add_var(&tmp, &tmp, &sum_output, NULL); + CHECK(secp256k1_gej_is_infinity(&tmp)); +} + +static void bench_ecmult_setup(void* arg) { + bench_data* data = (bench_data*)arg; + /* Re-randomize offset to ensure that we're using different scalars and + * group elements in each run. 
*/ + hash_into_offset(data, data->offset1); +} + +static void bench_ecmult_gen(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters; ++i) { + secp256k1_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % POINTS]); + } +} + +static void bench_ecmult_gen_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters); +} + +static void bench_ecmult_const(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters; ++i) { + secp256k1_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], 256); + } +} + +static void bench_ecmult_const_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters); +} + +static void bench_ecmult_1(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters; ++i) { + secp256k1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL); + } +} + +static void bench_ecmult_1_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters); +} + +static void bench_ecmult_1g(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + secp256k1_scalar zero; + int i; + + secp256k1_scalar_set_int(&zero, 0); + for (i = 0; i < iters; ++i) { + secp256k1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], NULL, &zero, &data->scalars[(data->offset1+i) % POINTS]); + } +} + +static void bench_ecmult_1g_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters); +} + +static void bench_ecmult_2g(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters/2; ++i) { + secp256k1_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]); + } +} + +static void bench_ecmult_2g_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, &data->offset1, iters/2); +} + +static void run_ecmult_bench(bench_data* data, int iters) { + char str[32]; + sprintf(str, "ecmult_gen"); + run_benchmark(str, bench_ecmult_gen, bench_ecmult_setup, bench_ecmult_gen_teardown, data, 10, iters); + sprintf(str, "ecmult_const"); + run_benchmark(str, bench_ecmult_const, bench_ecmult_setup, bench_ecmult_const_teardown, data, 10, iters); + /* ecmult with non generator point */ + sprintf(str, "ecmult 1"); + run_benchmark(str, bench_ecmult_1, bench_ecmult_setup, bench_ecmult_1_teardown, data, 10, iters); + /* ecmult with generator point */ + sprintf(str, "ecmult 1g"); + run_benchmark(str, bench_ecmult_1g, bench_ecmult_setup, bench_ecmult_1g_teardown, data, 10, iters); + /* ecmult with generator and non-generator point. The reported time is per point. 
*/ + sprintf(str, "ecmult 2g"); + run_benchmark(str, bench_ecmult_2g, bench_ecmult_setup, bench_ecmult_2g_teardown, data, 10, 2*iters); +} + +static int bench_ecmult_multi_callback(secp256k1_scalar* sc, secp256k1_ge* ge, size_t idx, void* arg) { bench_data* data = (bench_data*)arg; if (data->includes_g) ++idx; if (idx == 0) { @@ -53,7 +198,7 @@ static int bench_callback(secp256k1_scalar* sc, secp256k1_ge* ge, size_t idx, vo return 1; } -static void bench_ecmult(void* arg, int iters) { +static void bench_ecmult_multi(void* arg, int iters) { bench_data* data = (bench_data*)arg; int includes_g = data->includes_g; @@ -62,19 +207,18 @@ static void bench_ecmult(void* arg, int iters) { iters = iters / data->count; for (iter = 0; iter < iters; ++iter) { - data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g); + data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_ecmult_multi_callback, arg, count - includes_g); data->offset1 = (data->offset1 + count) % POINTS; data->offset2 = (data->offset2 + count - 1) % POINTS; } } -static void bench_ecmult_setup(void* arg) { +static void bench_ecmult_multi_setup(void* arg) { bench_data* data = (bench_data*)arg; - data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS; - data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS; + hash_into_offset(data, data->count); } -static void bench_ecmult_teardown(void* arg, int iters) { +static void bench_ecmult_multi_teardown(void* arg, int iters) { bench_data* data = (bench_data*)arg; int iter; iters = iters / data->count; @@ -88,7 +232,7 @@ static void bench_ecmult_teardown(void* arg, int iters) { static void generate_scalar(uint32_t num, secp256k1_scalar* scalar) { secp256k1_sha256 sha256; - unsigned char c[11] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0}; + unsigned char c[10] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0}; unsigned char buf[32]; int overflow = 0; c[6] = num; @@ -102,7 +246,7 @@ static void generate_scalar(uint32_t num, secp256k1_scalar* scalar) { CHECK(!overflow); } -static void run_test(bench_data* data, size_t count, int includes_g, int num_iters) { +static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_g, int num_iters) { char str[32]; static const secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); size_t iters = 1 + num_iters / count; @@ -112,8 +256,7 @@ static void run_test(bench_data* data, size_t count, int includes_g, int num_ite data->includes_g = includes_g; /* Compute (the negation of) the expected results directly. */ - data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS; - data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS; + hash_into_offset(data, data->count); for (iter = 0; iter < iters; ++iter) { secp256k1_scalar tmp; secp256k1_scalar total = data->scalars[(data->offset1++) % POINTS]; @@ -127,25 +270,26 @@ static void run_test(bench_data* data, size_t count, int includes_g, int num_ite } /* Run the benchmark. */ - sprintf(str, includes_g ? "ecmult_%ig" : "ecmult_%i", (int)count); - run_benchmark(str, bench_ecmult, bench_ecmult_setup, bench_ecmult_teardown, data, 10, count * iters); + sprintf(str, includes_g ? 
"ecmult_multi %ig" : "ecmult_multi %i", (int)count); + run_benchmark(str, bench_ecmult_multi, bench_ecmult_multi_setup, bench_ecmult_multi_teardown, data, 10, count * iters); } int main(int argc, char **argv) { bench_data data; int i, p; - secp256k1_gej* pubkeys_gej; size_t scratch_size; int iters = get_iters(10000); - data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; - data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size); data.ecmult_multi = secp256k1_ecmult_multi_var; if (argc > 1) { - if(have_flag(argc, argv, "pippenger_wnaf")) { + if(have_flag(argc, argv, "-h") + || have_flag(argc, argv, "--help") + || have_flag(argc, argv, "help")) { + help(argv); + return 1; + } else if(have_flag(argc, argv, "pippenger_wnaf")) { printf("Using pippenger_wnaf:\n"); data.ecmult_multi = secp256k1_ecmult_pippenger_batch_single; } else if(have_flag(argc, argv, "strauss_wnaf")) { @@ -153,39 +297,48 @@ int main(int argc, char **argv) { data.ecmult_multi = secp256k1_ecmult_strauss_batch_single; } else if(have_flag(argc, argv, "simple")) { printf("Using simple algorithm:\n"); - data.ecmult_multi = secp256k1_ecmult_multi_var; - secp256k1_scratch_space_destroy(data.ctx, data.scratch); - data.scratch = NULL; } else { - fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]); - fprintf(stderr, "Use 'pippenger_wnaf', 'strauss_wnaf', 'simple' or no argument to benchmark a combined algorithm.\n"); + fprintf(stderr, "%s: unrecognized argument '%s'.\n\n", argv[0], argv[1]); + help(argv); return 1; } } + data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; + if (!have_flag(argc, argv, "simple")) { + data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size); + } else { + data.scratch = NULL; + } + /* Allocate stuff */ data.scalars = malloc(sizeof(secp256k1_scalar) * POINTS); data.seckeys = malloc(sizeof(secp256k1_scalar) * POINTS); data.pubkeys = malloc(sizeof(secp256k1_ge) * POINTS); + data.pubkeys_gej = malloc(sizeof(secp256k1_gej) * POINTS); data.expected_output = malloc(sizeof(secp256k1_gej) * (iters + 1)); data.output = malloc(sizeof(secp256k1_gej) * (iters + 1)); /* Generate a set of scalars, and private/public keypairs. 
*/ - pubkeys_gej = malloc(sizeof(secp256k1_gej) * POINTS); - secp256k1_gej_set_ge(&pubkeys_gej[0], &secp256k1_ge_const_g); + secp256k1_gej_set_ge(&data.pubkeys_gej[0], &secp256k1_ge_const_g); secp256k1_scalar_set_int(&data.seckeys[0], 1); for (i = 0; i < POINTS; ++i) { generate_scalar(i, &data.scalars[i]); if (i) { - secp256k1_gej_double_var(&pubkeys_gej[i], &pubkeys_gej[i - 1], NULL); + secp256k1_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL); secp256k1_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); } } - secp256k1_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS); - free(pubkeys_gej); + secp256k1_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS); + + + /* Initialize offset1 and offset2 */ + hash_into_offset(&data, 0); + run_ecmult_bench(&data, iters); for (i = 1; i <= 8; ++i) { - run_test(&data, i, 1, iters); + run_ecmult_multi_bench(&data, i, 1, iters); } /* This is disabled with low count of iterations because the loop runs 77 times even with iters=1 @@ -194,7 +347,7 @@ int main(int argc, char **argv) { if (iters > 2) { for (p = 0; p <= 11; ++p) { for (i = 9; i <= 16; ++i) { - run_test(&data, i << p, 1, iters); + run_ecmult_multi_bench(&data, i << p, 1, iters); } } } @@ -205,6 +358,7 @@ int main(int argc, char **argv) { secp256k1_context_destroy(data.ctx); free(data.scalars); free(data.pubkeys); + free(data.pubkeys_gej); free(data.seckeys); free(data.output); free(data.expected_output); diff --git a/src/bench_internal.c b/src/bench_internal.c index 73b8a24ccb..161b1c4a47 100644 --- a/src/bench_internal.c +++ b/src/bench_internal.c @@ -5,7 +5,8 @@ ***********************************************************************/ #include -#include "include/secp256k1.h" +#include "secp256k1.c" +#include "../include/secp256k1.h" #include "assumptions.h" #include "util.h" @@ -16,7 +17,6 @@ #include "ecmult_const_impl.h" #include "ecmult_impl.h" #include "bench.h" -#include "secp256k1.c" typedef struct { secp256k1_scalar scalar[2]; diff --git a/src/bench_recover.c b/src/bench_recover.c index 3f6270ce84..4bcac19dc0 100644 --- a/src/bench_recover.c +++ b/src/bench_recover.c @@ -4,8 +4,8 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" -#include "include/secp256k1_recovery.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_recovery.h" #include "util.h" #include "bench.h" diff --git a/src/bench_schnorrsig.c b/src/bench_schnorrsig.c index f7f591c41d..d95bc00f48 100644 --- a/src/bench_schnorrsig.c +++ b/src/bench_schnorrsig.c @@ -8,11 +8,13 @@ #include -#include "include/secp256k1.h" -#include "include/secp256k1_schnorrsig.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_schnorrsig.h" #include "util.h" #include "bench.h" +#define MSGLEN 32 + typedef struct { secp256k1_context *ctx; int n; @@ -26,13 +28,13 @@ typedef struct { void bench_schnorrsig_sign(void* arg, int iters) { bench_schnorrsig_data *data = (bench_schnorrsig_data *)arg; int i; - unsigned char msg[32] = "benchmarkexamplemessagetemplate"; + unsigned char msg[MSGLEN] = {0}; unsigned char sig[64]; for (i = 0; i < iters; i++) { msg[0] = i; msg[1] = i >> 8; - CHECK(secp256k1_schnorrsig_sign(data->ctx, sig, msg, data->keypairs[i], NULL, NULL)); + CHECK(secp256k1_schnorrsig_sign_custom(data->ctx, sig, msg, MSGLEN, data->keypairs[i], NULL)); } } @@ -43,7 +45,7 @@ void bench_schnorrsig_verify(void* arg, int iters) { for (i 
= 0; i < iters; i++) { secp256k1_xonly_pubkey pk; CHECK(secp256k1_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1); - CHECK(secp256k1_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], &pk)); + CHECK(secp256k1_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], MSGLEN, &pk)); } } @@ -58,9 +60,10 @@ int main(void) { data.msgs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); data.sigs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); + CHECK(MSGLEN >= 4); for (i = 0; i < iters; i++) { unsigned char sk[32]; - unsigned char *msg = (unsigned char *)malloc(32); + unsigned char *msg = (unsigned char *)malloc(MSGLEN); unsigned char *sig = (unsigned char *)malloc(64); secp256k1_keypair *keypair = (secp256k1_keypair *)malloc(sizeof(*keypair)); unsigned char *pk_char = (unsigned char *)malloc(32); @@ -69,7 +72,7 @@ int main(void) { msg[1] = sk[1] = i >> 8; msg[2] = sk[2] = i >> 16; msg[3] = sk[3] = i >> 24; - memset(&msg[4], 'm', 28); + memset(&msg[4], 'm', MSGLEN - 4); memset(&sk[4], 's', 28); data.keypairs[i] = keypair; @@ -78,7 +81,7 @@ int main(void) { data.sigs[i] = sig; CHECK(secp256k1_keypair_create(data.ctx, keypair, sk)); - CHECK(secp256k1_schnorrsig_sign(data.ctx, sig, msg, keypair, NULL, NULL)); + CHECK(secp256k1_schnorrsig_sign_custom(data.ctx, sig, msg, MSGLEN, keypair, NULL)); CHECK(secp256k1_keypair_xonly_pub(data.ctx, &pk, NULL, keypair)); CHECK(secp256k1_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1); } diff --git a/src/bench_sign.c b/src/bench_sign.c index 933f367c4b..f659c18c92 100644 --- a/src/bench_sign.c +++ b/src/bench_sign.c @@ -4,7 +4,7 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" +#include "../include/secp256k1.h" #include "util.h" #include "bench.h" diff --git a/src/bench_verify.c b/src/bench_verify.c index c56aefd369..565ae4beec 100644 --- a/src/bench_verify.c +++ b/src/bench_verify.c @@ -7,7 +7,7 @@ #include #include -#include "include/secp256k1.h" +#include "../include/secp256k1.h" #include "util.h" #include "bench.h" diff --git a/src/ecdsa_impl.h b/src/ecdsa_impl.h index 156a33d112..c32141e887 100644 --- a/src/ecdsa_impl.h +++ b/src/ecdsa_impl.h @@ -140,7 +140,7 @@ static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char overflow = 1; } if (!overflow) { - memcpy(ra + 32 - rlen, *sig, rlen); + if (rlen) memcpy(ra + 32 - rlen, *sig, rlen); secp256k1_scalar_set_b32(r, ra, &overflow); } if (overflow) { diff --git a/src/ecmult.h b/src/ecmult.h index 7ab617e20e..84537bbfed 100644 --- a/src/ecmult.h +++ b/src/ecmult.h @@ -17,7 +17,6 @@ typedef struct { secp256k1_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */ } secp256k1_ecmult_context; -static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx); static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc); static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src); diff --git a/src/ecmult_gen.h b/src/ecmult_gen.h index 539618dcbb..05cf4d52cc 100644 --- a/src/ecmult_gen.h +++ b/src/ecmult_gen.h @@ -35,7 +35,6 @@ typedef struct { secp256k1_gej initial; } secp256k1_ecmult_gen_context; -static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context* 
ctx); static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, void **prealloc); static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context* src); diff --git a/src/gen_context.c b/src/gen_context.c index 024c557261..f9176eb996 100644 --- a/src/gen_context.c +++ b/src/gen_context.c @@ -13,7 +13,13 @@ /* We can't require the precomputed tables when creating them. */ #undef USE_ECMULT_STATIC_PRECOMPUTATION -#include "include/secp256k1.h" +/* In principle we could use ASM, but this yields only a minor speedup in + build time and it's very complicated. In particular when cross-compiling, we'd + need to build the ASM for the build and the host machine. */ +#undef USE_EXTERNAL_ASM +#undef USE_ASM_X86_64 + +#include "../include/secp256k1.h" #include "assumptions.h" #include "util.h" #include "field_impl.h" diff --git a/src/group_impl.h b/src/group_impl.h index 19ebd8f44e..47aea32be1 100644 --- a/src/group_impl.h +++ b/src/group_impl.h @@ -100,8 +100,8 @@ static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a) { static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) { secp256k1_fe z2, z3; - r->infinity = a->infinity; if (a->infinity) { + secp256k1_ge_set_infinity(r); return; } secp256k1_fe_inv_var(&a->z, &a->z); @@ -110,8 +110,7 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) { secp256k1_fe_mul(&a->x, &a->x, &z2); secp256k1_fe_mul(&a->y, &a->y, &z3); secp256k1_fe_set_int(&a->z, 1); - r->x = a->x; - r->y = a->y; + secp256k1_ge_set_xy(r, &a->x, &a->y); } static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) { @@ -120,7 +119,9 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a size_t last_i = SIZE_MAX; for (i = 0; i < len; i++) { - if (!a[i].infinity) { + if (a[i].infinity) { + secp256k1_ge_set_infinity(&r[i]); + } else { /* Use destination's x coordinates as scratch space */ if (last_i == SIZE_MAX) { r[i].x = a[i].z; @@ -148,7 +149,6 @@ static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a r[last_i].x = u; for (i = 0; i < len; i++) { - r[i].infinity = a[i].infinity; if (!a[i].infinity) { secp256k1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); } @@ -311,7 +311,7 @@ static void secp256k1_gej_double_var(secp256k1_gej *r, const secp256k1_gej *a, s * point will be gibberish (z = 0 but infinity = 0). 
*/ if (a->infinity) { - r->infinity = 1; + secp256k1_gej_set_infinity(r); if (rzr != NULL) { secp256k1_fe_set_int(rzr, 1); } diff --git a/src/modules/ecdh/main_impl.h b/src/modules/ecdh/main_impl.h index 1ac67086be..5408c9de70 100644 --- a/src/modules/ecdh/main_impl.h +++ b/src/modules/ecdh/main_impl.h @@ -7,8 +7,8 @@ #ifndef SECP256K1_MODULE_ECDH_MAIN_H #define SECP256K1_MODULE_ECDH_MAIN_H -#include "include/secp256k1_ecdh.h" -#include "ecmult_const_impl.h" +#include "../../../include/secp256k1_ecdh.h" +#include "../../ecmult_const_impl.h" static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x32, const unsigned char *y32, void *data) { unsigned char version = (y32[31] & 0x01) | 0x02; diff --git a/src/modules/extrakeys/main_impl.h b/src/modules/extrakeys/main_impl.h index 7390b22718..8607bbede7 100644 --- a/src/modules/extrakeys/main_impl.h +++ b/src/modules/extrakeys/main_impl.h @@ -7,8 +7,8 @@ #ifndef SECP256K1_MODULE_EXTRAKEYS_MAIN_H #define SECP256K1_MODULE_EXTRAKEYS_MAIN_H -#include "include/secp256k1.h" -#include "include/secp256k1_extrakeys.h" +#include "../../../include/secp256k1.h" +#include "../../../include/secp256k1_extrakeys.h" static SECP256K1_INLINE int secp256k1_xonly_pubkey_load(const secp256k1_context* ctx, secp256k1_ge *ge, const secp256k1_xonly_pubkey *pubkey) { return secp256k1_pubkey_load(ctx, ge, (const secp256k1_pubkey *) pubkey); @@ -55,6 +55,32 @@ int secp256k1_xonly_pubkey_serialize(const secp256k1_context* ctx, unsigned char return 1; } +int secp256k1_xonly_pubkey_cmp(const secp256k1_context* ctx, const secp256k1_xonly_pubkey* pk0, const secp256k1_xonly_pubkey* pk1) { + unsigned char out[2][32]; + const secp256k1_xonly_pubkey* pk[2]; + int i; + + VERIFY_CHECK(ctx != NULL); + pk[0] = pk0; pk[1] = pk1; + for (i = 0; i < 2; i++) { + /* If the public key is NULL or invalid, xonly_pubkey_serialize will + * call the illegal_callback and return 0. In that case we will + * serialize the key as all zeros which is less than any valid public + * key. This results in consistent comparisons even if NULL or invalid + * pubkeys are involved and prevents edge cases such as sorting + * algorithms that use this function and do not terminate as a + * result. */ + if (!secp256k1_xonly_pubkey_serialize(ctx, out[i], pk[i])) { + /* Note that xonly_pubkey_serialize should already set the output to + * zero in that case, but it's not guaranteed by the API, we can't + * test it and writing a VERIFY_CHECK is more complex than + * explicitly memsetting (again). */ + memset(out[i], 0, sizeof(out[i])); + } + } + return secp256k1_memcmp_var(out[0], out[1], sizeof(out[1])); +} + /** Keeps a group element as is if it has an even Y and otherwise negates it. * y_parity is set to 0 in the former case and to 1 in the latter case. * Requires that the coordinates of r are normalized. 
*/ diff --git a/src/modules/extrakeys/tests_exhaustive_impl.h b/src/modules/extrakeys/tests_exhaustive_impl.h index 0aca4fb72d..d4a2f5bdf4 100644 --- a/src/modules/extrakeys/tests_exhaustive_impl.h +++ b/src/modules/extrakeys/tests_exhaustive_impl.h @@ -8,7 +8,7 @@ #define SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_H #include "src/modules/extrakeys/main_impl.h" -#include "include/secp256k1_extrakeys.h" +#include "../../../include/secp256k1_extrakeys.h" static void test_exhaustive_extrakeys(const secp256k1_context *ctx, const secp256k1_ge* group) { secp256k1_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; diff --git a/src/modules/extrakeys/tests_impl.h b/src/modules/extrakeys/tests_impl.h index 9473a7dd48..4a5952714c 100644 --- a/src/modules/extrakeys/tests_impl.h +++ b/src/modules/extrakeys/tests_impl.h @@ -7,7 +7,7 @@ #ifndef SECP256K1_MODULE_EXTRAKEYS_TESTS_H #define SECP256K1_MODULE_EXTRAKEYS_TESTS_H -#include "secp256k1_extrakeys.h" +#include "../../../include/secp256k1_extrakeys.h" static secp256k1_context* api_test_context(int flags, int *ecount) { secp256k1_context *ctx0 = secp256k1_context_create(flags); @@ -137,6 +137,43 @@ void test_xonly_pubkey(void) { secp256k1_context_destroy(verify); } +void test_xonly_pubkey_comparison(void) { + unsigned char pk1_ser[32] = { + 0x58, 0x84, 0xb3, 0xa2, 0x4b, 0x97, 0x37, 0x88, 0x92, 0x38, 0xa6, 0x26, 0x62, 0x52, 0x35, 0x11, + 0xd0, 0x9a, 0xa1, 0x1b, 0x80, 0x0b, 0x5e, 0x93, 0x80, 0x26, 0x11, 0xef, 0x67, 0x4b, 0xd9, 0x23 + }; + const unsigned char pk2_ser[32] = { + 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d, + 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c + }; + secp256k1_xonly_pubkey pk1; + secp256k1_xonly_pubkey pk2; + int ecount = 0; + secp256k1_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); + + CHECK(secp256k1_xonly_pubkey_parse(none, &pk1, pk1_ser) == 1); + CHECK(secp256k1_xonly_pubkey_parse(none, &pk2, pk2_ser) == 1); + + CHECK(secp256k1_xonly_pubkey_cmp(none, NULL, &pk2) < 0); + CHECK(ecount == 1); + CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, NULL) > 0); + CHECK(ecount == 2); + CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, &pk2) < 0); + CHECK(secp256k1_xonly_pubkey_cmp(none, &pk2, &pk1) > 0); + CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, &pk1) == 0); + CHECK(secp256k1_xonly_pubkey_cmp(none, &pk2, &pk2) == 0); + CHECK(ecount == 2); + memset(&pk1, 0, sizeof(pk1)); /* illegal pubkey */ + CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, &pk2) < 0); + CHECK(ecount == 3); + CHECK(secp256k1_xonly_pubkey_cmp(none, &pk1, &pk1) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_xonly_pubkey_cmp(none, &pk2, &pk1) > 0); + CHECK(ecount == 6); + + secp256k1_context_destroy(none); +} + void test_xonly_pubkey_tweak(void) { unsigned char zeros64[64] = { 0 }; unsigned char overflows[32]; @@ -540,6 +577,7 @@ void run_extrakeys_tests(void) { test_xonly_pubkey_tweak(); test_xonly_pubkey_tweak_check(); test_xonly_pubkey_tweak_recursive(); + test_xonly_pubkey_comparison(); /* keypair tests */ test_keypair(); diff --git a/src/modules/recovery/main_impl.h b/src/modules/recovery/main_impl.h index 7a440a729b..9e19f2a2dc 100644 --- a/src/modules/recovery/main_impl.h +++ b/src/modules/recovery/main_impl.h @@ -7,7 +7,7 @@ #ifndef SECP256K1_MODULE_RECOVERY_MAIN_H #define SECP256K1_MODULE_RECOVERY_MAIN_H -#include "include/secp256k1_recovery.h" +#include "../../../include/secp256k1_recovery.h" static void secp256k1_ecdsa_recoverable_signature_load(const 
secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) { (void)ctx; diff --git a/src/modules/recovery/tests_exhaustive_impl.h b/src/modules/recovery/tests_exhaustive_impl.h index 0ba9409c69..590a972ed3 100644 --- a/src/modules/recovery/tests_exhaustive_impl.h +++ b/src/modules/recovery/tests_exhaustive_impl.h @@ -8,7 +8,7 @@ #define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H #include "src/modules/recovery/main_impl.h" -#include "include/secp256k1_recovery.h" +#include "../../../include/secp256k1_recovery.h" void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group) { int i, j, k; diff --git a/src/modules/schnorrsig/main_impl.h b/src/modules/schnorrsig/main_impl.h index 22e1b33a5a..693b78f034 100644 --- a/src/modules/schnorrsig/main_impl.h +++ b/src/modules/schnorrsig/main_impl.h @@ -7,9 +7,9 @@ #ifndef SECP256K1_MODULE_SCHNORRSIG_MAIN_H #define SECP256K1_MODULE_SCHNORRSIG_MAIN_H -#include "include/secp256k1.h" -#include "include/secp256k1_schnorrsig.h" -#include "hash.h" +#include "../../../include/secp256k1.h" +#include "../../../include/secp256k1_schnorrsig.h" +#include "../../hash.h" /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/nonce")||SHA256("BIP0340/nonce"). */ @@ -43,16 +43,18 @@ static void secp256k1_nonce_function_bip340_sha256_tagged_aux(secp256k1_sha256 * sha->bytes = 64; } -/* algo16 argument for nonce_function_bip340 to derive the nonce exactly as stated in BIP-340 +/* algo argument for nonce_function_bip340 to derive the nonce exactly as stated in BIP-340 * by using the correct tagged hash function. */ -static const unsigned char bip340_algo16[16] = "BIP0340/nonce\0\0\0"; +static const unsigned char bip340_algo[13] = "BIP0340/nonce"; -static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) { +static const unsigned char schnorrsig_extraparams_magic[4] = SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC; + +static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { secp256k1_sha256 sha; unsigned char masked_key[32]; int i; - if (algo16 == NULL) { + if (algo == NULL) { return 0; } @@ -65,18 +67,14 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms } } - /* Tag the hash with algo16 which is important to avoid nonce reuse across + /* Tag the hash with algo which is important to avoid nonce reuse across * algorithms. If this nonce function is used in BIP-340 signing as defined * in the spec, an optimized tagging implementation is used. 
*/ - if (secp256k1_memcmp_var(algo16, bip340_algo16, 16) == 0) { + if (algolen == sizeof(bip340_algo) + && secp256k1_memcmp_var(algo, bip340_algo, algolen) == 0) { secp256k1_nonce_function_bip340_sha256_tagged(&sha); } else { - int algo16_len = 16; - /* Remove terminating null bytes */ - while (algo16_len > 0 && !algo16[algo16_len - 1]) { - algo16_len--; - } - secp256k1_sha256_initialize_tagged(&sha, algo16, algo16_len); + secp256k1_sha256_initialize_tagged(&sha, algo, algolen); } /* Hash (masked-)key||pk||msg using the tagged hash as per the spec */ @@ -86,7 +84,7 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms secp256k1_sha256_write(&sha, key32, 32); } secp256k1_sha256_write(&sha, xonly_pk32, 32); - secp256k1_sha256_write(&sha, msg32, 32); + secp256k1_sha256_write(&sha, msg, msglen); secp256k1_sha256_finalize(&sha, nonce32); return 1; } @@ -108,23 +106,23 @@ static void secp256k1_schnorrsig_sha256_tagged(secp256k1_sha256 *sha) { sha->bytes = 64; } -static void secp256k1_schnorrsig_challenge(secp256k1_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32) +static void secp256k1_schnorrsig_challenge(secp256k1_scalar* e, const unsigned char *r32, const unsigned char *msg, size_t msglen, const unsigned char *pubkey32) { unsigned char buf[32]; secp256k1_sha256 sha; - /* tagged hash(r.x, pk.x, msg32) */ + /* tagged hash(r.x, pk.x, msg) */ secp256k1_schnorrsig_sha256_tagged(&sha); secp256k1_sha256_write(&sha, r32, 32); secp256k1_sha256_write(&sha, pubkey32, 32); - secp256k1_sha256_write(&sha, msg32, 32); + secp256k1_sha256_write(&sha, msg, msglen); secp256k1_sha256_finalize(&sha, buf); /* Set scalar e to the challenge hash modulo the curve order as per * BIP340. */ secp256k1_scalar_set_b32(e, buf, NULL); } -int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const secp256k1_keypair *keypair, secp256k1_nonce_function_hardened noncefp, void *ndata) { +int secp256k1_schnorrsig_sign_internal(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const secp256k1_keypair *keypair, secp256k1_nonce_function_hardened noncefp, void *ndata) { secp256k1_scalar sk; secp256k1_scalar e; secp256k1_scalar k; @@ -139,7 +137,7 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64 VERIFY_CHECK(ctx != NULL); ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(sig64 != NULL); - ARG_CHECK(msg32 != NULL); + ARG_CHECK(msg != NULL || msglen == 0); ARG_CHECK(keypair != NULL); if (noncefp == NULL) { @@ -156,7 +154,7 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64 secp256k1_scalar_get_b32(seckey, &sk); secp256k1_fe_get_b32(pk_buf, &pk.x); - ret &= !!noncefp(buf, msg32, seckey, pk_buf, bip340_algo16, ndata); + ret &= !!noncefp(buf, msg, msglen, seckey, pk_buf, bip340_algo, sizeof(bip340_algo), ndata); secp256k1_scalar_set_b32(&k, buf, NULL); ret &= !secp256k1_scalar_is_zero(&k); secp256k1_scalar_cmov(&k, &secp256k1_scalar_one, !ret); @@ -174,7 +172,7 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64 secp256k1_fe_normalize_var(&r.x); secp256k1_fe_get_b32(&sig64[0], &r.x); - secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf); + secp256k1_schnorrsig_challenge(&e, &sig64[0], msg, msglen, pk_buf); secp256k1_scalar_mul(&e, &e, &sk); secp256k1_scalar_add(&e, &e, &k); secp256k1_scalar_get_b32(&sig64[32], &e); @@ -187,7 
+185,26 @@ int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64 return ret; } -int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const secp256k1_xonly_pubkey *pubkey) { +int secp256k1_schnorrsig_sign(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg32, const secp256k1_keypair *keypair, unsigned char *aux_rand32) { + return secp256k1_schnorrsig_sign_internal(ctx, sig64, msg32, 32, keypair, secp256k1_nonce_function_bip340, aux_rand32); +} + +int secp256k1_schnorrsig_sign_custom(const secp256k1_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const secp256k1_keypair *keypair, secp256k1_schnorrsig_extraparams *extraparams) { + secp256k1_nonce_function_hardened noncefp = NULL; + void *ndata = NULL; + VERIFY_CHECK(ctx != NULL); + + if (extraparams != NULL) { + ARG_CHECK(secp256k1_memcmp_var(extraparams->magic, + schnorrsig_extraparams_magic, + sizeof(extraparams->magic)) == 0); + noncefp = extraparams->noncefp; + ndata = extraparams->ndata; + } + return secp256k1_schnorrsig_sign_internal(ctx, sig64, msg, msglen, keypair, noncefp, ndata); +} + +int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned char *sig64, const unsigned char *msg, size_t msglen, const secp256k1_xonly_pubkey *pubkey) { secp256k1_scalar s; secp256k1_scalar e; secp256k1_gej rj; @@ -201,7 +218,7 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha VERIFY_CHECK(ctx != NULL); ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(sig64 != NULL); - ARG_CHECK(msg32 != NULL); + ARG_CHECK(msg != NULL || msglen == 0); ARG_CHECK(pubkey != NULL); if (!secp256k1_fe_set_b32(&rx, &sig64[0])) { @@ -219,7 +236,7 @@ int secp256k1_schnorrsig_verify(const secp256k1_context* ctx, const unsigned cha /* Compute e. 
*/ secp256k1_fe_get_b32(buf, &pk.x); - secp256k1_schnorrsig_challenge(&e, &sig64[0], msg32, buf); + secp256k1_schnorrsig_challenge(&e, &sig64[0], msg, msglen, buf); /* Compute rj = s*G + (-e)*pkj */ secp256k1_scalar_negate(&e, &e); diff --git a/src/modules/schnorrsig/tests_exhaustive_impl.h b/src/modules/schnorrsig/tests_exhaustive_impl.h index b4a428729f..d8df9dd2df 100644 --- a/src/modules/schnorrsig/tests_exhaustive_impl.h +++ b/src/modules/schnorrsig/tests_exhaustive_impl.h @@ -7,7 +7,7 @@ #ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H #define SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H -#include "include/secp256k1_schnorrsig.h" +#include "../../../include/secp256k1_schnorrsig.h" #include "src/modules/schnorrsig/main_impl.h" static const unsigned char invalid_pubkey_bytes[][32] = { @@ -58,15 +58,19 @@ static const unsigned char invalid_pubkey_bytes[][32] = { #define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0])) -static int secp256k1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, +static int secp256k1_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg, + size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, - const unsigned char *algo16, void* data) { + const unsigned char *algo, size_t algolen, + void* data) { secp256k1_scalar s; int *idata = data; - (void)msg32; + (void)msg; + (void)msglen; (void)key32; (void)xonly_pk32; - (void)algo16; + (void)algo; + (void)algolen; secp256k1_scalar_set_int(&s, *idata); secp256k1_scalar_get_b32(nonce32, &s); return 1; @@ -101,7 +105,7 @@ static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, cons secp256k1_scalar e; unsigned char msg32[32]; secp256k1_testrand256(msg32); - secp256k1_schnorrsig_challenge(&e, sig64, msg32, pk32); + secp256k1_schnorrsig_challenge(&e, sig64, msg32, sizeof(msg32), pk32); /* Only do work if we hit a challenge we haven't tried before. */ if (!e_done[e]) { /* Iterate over the possible valid last 32 bytes in the signature. @@ -119,7 +123,7 @@ static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, cons secp256k1_testrand256(sig64 + 32); expect_valid = 0; } - valid = secp256k1_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]); + valid = secp256k1_schnorrsig_verify(ctx, sig64, msg32, sizeof(msg32), &pubkeys[d - 1]); CHECK(valid == expect_valid); count_valid += valid; } @@ -137,6 +141,8 @@ static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, cons static void test_exhaustive_schnorrsig_sign(const secp256k1_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const secp256k1_keypair* keypairs, const int* parities) { int d, k; uint64_t iter = 0; + secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + /* Loop over keys. */ for (d = 1; d < EXHAUSTIVE_TEST_ORDER; ++d) { int actual_d = d; @@ -149,19 +155,21 @@ static void test_exhaustive_schnorrsig_sign(const secp256k1_context *ctx, unsign unsigned char sig64[64]; int actual_k = k; if (skip_section(&iter)) continue; + extraparams.noncefp = secp256k1_hardened_nonce_function_smallint; + extraparams.ndata = &k; if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k; /* Generate random messages until all challenges have been tried. 
*/ while (e_count_done < EXHAUSTIVE_TEST_ORDER) { secp256k1_scalar e; secp256k1_testrand256(msg32); - secp256k1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]); + secp256k1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, sizeof(msg32), xonly_pubkey_bytes[d - 1]); /* Only do work if we hit a challenge we haven't tried before. */ if (!e_done[e]) { secp256k1_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER; unsigned char expected_s_bytes[32]; secp256k1_scalar_get_b32(expected_s_bytes, &expected_s); /* Invoke the real function to construct a signature. */ - CHECK(secp256k1_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], secp256k1_hardened_nonce_function_smallint, &k)); + CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig64, msg32, sizeof(msg32), &keypairs[d - 1], &extraparams)); /* The first 32 bytes must match the xonly pubkey for the specified k. */ CHECK(secp256k1_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0); /* The last 32 bytes must match the expected s value. */ diff --git a/src/modules/schnorrsig/tests_impl.h b/src/modules/schnorrsig/tests_impl.h index 338462fc9d..59357afa99 100644 --- a/src/modules/schnorrsig/tests_impl.h +++ b/src/modules/schnorrsig/tests_impl.h @@ -7,16 +7,16 @@ #ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_H #define SECP256K1_MODULE_SCHNORRSIG_TESTS_H -#include "secp256k1_schnorrsig.h" +#include "../../../include/secp256k1_schnorrsig.h" /* Checks that a bit flip in the n_flip-th argument (that has n_bytes many * bytes) changes the hash function */ -void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) { +void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes, size_t msglen, size_t algolen) { unsigned char nonces[2][32]; - CHECK(nonce_function_bip340(nonces[0], args[0], args[1], args[2], args[3], args[4]) == 1); + CHECK(nonce_function_bip340(nonces[0], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1); secp256k1_testrand_flip(args[n_flip], n_bytes); - CHECK(nonce_function_bip340(nonces[1], args[0], args[1], args[2], args[3], args[4]) == 1); + CHECK(nonce_function_bip340(nonces[1], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1); CHECK(secp256k1_memcmp_var(nonces[0], nonces[1], 32) != 0); } @@ -34,11 +34,13 @@ void test_sha256_eq(const secp256k1_sha256 *sha1, const secp256k1_sha256 *sha2) void run_nonce_function_bip340_tests(void) { unsigned char tag[13] = "BIP0340/nonce"; unsigned char aux_tag[11] = "BIP0340/aux"; - unsigned char algo16[16] = "BIP0340/nonce\0\0\0"; + unsigned char algo[13] = "BIP0340/nonce"; + size_t algolen = sizeof(algo); secp256k1_sha256 sha; secp256k1_sha256 sha_optimized; unsigned char nonce[32]; unsigned char msg[32]; + size_t msglen = sizeof(msg); unsigned char key[32]; unsigned char pk[32]; unsigned char aux_rand[32]; @@ -68,33 +70,45 @@ void run_nonce_function_bip340_tests(void) { args[0] = msg; args[1] = key; args[2] = pk; - args[3] = algo16; + args[3] = algo; args[4] = aux_rand; for (i = 0; i < count; i++) { - nonce_function_bip340_bitflip(args, 0, 32); - nonce_function_bip340_bitflip(args, 1, 32); - nonce_function_bip340_bitflip(args, 2, 32); - /* Flip algo16 special case "BIP0340/nonce" */ - nonce_function_bip340_bitflip(args, 3, 16); - /* Flip algo16 again */ - nonce_function_bip340_bitflip(args, 3, 16); - nonce_function_bip340_bitflip(args, 4, 32); + nonce_function_bip340_bitflip(args, 0, 32, msglen, algolen); + nonce_function_bip340_bitflip(args, 1, 32, 
msglen, algolen); + nonce_function_bip340_bitflip(args, 2, 32, msglen, algolen); + /* Flip algo special case "BIP0340/nonce" */ + nonce_function_bip340_bitflip(args, 3, algolen, msglen, algolen); + /* Flip algo again */ + nonce_function_bip340_bitflip(args, 3, algolen, msglen, algolen); + nonce_function_bip340_bitflip(args, 4, 32, msglen, algolen); } - /* NULL algo16 is disallowed */ - CHECK(nonce_function_bip340(nonce, msg, key, pk, NULL, NULL) == 0); - /* Empty algo16 is fine */ - memset(algo16, 0x00, 16); - CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1); - /* algo16 with terminating null bytes is fine */ - algo16[1] = 65; - CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1); - /* Other algo16 is fine */ - memset(algo16, 0xFF, 16); - CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1); + /* NULL algo is disallowed */ + CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, NULL, 0, NULL) == 0); + CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); + /* Other algo is fine */ + secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, algo, algolen); + CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); + + for (i = 0; i < count; i++) { + unsigned char nonce2[32]; + uint32_t offset = secp256k1_testrand_int(msglen - 1); + size_t msglen_tmp = (msglen + offset) % msglen; + size_t algolen_tmp; + + /* Different msglen gives different nonce */ + CHECK(nonce_function_bip340(nonce2, msg, msglen_tmp, key, pk, algo, algolen, NULL) == 1); + CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0); + + /* Different algolen gives different nonce */ + offset = secp256k1_testrand_int(algolen - 1); + algolen_tmp = (algolen + offset) % algolen; + CHECK(nonce_function_bip340(nonce2, msg, msglen, key, pk, algo, algolen_tmp, NULL) == 1); + CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0); + } /* NULL aux_rand argument is allowed. 
*/ - CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1); + CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); } void test_schnorrsig_api(void) { @@ -103,10 +117,12 @@ void test_schnorrsig_api(void) { unsigned char sk3[32]; unsigned char msg[32]; secp256k1_keypair keypairs[3]; - secp256k1_keypair invalid_keypair = { 0 }; + secp256k1_keypair invalid_keypair = {{ 0 }}; secp256k1_xonly_pubkey pk[3]; secp256k1_xonly_pubkey zero_pk; unsigned char sig[64]; + secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + secp256k1_schnorrsig_extraparams invalid_extraparams = {{ 0 }, NULL, NULL}; /** setup **/ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); @@ -138,36 +154,60 @@ void test_schnorrsig_api(void) { /** main test body **/ ecount = 0; - CHECK(secp256k1_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL, NULL) == 0); + CHECK(secp256k1_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL) == 0); + CHECK(ecount == 2); + CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL) == 1); + CHECK(ecount == 2); + CHECK(secp256k1_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL) == 0); + CHECK(ecount == 3); + CHECK(secp256k1_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, NULL, NULL) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL) == 0); + CHECK(ecount == 6); + + ecount = 0; + CHECK(secp256k1_schnorrsig_sign_custom(none, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 1); - CHECK(secp256k1_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL, NULL) == 0); + CHECK(secp256k1_schnorrsig_sign_custom(vrfy, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 2); - CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1); + CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1); CHECK(ecount == 2); - CHECK(secp256k1_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL, NULL) == 0); + CHECK(secp256k1_schnorrsig_sign_custom(sign, NULL, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 3); - CHECK(secp256k1_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL, NULL) == 0); + CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, NULL, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 4); - CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, NULL, NULL, NULL) == 0); + CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, NULL, 0, &keypairs[0], &extraparams) == 1); + CHECK(ecount == 4); + CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), NULL, &extraparams) == 0); CHECK(ecount == 5); - CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL, NULL) == 0); + CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &invalid_keypair, &extraparams) == 0); + CHECK(ecount == 6); + CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], NULL) == 1); CHECK(ecount == 6); + CHECK(secp256k1_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], &invalid_extraparams) == 0); + CHECK(ecount == 7); ecount = 0; - CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1); - CHECK(secp256k1_schnorrsig_verify(none, sig, msg, &pk[0]) == 0); + 
CHECK(secp256k1_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL) == 1); + CHECK(secp256k1_schnorrsig_verify(none, sig, msg, sizeof(msg), &pk[0]) == 0); CHECK(ecount == 1); - CHECK(secp256k1_schnorrsig_verify(sign, sig, msg, &pk[0]) == 0); + CHECK(secp256k1_schnorrsig_verify(sign, sig, msg, sizeof(msg), &pk[0]) == 0); CHECK(ecount == 2); - CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, &pk[0]) == 1); + CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), &pk[0]) == 1); CHECK(ecount == 2); - CHECK(secp256k1_schnorrsig_verify(vrfy, NULL, msg, &pk[0]) == 0); + CHECK(secp256k1_schnorrsig_verify(vrfy, NULL, msg, sizeof(msg), &pk[0]) == 0); CHECK(ecount == 3); - CHECK(secp256k1_schnorrsig_verify(vrfy, sig, NULL, &pk[0]) == 0); + CHECK(secp256k1_schnorrsig_verify(vrfy, sig, NULL, sizeof(msg), &pk[0]) == 0); + CHECK(ecount == 4); + CHECK(secp256k1_schnorrsig_verify(vrfy, sig, NULL, 0, &pk[0]) == 0); CHECK(ecount == 4); - CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, NULL) == 0); + CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), NULL) == 0); CHECK(ecount == 5); - CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, &zero_pk) == 0); + CHECK(secp256k1_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), &zero_pk) == 0); CHECK(ecount == 6); secp256k1_context_destroy(none); @@ -179,7 +219,7 @@ void test_schnorrsig_api(void) { /* Checks that hash initialized by secp256k1_schnorrsig_sha256_tagged has the * expected state. */ void test_schnorrsig_sha256_tagged(void) { - char tag[17] = "BIP0340/challenge"; + unsigned char tag[17] = "BIP0340/challenge"; secp256k1_sha256 sha; secp256k1_sha256 sha_optimized; @@ -190,19 +230,19 @@ void test_schnorrsig_sha256_tagged(void) { /* Helper function for schnorrsig_bip_vectors * Signs the message and checks that it's the same as expected_sig. */ -void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, unsigned char *aux_rand, const unsigned char *msg, const unsigned char *expected_sig) { +void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, unsigned char *aux_rand, const unsigned char *msg32, const unsigned char *expected_sig) { unsigned char sig[64]; secp256k1_keypair keypair; secp256k1_xonly_pubkey pk, pk_expected; CHECK(secp256k1_keypair_create(ctx, &keypair, sk)); - CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand)); + CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg32, &keypair, aux_rand)); CHECK(secp256k1_memcmp_var(sig, expected_sig, 64) == 0); CHECK(secp256k1_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized)); CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); CHECK(secp256k1_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0); - CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &pk)); + CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg32, 32, &pk)); } /* Helper function for schnorrsig_bip_vectors @@ -211,7 +251,7 @@ void test_schnorrsig_bip_vectors_check_verify(const unsigned char *pk_serialized secp256k1_xonly_pubkey pk; CHECK(secp256k1_xonly_pubkey_parse(ctx, &pk, pk_serialized)); - CHECK(expected == secp256k1_schnorrsig_verify(ctx, sig, msg32, &pk)); + CHECK(expected == secp256k1_schnorrsig_verify(ctx, sig, msg32, 32, &pk)); } /* Test vectors according to BIP-340 ("Schnorr Signatures for secp256k1"). 
See @@ -634,22 +674,26 @@ void test_schnorrsig_bip_vectors(void) { } /* Nonce function that returns constant 0 */ -static int nonce_function_failing(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) { - (void) msg32; +static int nonce_function_failing(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { + (void) msg; + (void) msglen; (void) key32; (void) xonly_pk32; - (void) algo16; + (void) algo; + (void) algolen; (void) data; (void) nonce32; return 0; } /* Nonce function that sets nonce to 0 */ -static int nonce_function_0(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) { - (void) msg32; +static int nonce_function_0(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { + (void) msg; + (void) msglen; (void) key32; (void) xonly_pk32; - (void) algo16; + (void) algo; + (void) algolen; (void) data; memset(nonce32, 0, 32); @@ -657,11 +701,13 @@ static int nonce_function_0(unsigned char *nonce32, const unsigned char *msg32, } /* Nonce function that sets nonce to 0xFF...0xFF */ -static int nonce_function_overflowing(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) { - (void) msg32; +static int nonce_function_overflowing(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { + (void) msg; + (void) msglen; (void) key32; (void) xonly_pk32; - (void) algo16; + (void) algo; + (void) algolen; (void) data; memset(nonce32, 0xFF, 32); @@ -670,24 +716,45 @@ static int nonce_function_overflowing(unsigned char *nonce32, const unsigned cha void test_schnorrsig_sign(void) { unsigned char sk[32]; + secp256k1_xonly_pubkey pk; secp256k1_keypair keypair; const unsigned char msg[32] = "this is a msg for a schnorrsig.."; unsigned char sig[64]; + unsigned char sig2[64]; unsigned char zeros64[64] = { 0 }; + secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + unsigned char aux_rand[32]; secp256k1_testrand256(sk); + secp256k1_testrand256(aux_rand); CHECK(secp256k1_keypair_create(ctx, &keypair, sk)); - CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); + CHECK(secp256k1_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); + CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL) == 1); + CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); /* Test different nonce functions */ + CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); memset(sig, 1, sizeof(sig)); - CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0); + extraparams.noncefp = nonce_function_failing; + CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0); memset(&sig, 1, sizeof(sig)); - CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, 
&keypair, nonce_function_0, NULL) == 0); + extraparams.noncefp = nonce_function_0; + CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) == 0); - CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1); - CHECK(secp256k1_memcmp_var(sig, zeros64, sizeof(sig)) != 0); + memset(&sig, 1, sizeof(sig)); + extraparams.noncefp = nonce_function_overflowing; + CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); + + /* When using the default nonce function, schnorrsig_sign_custom produces + * the same result as schnorrsig_sign with aux_rand = extraparams.ndata */ + extraparams.noncefp = NULL; + extraparams.ndata = aux_rand; + CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(secp256k1_schnorrsig_sign(ctx, sig2, msg, &keypair, extraparams.ndata) == 1); + CHECK(secp256k1_memcmp_var(sig, sig2, sizeof(sig)) == 0); } #define N_SIGS 3 @@ -709,8 +776,8 @@ void test_schnorrsig_sign_verify(void) { for (i = 0; i < N_SIGS; i++) { secp256k1_testrand256(msg[i]); - CHECK(secp256k1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL)); - CHECK(secp256k1_schnorrsig_verify(ctx, sig[i], msg[i], &pk)); + CHECK(secp256k1_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL)); + CHECK(secp256k1_schnorrsig_verify(ctx, sig[i], msg[i], sizeof(msg[i]), &pk)); } { @@ -720,36 +787,54 @@ void test_schnorrsig_sign_verify(void) { size_t byte_idx = secp256k1_testrand_int(32); unsigned char xorbyte = secp256k1_testrand_int(254)+1; sig[sig_idx][byte_idx] ^= xorbyte; - CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); sig[sig_idx][byte_idx] ^= xorbyte; byte_idx = secp256k1_testrand_int(32); sig[sig_idx][32+byte_idx] ^= xorbyte; - CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); sig[sig_idx][32+byte_idx] ^= xorbyte; byte_idx = secp256k1_testrand_int(32); msg[sig_idx][byte_idx] ^= xorbyte; - CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(!secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); msg[sig_idx][byte_idx] ^= xorbyte; /* Check that above bitflips have been reversed correctly */ - CHECK(secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(secp256k1_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); } /* Test overflowing s */ - CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL)); - CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL)); + CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); memset(&sig[0][32], 0xFF, 32); - CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); /* Test negative s */ - CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL)); - CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(secp256k1_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL)); + 
CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); secp256k1_scalar_set_b32(&s, &sig[0][32], NULL); secp256k1_scalar_negate(&s, &s); secp256k1_scalar_get_b32(&sig[0][32], &s); - CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(!secp256k1_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); + + /* The empty message can be signed & verified */ + CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig[0], NULL, 0, &keypair, NULL) == 1); + CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], NULL, 0, &pk) == 1); + + { + /* Test varying message lengths */ + unsigned char msg_large[32 * 8]; + uint32_t msglen = secp256k1_testrand_int(sizeof(msg_large)); + for (i = 0; i < sizeof(msg_large); i += 32) { + secp256k1_testrand256(&msg_large[i]); + } + CHECK(secp256k1_schnorrsig_sign_custom(ctx, sig[0], msg_large, msglen, &keypair, NULL) == 1); + CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 1); + /* Verification for a random wrong message length fails */ + msglen = (msglen + (sizeof(msg_large) - 1)) % sizeof(msg_large); + CHECK(secp256k1_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 0); + } } #undef N_SIGS @@ -777,10 +862,10 @@ void test_schnorrsig_taproot(void) { /* Key spend */ secp256k1_testrand256(msg); - CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); + CHECK(secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL) == 1); /* Verify key spend */ CHECK(secp256k1_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1); - CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, &output_pk) == 1); + CHECK(secp256k1_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &output_pk) == 1); /* Script spend */ CHECK(secp256k1_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1); diff --git a/src/secp256k1.c b/src/secp256k1.c index aef3f99ac3..9908cab864 100644 --- a/src/secp256k1.c +++ b/src/secp256k1.c @@ -4,8 +4,10 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" -#include "include/secp256k1_preallocated.h" +#define SECP256K1_BUILD + +#include "../include/secp256k1.h" +#include "../include/secp256k1_preallocated.h" #include "assumptions.h" #include "util.h" @@ -21,6 +23,10 @@ #include "scratch_impl.h" #include "selftest.h" +#ifdef SECP256K1_NO_BUILD +# error "secp256k1.h processed without SECP256K1_BUILD defined while building secp256k1.c" +#endif + #if defined(VALGRIND) # include #endif @@ -316,6 +322,32 @@ int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *o return ret; } +int secp256k1_ec_pubkey_cmp(const secp256k1_context* ctx, const secp256k1_pubkey* pubkey0, const secp256k1_pubkey* pubkey1) { + unsigned char out[2][33]; + const secp256k1_pubkey* pk[2]; + int i; + + VERIFY_CHECK(ctx != NULL); + pk[0] = pubkey0; pk[1] = pubkey1; + for (i = 0; i < 2; i++) { + size_t out_size = sizeof(out[i]); + /* If the public key is NULL or invalid, ec_pubkey_serialize will call + * the illegal_callback and return 0. In that case we will serialize the + * key as all zeros which is less than any valid public key. This + * results in consistent comparisons even if NULL or invalid pubkeys are + * involved and prevents edge cases such as sorting algorithms that use + * this function and do not terminate as a result. 
*/ + if (!secp256k1_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { + /* Note that ec_pubkey_serialize should already set the output to + * zero in that case, but it's not guaranteed by the API, we can't + * test it and writing a VERIFY_CHECK is more complex than + * explicitly memsetting (again). */ + memset(out[i], 0, sizeof(out[i])); + } + } + return secp256k1_memcmp_var(out[0], out[1], sizeof(out[0])); +} + static void secp256k1_ecdsa_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_ecdsa_signature* sig) { (void)ctx; if (sizeof(secp256k1_scalar) == 32) { @@ -758,6 +790,19 @@ int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey * return 1; } +int secp256k1_tagged_sha256(const secp256k1_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { + secp256k1_sha256 sha; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(hash32 != NULL); + ARG_CHECK(tag != NULL); + ARG_CHECK(msg != NULL); + + secp256k1_sha256_initialize_tagged(&sha, tag, taglen); + secp256k1_sha256_write(&sha, msg, msglen); + secp256k1_sha256_finalize(&sha, hash32); + return 1; +} + #ifdef ENABLE_MODULE_ECDH # include "modules/ecdh/main_impl.h" #endif diff --git a/src/testrand_impl.h b/src/testrand_impl.h index e643778f36..c8d30ef6a8 100644 --- a/src/testrand_impl.h +++ b/src/testrand_impl.h @@ -127,7 +127,7 @@ static void secp256k1_testrand_init(const char* hexseed) { pos++; } } else { - FILE *frand = fopen("/dev/urandom", "r"); + FILE *frand = fopen("/dev/urandom", "rb"); if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) { uint64_t t = time(NULL) * (uint64_t)1337; fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n"); diff --git a/src/tests.c b/src/tests.c index a146394305..99d9468e29 100644 --- a/src/tests.c +++ b/src/tests.c @@ -15,8 +15,8 @@ #include #include "secp256k1.c" -#include "include/secp256k1.h" -#include "include/secp256k1_preallocated.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_preallocated.h" #include "testrand_impl.h" #include "util.h" @@ -30,8 +30,8 @@ void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps) # endif #endif -#include "contrib/lax_der_parsing.c" -#include "contrib/lax_der_privatekey_parsing.c" +#include "../contrib/lax_der_parsing.c" +#include "../contrib/lax_der_privatekey_parsing.c" #include "modinv32_impl.h" #ifdef SECP256K1_WIDEMUL_INT128 @@ -564,6 +564,38 @@ void run_rfc6979_hmac_sha256_tests(void) { secp256k1_rfc6979_hmac_sha256_finalize(&rng); } +void run_tagged_sha256_tests(void) { + int ecount = 0; + secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); + unsigned char tag[32] = { 0 }; + unsigned char msg[32] = { 0 }; + unsigned char hash32[32]; + unsigned char hash_expected[32] = { + 0x04, 0x7A, 0x5E, 0x17, 0xB5, 0x86, 0x47, 0xC1, + 0x3C, 0xC6, 0xEB, 0xC0, 0xAA, 0x58, 0x3B, 0x62, + 0xFB, 0x16, 0x43, 0x32, 0x68, 0x77, 0x40, 0x6C, + 0xE2, 0x76, 0x55, 0x9A, 0x3B, 0xDE, 0x55, 0xB3 + }; + + secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + + /* API test */ + CHECK(secp256k1_tagged_sha256(none, hash32, tag, sizeof(tag), msg, sizeof(msg)) == 1); + CHECK(secp256k1_tagged_sha256(none, NULL, tag, sizeof(tag), msg, sizeof(msg)) == 0); + CHECK(ecount == 1); + CHECK(secp256k1_tagged_sha256(none, hash32, NULL, 0, msg, sizeof(msg)) == 0); + 
CHECK(ecount == 2); + CHECK(secp256k1_tagged_sha256(none, hash32, tag, sizeof(tag), NULL, 0) == 0); + CHECK(ecount == 3); + + /* Static test vector */ + memcpy(tag, "tag", 3); + memcpy(msg, "msg", 3); + CHECK(secp256k1_tagged_sha256(none, hash32, tag, 3, msg, 3) == 1); + CHECK(secp256k1_memcmp_var(hash32, hash_expected, sizeof(hash32)) == 0); + secp256k1_context_destroy(none); +} + /***** RANDOM TESTS *****/ void test_rand_bits(int rand32, int bits) { @@ -2508,6 +2540,70 @@ void run_field_misc(void) { } } +void test_fe_mul(const secp256k1_fe* a, const secp256k1_fe* b, int use_sqr) +{ + secp256k1_fe c, an, bn; + /* Variables in BE 32-byte format. */ + unsigned char a32[32], b32[32], c32[32]; + /* Variables in LE 16x uint16_t format. */ + uint16_t a16[16], b16[16], c16[16]; + /* Field modulus in LE 16x uint16_t format. */ + static const uint16_t m16[16] = { + 0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + }; + uint16_t t16[32]; + int i; + + /* Compute C = A * B in fe format. */ + c = *a; + if (use_sqr) { + secp256k1_fe_sqr(&c, &c); + } else { + secp256k1_fe_mul(&c, &c, b); + } + + /* Convert A, B, C into LE 16x uint16_t format. */ + an = *a; + bn = *b; + secp256k1_fe_normalize_var(&c); + secp256k1_fe_normalize_var(&an); + secp256k1_fe_normalize_var(&bn); + secp256k1_fe_get_b32(a32, &an); + secp256k1_fe_get_b32(b32, &bn); + secp256k1_fe_get_b32(c32, &c); + for (i = 0; i < 16; ++i) { + a16[i] = a32[31 - 2*i] + ((uint16_t)a32[30 - 2*i] << 8); + b16[i] = b32[31 - 2*i] + ((uint16_t)b32[30 - 2*i] << 8); + c16[i] = c32[31 - 2*i] + ((uint16_t)c32[30 - 2*i] << 8); + } + /* Compute T = A * B in LE 16x uint16_t format. */ + mulmod256(t16, a16, b16, m16); + /* Compare */ + CHECK(secp256k1_memcmp_var(t16, c16, 32) == 0); +} + +void run_fe_mul(void) { + int i; + for (i = 0; i < 100 * count; ++i) { + secp256k1_fe a, b, c, d; + random_fe(&a); + random_field_element_magnitude(&a); + random_fe(&b); + random_field_element_magnitude(&b); + random_fe_test(&c); + random_field_element_magnitude(&c); + random_fe_test(&d); + random_field_element_magnitude(&d); + test_fe_mul(&a, &a, 1); + test_fe_mul(&c, &c, 1); + test_fe_mul(&a, &b, 0); + test_fe_mul(&a, &c, 0); + test_fe_mul(&c, &b, 0); + test_fe_mul(&c, &d, 0); + } +} + void run_sqr(void) { secp256k1_fe x, s; @@ -2595,7 +2691,7 @@ void test_inverse_scalar(secp256k1_scalar* out, const secp256k1_scalar* x, int v { secp256k1_scalar l, r, t; - (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse_var)(&l, x); /* l = 1/x */ + (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse)(&l, x); /* l = 1/x */ if (out) *out = l; if (secp256k1_scalar_is_zero(x)) { CHECK(secp256k1_scalar_is_zero(&l)); @@ -2605,9 +2701,9 @@ void test_inverse_scalar(secp256k1_scalar* out, const secp256k1_scalar* x, int v CHECK(secp256k1_scalar_is_one(&t)); /* x*(1/x) == 1 */ secp256k1_scalar_add(&r, x, &scalar_minus_one); /* r = x-1 */ if (secp256k1_scalar_is_zero(&r)) return; - (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse_var)(&r, &r); /* r = 1/(x-1) */ + (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse)(&r, &r); /* r = 1/(x-1) */ secp256k1_scalar_add(&l, &scalar_minus_one, &l); /* l = 1/x-1 */ - (var ? secp256k1_scalar_inverse_var : secp256k1_scalar_inverse_var)(&l, &l); /* l = 1/(1/x-1) */ + (var ? 
secp256k1_scalar_inverse_var : secp256k1_scalar_inverse)(&l, &l); /* l = 1/(1/x-1) */ secp256k1_scalar_add(&l, &l, &secp256k1_scalar_one); /* l = 1/(1/x-1)+1 */ secp256k1_scalar_add(&l, &r, &l); /* l = 1/(1/x-1)+1 + 1/(x-1) */ CHECK(secp256k1_scalar_is_zero(&l)); /* l == 0 */ @@ -3101,20 +3197,34 @@ void test_ge(void) { /* Test batch gej -> ge conversion with many infinities. */ for (i = 0; i < 4 * runs + 1; i++) { + int odd; random_group_element_test(&ge[i]); + odd = secp256k1_fe_is_odd(&ge[i].x); + CHECK(odd == 0 || odd == 1); /* randomly set half the points to infinity */ - if(secp256k1_fe_is_odd(&ge[i].x)) { + if (odd == i % 2) { secp256k1_ge_set_infinity(&ge[i]); } secp256k1_gej_set_ge(&gej[i], &ge[i]); } - /* batch invert */ + /* batch convert */ secp256k1_ge_set_all_gej_var(ge, gej, 4 * runs + 1); /* check result */ for (i = 0; i < 4 * runs + 1; i++) { ge_equals_gej(&ge[i], &gej[i]); } + /* Test batch gej -> ge conversion with all infinities. */ + for (i = 0; i < 4 * runs + 1; i++) { + secp256k1_gej_set_infinity(&gej[i]); + } + /* batch convert */ + secp256k1_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + /* check result */ + for (i = 0; i < 4 * runs + 1; i++) { + CHECK(secp256k1_ge_is_infinity(&ge[i])); + } + free(ge); free(gej); } @@ -5434,6 +5544,55 @@ void test_random_pubkeys(void) { } } +void run_pubkey_comparison(void) { + unsigned char pk1_ser[33] = { + 0x02, + 0x58, 0x84, 0xb3, 0xa2, 0x4b, 0x97, 0x37, 0x88, 0x92, 0x38, 0xa6, 0x26, 0x62, 0x52, 0x35, 0x11, + 0xd0, 0x9a, 0xa1, 0x1b, 0x80, 0x0b, 0x5e, 0x93, 0x80, 0x26, 0x11, 0xef, 0x67, 0x4b, 0xd9, 0x23 + }; + const unsigned char pk2_ser[33] = { + 0x02, + 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d, + 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c + }; + secp256k1_pubkey pk1; + secp256k1_pubkey pk2; + int32_t ecount = 0; + + CHECK(secp256k1_ec_pubkey_parse(ctx, &pk1, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(secp256k1_ec_pubkey_parse(ctx, &pk2, pk2_ser, sizeof(pk2_ser)) == 1); + + secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + CHECK(secp256k1_ec_pubkey_cmp(ctx, NULL, &pk2) < 0); + CHECK(ecount == 1); + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk1, NULL) > 0); + CHECK(ecount == 2); + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk1, &pk1) == 0); + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk2, &pk2) == 0); + CHECK(ecount == 2); + { + secp256k1_pubkey pk_tmp; + memset(&pk_tmp, 0, sizeof(pk_tmp)); /* illegal pubkey */ + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk_tmp, &pk2) < 0); + CHECK(ecount == 3); + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk_tmp, &pk_tmp) == 0); + CHECK(ecount == 5); + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk2, &pk_tmp) > 0); + CHECK(ecount == 6); + } + + secp256k1_context_set_illegal_callback(ctx, NULL, NULL); + + /* Make pk2 the same as pk1 but with 3 rather than 2. 
Note that in + * an uncompressed encoding, these would have the opposite ordering */ + pk1_ser[0] = 3; + CHECK(secp256k1_ec_pubkey_parse(ctx, &pk2, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); + CHECK(secp256k1_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); +} + void run_random_pubkeys(void) { int i; for (i = 0; i < 10*count; i++) { @@ -6408,7 +6567,7 @@ int main(int argc, char **argv) { count = strtol(argv[1], NULL, 0); } else { const char* env = getenv("SECP256K1_TEST_ITERS"); - if (env) { + if (env && strlen(env) > 0) { count = strtol(env, NULL, 0); } } @@ -6442,6 +6601,7 @@ int main(int argc, char **argv) { run_sha256_tests(); run_hmac_sha256_tests(); run_rfc6979_hmac_sha256_tests(); + run_tagged_sha256_tests(); /* scalar tests */ run_scalar_tests(); @@ -6449,6 +6609,7 @@ int main(int argc, char **argv) { /* field tests */ run_field_misc(); run_field_convert(); + run_fe_mul(); run_sqr(); run_sqrt(); @@ -6485,6 +6646,7 @@ int main(int argc, char **argv) { #endif /* ecdsa tests */ + run_pubkey_comparison(); run_random_pubkeys(); run_ecdsa_der_parse(); run_ecdsa_sign_verify(); diff --git a/src/tests_exhaustive.c b/src/tests_exhaustive.c index 2bb5381446..5b9a3035d9 100644 --- a/src/tests_exhaustive.c +++ b/src/tests_exhaustive.c @@ -10,7 +10,6 @@ #include #include - #include #undef USE_ECMULT_STATIC_PRECOMPUTATION @@ -20,10 +19,10 @@ #define EXHAUSTIVE_TEST_ORDER 13 #endif -#include "include/secp256k1.h" +#include "secp256k1.c" +#include "../include/secp256k1.h" #include "assumptions.h" #include "group.h" -#include "secp256k1.c" #include "testrand_impl.h" static int count = 2; @@ -303,6 +302,7 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou if (skip_section(&iter)) continue; for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */ const int starting_k = k; + int ret; secp256k1_ecdsa_signature sig; secp256k1_scalar sk, msg, r, s, expected_r; unsigned char sk32[32], msg32[32]; @@ -311,7 +311,8 @@ void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *grou secp256k1_scalar_get_b32(sk32, &sk); secp256k1_scalar_get_b32(msg32, &msg); - secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k); + ret = secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k); + CHECK(ret == 1); secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig); /* Note that we compute expected_r *after* signing -- this is important diff --git a/src/valgrind_ctime_test.c b/src/valgrind_ctime_test.c index cfca5a196e..ea6d4b3deb 100644 --- a/src/valgrind_ctime_test.c +++ b/src/valgrind_ctime_test.c @@ -7,24 +7,24 @@ #include #include -#include "include/secp256k1.h" +#include "../include/secp256k1.h" #include "assumptions.h" #include "util.h" #ifdef ENABLE_MODULE_ECDH -# include "include/secp256k1_ecdh.h" +# include "../include/secp256k1_ecdh.h" #endif #ifdef ENABLE_MODULE_RECOVERY -# include "include/secp256k1_recovery.h" +# include "../include/secp256k1_recovery.h" #endif #ifdef ENABLE_MODULE_EXTRAKEYS -# include "include/secp256k1_extrakeys.h" +# include "../include/secp256k1_extrakeys.h" #endif #ifdef ENABLE_MODULE_SCHNORRSIG -#include "include/secp256k1_schnorrsig.h" +#include "../include/secp256k1_schnorrsig.h" #endif void run_tests(secp256k1_context *ctx, unsigned char *key); @@ -166,7 +166,7 @@ void run_tests(secp256k1_context *ctx, unsigned char *key) { ret = secp256k1_keypair_create(ctx, &keypair, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); - ret = 
secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL); + ret = secp256k1_schnorrsig_sign(ctx, sig, msg, &keypair, NULL); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); #endif From 84c874794cc1e1aa48f2edf08c355ec2a5696cb6 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Wed, 23 Jun 2021 16:54:39 +0200 Subject: [PATCH 019/112] test: remove unneeded initialization code in feature_rbf.py --- test/functional/feature_rbf.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index ed944274e3..dc6ebcb227 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -84,11 +84,6 @@ def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): - make_utxo(self.nodes[0], 1 * COIN) - - # Ensure nodes are synced - self.sync_all() - self.log.info("Running test simple doublespend...") self.test_simple_doublespend() From fb6c6a7938cb7c4808ad88d23bfc2b7408407b12 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Fri, 18 Jun 2021 18:09:27 +0200 Subject: [PATCH 020/112] test: speedup wallet_listtransactions by whitelisting peers (immediate tx relay) By whitelisting the peers via -whitelist, the inventory is transmissioned immediately rather than on average every 5 seconds, speeding up the test by at least a factor of two: before: $ time ./wallet_listtransactions.py ... 0m40.25s real 0m01.74s user 0m01.70s system with this PR: $ time ./wallet_listtransactions.py ... 0m14.93s real 0m01.68s user 0m01.87s system This commit also moves the wallet_listtransactions tests into the < 30s group. --- test/functional/test_runner.py | 4 ++-- test/functional/wallet_listtransactions.py | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 8afd8b3bc1..cbd773b704 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -109,8 +109,6 @@ 'p2p_tx_download.py', 'mempool_updatefromblock.py', 'wallet_dump.py --legacy-wallet', - 'wallet_listtransactions.py --legacy-wallet', - 'wallet_listtransactions.py --descriptors', 'feature_taproot.py --previous_release', 'feature_taproot.py', 'rpc_signer.py', @@ -159,6 +157,8 @@ 'wallet_createwallet.py --legacy-wallet', 'wallet_createwallet.py --usecli', 'wallet_createwallet.py --descriptors', + 'wallet_listtransactions.py --legacy-wallet', + 'wallet_listtransactions.py --descriptors', 'wallet_watchonly.py --legacy-wallet', 'wallet_watchonly.py --usecli --legacy-wallet', 'wallet_reorgsrestore.py', diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py index 8b503f5971..df1cbd5ede 100755 --- a/test/functional/wallet_listtransactions.py +++ b/test/functional/wallet_listtransactions.py @@ -18,6 +18,9 @@ class ListTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 + # This test isn't testing txn relay/timing, so set whitelist on the + # peers for instant txn relay. This speeds up the test run time 2-3x. 
+ self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes def skip_test_if_missing_module(self): self.skip_if_no_wallet() From 47915b118720c6e2b2ec9f599f25848041b42b99 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Sat, 19 Jun 2021 22:42:08 +0200 Subject: [PATCH 021/112] test: remove unneeded/redundant code in wallet_listtransactions -> remove unneeded get-out-of IBD generate() (The test framework already sets up the nodes to be out of IBD in setup_nodes(), if setup_clean_chain is not set to True) -> remove duplicate code line assigning an utxo --- test/functional/wallet_listtransactions.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py index df1cbd5ede..7557dbe8c8 100755 --- a/test/functional/wallet_listtransactions.py +++ b/test/functional/wallet_listtransactions.py @@ -26,8 +26,6 @@ def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): - self.nodes[0].generate(1) # Get out of IBD - self.sync_all() # Simple send, 0 to 1: txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() @@ -136,7 +134,6 @@ def get_unconfirmed_utxo_entry(node, txid_to_match): utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1) assert_equal(utxo_to_use["safe"], True) utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) - utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) assert_equal(utxo_to_use["safe"], False) # Create tx2 using createrawtransaction From a006d7d73019b8cf4d68626c019c3d69729dda69 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Sun, 20 Jun 2021 13:28:22 +0200 Subject: [PATCH 022/112] test: add logging to wallet_listtransactions Co-authored-by: Jon Atack --- test/functional/wallet_listtransactions.py | 29 +++++++++++++--------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py index 7557dbe8c8..c0386f5d70 100755 --- a/test/functional/wallet_listtransactions.py +++ b/test/functional/wallet_listtransactions.py @@ -26,7 +26,7 @@ def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): - # Simple send, 0 to 1: + self.log.info("Test simple send from node0 to node1") txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), @@ -35,7 +35,7 @@ def run_test(self): assert_array_result(self.nodes[1].listtransactions(), {"txid": txid}, {"category": "receive", "amount": Decimal("0.1"), "confirmations": 0}) - # mine a block, confirmations should change: + self.log.info("Test confirmations change after mining a block") blockhash = self.nodes[0].generate(1)[0] blockheight = self.nodes[0].getblockheader(blockhash)['height'] self.sync_all() @@ -46,7 +46,7 @@ def run_test(self): {"txid": txid}, {"category": "receive", "amount": Decimal("0.1"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight}) - # send-to-self: + self.log.info("Test send-to-self on node0") txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid, "category": "send"}, @@ -55,7 +55,7 @@ def run_test(self): {"txid": txid, "category": "receive"}, {"amount": Decimal("0.2")}) - # sendmany from node1: twice to self, twice to node2: + self.log.info("Test sendmany from node1: twice to self, twice to node0") send_to = 
{self.nodes[0].getnewaddress(): 0.11, self.nodes[1].getnewaddress(): 0.22, self.nodes[0].getnewaddress(): 0.33, @@ -89,6 +89,7 @@ def run_test(self): if not self.options.descriptors: # include_watchonly is a legacy wallet feature, so don't test it for descriptor wallets + self.log.info("Test 'include_watchonly' feature (legacy wallet)") pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey'] multisig = self.nodes[1].createmultisig(1, [pubkey]) self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True) @@ -104,33 +105,35 @@ def run_test(self): self.run_rbf_opt_in_test() - # Check that the opt-in-rbf flag works properly, for sent and received - # transactions. + def run_rbf_opt_in_test(self): - # Check whether a transaction signals opt-in RBF itself + """Test the opt-in-rbf flag for sent and received transactions.""" + def is_opt_in(node, txid): + """Check whether a transaction signals opt-in RBF itself.""" rawtx = node.getrawtransaction(txid, 1) for x in rawtx["vin"]: if x["sequence"] < 0xfffffffe: return True return False - # Find an unconfirmed output matching a certain txid def get_unconfirmed_utxo_entry(node, txid_to_match): + """Find an unconfirmed output matching a certain txid.""" utxo = node.listunspent(0, 0) for i in utxo: if i["txid"] == txid_to_match: return i return None - # 1. Chain a few transactions that don't opt-in. + self.log.info("Test txs w/o opt-in RBF (bip125-replaceable=no)") + # Chain a few transactions that don't opt in. txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) assert not is_opt_in(self.nodes[0], txid_1) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"}) self.sync_mempools() assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"}) - # Tx2 will build off txid_1, still not opting in to RBF. + # Tx2 will build off tx1, still not opting in to RBF. 
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1) assert_equal(utxo_to_use["safe"], True) utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1) @@ -149,6 +152,7 @@ def get_unconfirmed_utxo_entry(node, txid_to_match): self.sync_mempools() assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"}) + self.log.info("Test txs with opt-in RBF (bip125-replaceable=yes)") # Tx3 will opt-in to RBF utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2) inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}] @@ -179,6 +183,7 @@ def get_unconfirmed_utxo_entry(node, txid_to_match): self.sync_mempools() assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"}) + self.log.info("Test tx with unknown RBF state (bip125-replaceable=unknown)") # Replace tx3, and check that tx4 becomes unknown tx3_b = tx3_modified tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee @@ -191,7 +196,7 @@ def get_unconfirmed_utxo_entry(node, txid_to_match): self.sync_mempools() assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"}) - # Check gettransaction as well: + self.log.info("Test bip125-replaceable status with gettransaction RPC") for n in self.nodes[0:2]: assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no") assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no") @@ -199,7 +204,7 @@ def get_unconfirmed_utxo_entry(node, txid_to_match): assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes") assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown") - # After mining a transaction, it's no longer BIP125-replaceable + self.log.info("Test mined transactions are no longer bip125-replaceable") self.nodes[0].generate(1) assert txid_3b not in self.nodes[0].getrawmempool() assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no") From 65332b1178c75e1f83415bad24918996a1524866 Mon Sep 17 00:00:00 2001 From: John Newbery Date: Mon, 19 Jul 2021 12:57:16 +0100 Subject: [PATCH 023/112] [addrman] Remove RemoveInvalid() Instead of deserializing addresses, placing them in the buckets, and then removing them if they're invalid, check first and don't place in the buckets if they're invalid. 
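For illustration, the check-before-insert idea behind this change can be sketched as a minimal, self-contained C++ program (the Entry struct and bucket layout below are hypothetical stand-ins, not the real CAddrMan/CAddrInfo types): an entry that fails validation is simply never placed in a bucket, so no separate removal pass over the buckets is needed afterwards.

#include <array>
#include <cstddef>
#include <cstdio>
#include <vector>

struct Entry {
    int id;
    bool valid; // stands in for CAddrInfo::IsValid()
};

int main()
{
    std::array<int, 8> bucket;
    bucket.fill(-1); // -1 marks an empty slot, mirroring addrman's bucket tables

    const std::vector<Entry> deserialized{{0, true}, {1, false}, {2, true}};

    // Validate first, then insert: an invalid entry never enters a bucket,
    // so there is nothing left to prune in a second pass.
    for (const Entry& e : deserialized) {
        if (!e.valid) continue;
        const std::size_t pos = static_cast<std::size_t>(e.id) % bucket.size();
        if (bucket[pos] == -1) bucket[pos] = e.id;
    }

    for (int id : bucket) {
        if (id != -1) std::printf("bucket slot holds entry %d\n", id);
    }
    return 0;
}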
--- src/addrman.cpp | 32 -------------------------------- src/addrman.h | 15 +++++++-------- 2 files changed, 7 insertions(+), 40 deletions(-) diff --git a/src/addrman.cpp b/src/addrman.cpp index 8192b4eba6..8f702b5a8c 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -77,38 +77,6 @@ double CAddrInfo::GetChance(int64_t nNow) const return fChance; } -void CAddrMan::RemoveInvalid() -{ - for (size_t bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; ++bucket) { - for (size_t i = 0; i < ADDRMAN_BUCKET_SIZE; ++i) { - const auto id = vvNew[bucket][i]; - if (id != -1 && !mapInfo[id].IsValid()) { - ClearNew(bucket, i); - } - } - } - - for (size_t bucket = 0; bucket < ADDRMAN_TRIED_BUCKET_COUNT; ++bucket) { - for (size_t i = 0; i < ADDRMAN_BUCKET_SIZE; ++i) { - const auto id = vvTried[bucket][i]; - if (id == -1) { - continue; - } - const auto& addr_info = mapInfo[id]; - if (addr_info.IsValid()) { - continue; - } - vvTried[bucket][i] = -1; - --nTried; - SwapRandom(addr_info.nRandomPos, vRandom.size() - 1); - vRandom.pop_back(); - mapAddr.erase(addr_info); - mapInfo.erase(id); - m_tried_collisions.erase(id); - } - } -} - CAddrInfo* CAddrMan::Find(const CNetAddr& addr, int* pnId) { AssertLockHeld(cs); diff --git a/src/addrman.h b/src/addrman.h index 6f081b8dc1..e2c6d1d0d3 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -365,7 +365,8 @@ class CAddrMan s >> info; int nKBucket = info.GetTriedBucket(nKey, m_asmap); int nKBucketPos = info.GetBucketPosition(nKey, false, nKBucket); - if (vvTried[nKBucket][nKBucketPos] == -1) { + if (info.IsValid() + && vvTried[nKBucket][nKBucketPos] == -1) { info.nRandomPos = vRandom.size(); info.fInTried = true; vRandom.push_back(nIdCount); @@ -419,6 +420,9 @@ class CAddrMan const int entry_index{bucket_entry.second}; CAddrInfo& info = mapInfo[entry_index]; + // Don't store the entry in the new bucket if it's not a valid address for our addrman + if (!info.IsValid()) continue; + // The entry shouldn't appear in more than // ADDRMAN_NEW_BUCKETS_PER_ADDRESS. If it has already, just skip // this bucket_entry. @@ -441,7 +445,7 @@ class CAddrMan } } - // Prune new entries with refcount 0 (as a result of collisions). + // Prune new entries with refcount 0 (as a result of collisions or invalid address). int nLostUnk = 0; for (auto it = mapInfo.cbegin(); it != mapInfo.cend(); ) { if (it->second.fInTried == false && it->second.nRefCount == 0) { @@ -453,11 +457,9 @@ class CAddrMan } } if (nLost + nLostUnk > 0) { - LogPrint(BCLog::ADDRMAN, "addrman lost %i new and %i tried addresses due to collisions\n", nLostUnk, nLost); + LogPrint(BCLog::ADDRMAN, "addrman lost %i new and %i tried addresses due to collisions or invalid addresses\n", nLostUnk, nLost); } - RemoveInvalid(); - Check(); } @@ -770,9 +772,6 @@ class CAddrMan //! Update an entry's service bits. void SetServices_(const CService &addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(cs); - //! Remove invalid addresses. - void RemoveInvalid() EXCLUSIVE_LOCKS_REQUIRED(cs); - friend class CAddrManTest; }; From 9190b01d8dcf03b74e9b9e1653688a97ac171b37 Mon Sep 17 00:00:00 2001 From: John Newbery Date: Wed, 31 Mar 2021 18:34:49 +0100 Subject: [PATCH 024/112] [net processing] Add Orphanage empty consistency check When removing the final peer, assert that m_tx_orphanage is empty. 
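A hedged sketch of the shape of this check, ahead of the diff itself: a container that exposes a locked size accessor so the teardown path for the last peer can assert that it is empty. The `Orphanage` class and `main()` below are illustrative stand-ins, not the real `TxOrphanage`/`PeerManagerImpl` code:

    #include <cassert>
    #include <map>
    #include <mutex>

    // Toy stand-in: the relevant part is a Size() accessor that takes the same lock
    // as the mutating methods, so it can be called safely from a consistency assert.
    class Orphanage
    {
    public:
        void Add(int id) { std::lock_guard<std::mutex> lock(m_mutex); m_orphans[id] = true; }
        void Erase(int id) { std::lock_guard<std::mutex> lock(m_mutex); m_orphans.erase(id); }
        size_t Size() const { std::lock_guard<std::mutex> lock(m_mutex); return m_orphans.size(); }

    private:
        mutable std::mutex m_mutex;
        std::map<int, bool> m_orphans;
    };

    int main()
    {
        Orphanage orphanage;
        orphanage.Add(1);
        orphanage.Erase(1);            // the disconnecting peer's orphans are erased first
        assert(orphanage.Size() == 0); // the consistency check this commit adds, in spirit
        return 0;
    }

In the change itself, `Size()` takes `g_cs_orphans` and the new assert sits next to the other `FinalizeNode()` invariants that only hold once no peers remain.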
--- src/net_processing.cpp | 1 + src/txorphanage.h | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 9c4544df21..c2202c73da 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1194,6 +1194,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) assert(m_outbound_peers_with_protect_from_disconnect == 0); assert(m_wtxid_relay_peers == 0); assert(m_txrequest.Size() == 0); + assert(m_orphanage.Size() == 0); } } // cs_main if (node.fSuccessfullyConnected && misbehavior == 0 && diff --git a/src/txorphanage.h b/src/txorphanage.h index e4266e470a..24c8318f36 100644 --- a/src/txorphanage.h +++ b/src/txorphanage.h @@ -47,6 +47,13 @@ class TxOrphanage { * (ie orphans that may have found their final missing parent, and so should be reconsidered for the mempool) */ void AddChildrenToWorkSet(const CTransaction& tx, std::set& orphan_work_set) const EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans); + /** Return how many entries exist in the orphange */ + size_t Size() LOCKS_EXCLUDED(::g_cs_orphans) + { + LOCK(::g_cs_orphans); + return m_orphans.size(); + } + protected: struct OrphanTx { CTransactionRef tx; From a28bfd1d4cfa523a6abf3832dbfd6183cd546944 Mon Sep 17 00:00:00 2001 From: John Newbery Date: Wed, 31 Mar 2021 18:37:52 +0100 Subject: [PATCH 025/112] [net processing] Default initialize m_stale_tip_check_time --- src/net_processing.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index c2202c73da..6a1cd7f022 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -396,7 +396,8 @@ class PeerManagerImpl final : public PeerManager /** The height of the best chain */ std::atomic m_best_height{-1}; - int64_t m_stale_tip_check_time; //!< Next time to check for stale tip + /** Next time to check for stale tip */ + int64_t m_stale_tip_check_time{0}; /** Whether this node is running in blocks only mode */ const bool m_ignore_incoming_txs; @@ -1393,7 +1394,6 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn m_banman(banman), m_chainman(chainman), m_mempool(pool), - m_stale_tip_check_time(0), m_ignore_incoming_txs(ignore_incoming_txs) { // Initialize global variables that cannot be constructed at startup. From cd9902ac5054c01228d52616bf85f7196364d4ff Mon Sep 17 00:00:00 2001 From: John Newbery Date: Wed, 31 Mar 2021 18:40:47 +0100 Subject: [PATCH 026/112] [net processing] Default initialize recentRejects Now that recentRejects is owned by PeerManagerImpl, and PeerManagerImpl's lifetime is managed by the node context, we can just default initialize recentRejects during object initialization. We can also remove the unique_ptr indirection. --- src/net_processing.cpp | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 6a1cd7f022..0f5f18aada 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -470,7 +470,7 @@ class PeerManagerImpl final : public PeerManager * * Memory used: 1.3 MB */ - std::unique_ptr recentRejects GUARDED_BY(cs_main); + CRollingBloomFilter recentRejects GUARDED_BY(::cs_main){120'000, 0.000'001}; uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main); /* @@ -1396,9 +1396,6 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn m_mempool(pool), m_ignore_incoming_txs(ignore_incoming_txs) { - // Initialize global variables that cannot be constructed at startup. 
- recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); - // Blocks don't typically have more than 4000 transactions, so this should // be at least six blocks (~1 hr) worth of transactions that we can store, // inserting both a txid and wtxid for every observed transaction. @@ -1601,14 +1598,13 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) { - assert(recentRejects); if (m_chainman.ActiveChain().Tip()->GetBlockHash() != hashRecentRejectsChainTip) { // If the chain tip has changed previously rejected transactions // might be now valid, e.g. due to a nLockTime'd tx becoming valid, // or a double-spend. Reset the rejects filter and give those // txs a second chance. hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash(); - recentRejects->reset(); + recentRejects.reset(); } const uint256& hash = gtxid.GetHash(); @@ -1620,7 +1616,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) if (m_recent_confirmed_transactions->contains(hash)) return true; } - return recentRejects->contains(hash) || m_mempool.exists(gtxid); + return recentRejects.contains(hash) || m_mempool.exists(gtxid); } bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash) @@ -2239,8 +2235,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set& orphan_work_set) // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 // for concerns around weakening security of unupgraded nodes // if we start doing this too early. - assert(recentRejects); - recentRejects->insert(porphanTx->GetWitnessHash()); + recentRejects.insert(porphanTx->GetWitnessHash()); // If the transaction failed for TX_INPUTS_NOT_STANDARD, // then we know that the witness was irrelevant to the policy // failure, since this check depends only on the txid @@ -2252,7 +2247,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set& orphan_work_set) if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) { // We only add the txid if it differs from the wtxid, to // avoid wasting entries in the rolling bloom filter. - recentRejects->insert(porphanTx->GetHash()); + recentRejects.insert(porphanTx->GetHash()); } } m_orphanage.EraseTx(orphanHash); @@ -3255,7 +3250,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, std::sort(unique_parents.begin(), unique_parents.end()); unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); for (const uint256& parent_txid : unique_parents) { - if (recentRejects->contains(parent_txid)) { + if (recentRejects.contains(parent_txid)) { fRejectedParents = true; break; } @@ -3296,8 +3291,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // regardless of what witness is provided, we will not accept // this, so we don't need to allow for redownload of this txid // from any of our non-wtxidrelay peers. 
- recentRejects->insert(tx.GetHash()); - recentRejects->insert(tx.GetWitnessHash()); + recentRejects.insert(tx.GetHash()); + recentRejects.insert(tx.GetWitnessHash()); m_txrequest.ForgetTxHash(tx.GetHash()); m_txrequest.ForgetTxHash(tx.GetWitnessHash()); } @@ -3316,8 +3311,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 // for concerns around weakening security of unupgraded nodes // if we start doing this too early. - assert(recentRejects); - recentRejects->insert(tx.GetWitnessHash()); + recentRejects.insert(tx.GetWitnessHash()); m_txrequest.ForgetTxHash(tx.GetWitnessHash()); // If the transaction failed for TX_INPUTS_NOT_STANDARD, // then we know that the witness was irrelevant to the policy @@ -3328,7 +3322,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // transactions are later received (resulting in // parent-fetching by txid via the orphan-handling logic). if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) { - recentRejects->insert(tx.GetHash()); + recentRejects.insert(tx.GetHash()); m_txrequest.ForgetTxHash(tx.GetHash()); } if (RecursiveDynamicUsage(*ptx) < 100000) { From 37dcd12d539e4a875581fa049aa0f7fafeb932a4 Mon Sep 17 00:00:00 2001 From: John Newbery Date: Wed, 31 Mar 2021 18:44:18 +0100 Subject: [PATCH 027/112] scripted-diff: Rename recentRejects -BEGIN VERIFY SCRIPT- ren() { sed -i "s:\<$1\>:$2:g" $(git grep -l "\<$1\>" ./src ./test); } ren recentRejects m_recent_rejects -END VERIFY SCRIPT- --- src/net_processing.cpp | 28 ++++++++++++++-------------- test/functional/mempool_reorg.py | 2 +- test/functional/p2p_permissions.py | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 0f5f18aada..94ecbdf983 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -470,7 +470,7 @@ class PeerManagerImpl final : public PeerManager * * Memory used: 1.3 MB */ - CRollingBloomFilter recentRejects GUARDED_BY(::cs_main){120'000, 0.000'001}; + CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000, 0.000'001}; uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main); /* @@ -1604,7 +1604,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) // or a double-spend. Reset the rejects filter and give those // txs a second chance. hashRecentRejectsChainTip = m_chainman.ActiveChain().Tip()->GetBlockHash(); - recentRejects.reset(); + m_recent_rejects.reset(); } const uint256& hash = gtxid.GetHash(); @@ -1616,7 +1616,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) if (m_recent_confirmed_transactions->contains(hash)) return true; } - return recentRejects.contains(hash) || m_mempool.exists(gtxid); + return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid); } bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash) @@ -2235,7 +2235,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set& orphan_work_set) // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 // for concerns around weakening security of unupgraded nodes // if we start doing this too early. 
- recentRejects.insert(porphanTx->GetWitnessHash()); + m_recent_rejects.insert(porphanTx->GetWitnessHash()); // If the transaction failed for TX_INPUTS_NOT_STANDARD, // then we know that the witness was irrelevant to the policy // failure, since this check depends only on the txid @@ -2247,7 +2247,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set& orphan_work_set) if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && porphanTx->GetWitnessHash() != porphanTx->GetHash()) { // We only add the txid if it differs from the wtxid, to // avoid wasting entries in the rolling bloom filter. - recentRejects.insert(porphanTx->GetHash()); + m_recent_rejects.insert(porphanTx->GetHash()); } } m_orphanage.EraseTx(orphanHash); @@ -3250,7 +3250,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, std::sort(unique_parents.begin(), unique_parents.end()); unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); for (const uint256& parent_txid : unique_parents) { - if (recentRejects.contains(parent_txid)) { + if (m_recent_rejects.contains(parent_txid)) { fRejectedParents = true; break; } @@ -3291,8 +3291,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // regardless of what witness is provided, we will not accept // this, so we don't need to allow for redownload of this txid // from any of our non-wtxidrelay peers. - recentRejects.insert(tx.GetHash()); - recentRejects.insert(tx.GetWitnessHash()); + m_recent_rejects.insert(tx.GetHash()); + m_recent_rejects.insert(tx.GetWitnessHash()); m_txrequest.ForgetTxHash(tx.GetHash()); m_txrequest.ForgetTxHash(tx.GetWitnessHash()); } @@ -3311,7 +3311,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // See also comments in https://github.com/bitcoin/bitcoin/pull/18044#discussion_r443419034 // for concerns around weakening security of unupgraded nodes // if we start doing this too early. - recentRejects.insert(tx.GetWitnessHash()); + m_recent_rejects.insert(tx.GetWitnessHash()); m_txrequest.ForgetTxHash(tx.GetWitnessHash()); // If the transaction failed for TX_INPUTS_NOT_STANDARD, // then we know that the witness was irrelevant to the policy @@ -3322,7 +3322,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // transactions are later received (resulting in // parent-fetching by txid via the orphan-handling logic). if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) { - recentRejects.insert(tx.GetHash()); + m_recent_rejects.insert(tx.GetHash()); m_txrequest.ForgetTxHash(tx.GetHash()); } if (RecursiveDynamicUsage(*ptx) < 100000) { @@ -3331,21 +3331,21 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } } - // If a tx has been detected by recentRejects, we will have reached + // If a tx has been detected by m_recent_rejects, we will have reached // this point and the tx will have been ignored. Because we haven't run // the tx through AcceptToMemoryPool, we won't have computed a DoS // score for it or determined exactly why we consider it invalid. // // This means we won't penalize any peer subsequently relaying a DoSy // tx (even if we penalized the first peer who gave it to us) because - // we have to account for recentRejects showing false positives. In + // we have to account for m_recent_rejects showing false positives. 
In // other words, we shouldn't penalize a peer if we aren't *sure* they // submitted a DoSy tx. // - // Note that recentRejects doesn't just record DoSy or invalid + // Note that m_recent_rejects doesn't just record DoSy or invalid // transactions, but any tx not accepted by the mempool, which may be // due to node policy (vs. consensus). So we can't blanket penalize a - // peer simply for relaying a tx that our recentRejects has caught, + // peer simply for relaying a tx that our m_recent_rejects has caught, // regardless of false positives. if (state.IsInvalid()) { diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py index bcc6aa7bcc..b5086e1df1 100755 --- a/test/functional/mempool_reorg.py +++ b/test/functional/mempool_reorg.py @@ -80,7 +80,7 @@ def run_test(self): self.log.info("Generate a block") last_block = self.nodes[0].generate(1) # Sync blocks, so that peer 1 gets the block before timelock_tx - # Otherwise, peer 1 would put the timelock_tx in recentRejects + # Otherwise, peer 1 would put the timelock_tx in m_recent_rejects self.sync_all() self.log.info("The time-locked transaction can now be spent") diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py index 594a28d662..8b285907c5 100755 --- a/test/functional/p2p_permissions.py +++ b/test/functional/p2p_permissions.py @@ -130,7 +130,7 @@ def check_tx_relay(self): tx.vout[0].nValue += 1 txid = tx.rehash() # Send the transaction twice. The first time, it'll be rejected by ATMP because it conflicts - # with a mempool transaction. The second time, it'll be in the recentRejects filter. + # with a mempool transaction. The second time, it'll be in the m_recent_rejects filter. p2p_rebroadcast_wallet.send_txs_and_test( [tx], self.nodes[1], From fde1bf4f6136638e84cdf9806eedaae08e841bbf Mon Sep 17 00:00:00 2001 From: John Newbery Date: Wed, 31 Mar 2021 18:40:47 +0100 Subject: [PATCH 028/112] [net processing] Default initialize m_recent_confirmed_transactions Now that m_recent_confirmed_transactions is owned by PeerManagerImpl, and PeerManagerImpl's lifetime is managed by the node context, we can just default initialize m_recent_confirmed_transactions during object initialization. We can also remove the unique_ptr indirection. --- src/net_processing.cpp | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 94ecbdf983..c418620859 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -477,9 +477,19 @@ class PeerManagerImpl final : public PeerManager * Filter for transactions that have been recently confirmed. * We use this to avoid requesting transactions that have already been * confirnmed. + * + * Blocks don't typically have more than 4000 transactions, so this should + * be at least six blocks (~1 hr) worth of transactions that we can store, + * inserting both a txid and wtxid for every observed transaction. + * If the number of transactions appearing in a block goes up, or if we are + * seeing getdata requests more than an hour after initial announcement, we + * can increase this number. + * The false positive rate of 1/1M should come out to less than 1 + * transaction per day that would be inadvertently ignored (which is the + * same probability that we have in the reject filter). 
*/ Mutex m_recent_confirmed_transactions_mutex; - std::unique_ptr m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex); + CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001}; /** Have we requested this block from a peer */ bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); @@ -1396,17 +1406,6 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn m_mempool(pool), m_ignore_incoming_txs(ignore_incoming_txs) { - // Blocks don't typically have more than 4000 transactions, so this should - // be at least six blocks (~1 hr) worth of transactions that we can store, - // inserting both a txid and wtxid for every observed transaction. - // If the number of transactions appearing in a block goes up, or if we are - // seeing getdata requests more than an hour after initial announcement, we - // can increase this number. - // The false positive rate of 1/1M should come out to less than 1 - // transaction per day that would be inadvertently ignored (which is the - // same probability that we have in the reject filter). - m_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001)); - // Stale tip checking and peer eviction are on two different timers, but we // don't want them to get out of sync due to drift in the scheduler, so we // combine them in one function and schedule at the quicker (peer-eviction) @@ -1432,9 +1431,9 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr& pblock { LOCK(m_recent_confirmed_transactions_mutex); for (const auto& ptx : pblock->vtx) { - m_recent_confirmed_transactions->insert(ptx->GetHash()); + m_recent_confirmed_transactions.insert(ptx->GetHash()); if (ptx->GetHash() != ptx->GetWitnessHash()) { - m_recent_confirmed_transactions->insert(ptx->GetWitnessHash()); + m_recent_confirmed_transactions.insert(ptx->GetWitnessHash()); } } } @@ -1458,7 +1457,7 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr &blo // presumably the most common case of relaying a confirmed transaction // should be just after a new block containing it is found. 
LOCK(m_recent_confirmed_transactions_mutex); - m_recent_confirmed_transactions->reset(); + m_recent_confirmed_transactions.reset(); } // All of the following cache a recent block, and are protected by cs_most_recent_block @@ -1613,7 +1612,7 @@ bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) { LOCK(m_recent_confirmed_transactions_mutex); - if (m_recent_confirmed_transactions->contains(hash)) return true; + if (m_recent_confirmed_transactions.contains(hash)) return true; } return m_recent_rejects.contains(hash) || m_mempool.exists(gtxid); From fa02934c8c9d290ea4d12683e8680c70967a4d3a Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Thu, 13 May 2021 12:13:27 +0200 Subject: [PATCH 029/112] refactor: Mark CAddrMan::Select const --- src/addrman.cpp | 12 +++++++----- src/addrman.h | 10 +++++----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/src/addrman.cpp b/src/addrman.cpp index 8192b4eba6..6cbcbed839 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -410,7 +410,7 @@ void CAddrMan::Attempt_(const CService& addr, bool fCountFailure, int64_t nTime) } } -CAddrInfo CAddrMan::Select_(bool newOnly) +CAddrInfo CAddrMan::Select_(bool newOnly) const { AssertLockHeld(cs); @@ -433,8 +433,9 @@ CAddrInfo CAddrMan::Select_(bool newOnly) nKBucketPos = (nKBucketPos + insecure_rand.randbits(ADDRMAN_BUCKET_SIZE_LOG2)) % ADDRMAN_BUCKET_SIZE; } int nId = vvTried[nKBucket][nKBucketPos]; - assert(mapInfo.count(nId) == 1); - CAddrInfo& info = mapInfo[nId]; + const auto it_found{mapInfo.find(nId)}; + assert(it_found != mapInfo.end()); + const CAddrInfo& info{it_found->second}; if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30)) return info; fChanceFactor *= 1.2; @@ -450,8 +451,9 @@ CAddrInfo CAddrMan::Select_(bool newOnly) nUBucketPos = (nUBucketPos + insecure_rand.randbits(ADDRMAN_BUCKET_SIZE_LOG2)) % ADDRMAN_BUCKET_SIZE; } int nId = vvNew[nUBucket][nUBucketPos]; - assert(mapInfo.count(nId) == 1); - CAddrInfo& info = mapInfo[nId]; + const auto it_found{mapInfo.find(nId)}; + assert(it_found != mapInfo.end()); + const CAddrInfo& info{it_found->second}; if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30)) return info; fChanceFactor *= 1.2; diff --git a/src/addrman.h b/src/addrman.h index 1fc64ac07f..02b8c5d9a2 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -579,7 +579,7 @@ class CAddrMan /** * Choose an address to connect to. */ - CAddrInfo Select(bool newOnly = false) + CAddrInfo Select(bool newOnly = false) const EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); @@ -631,7 +631,7 @@ class CAddrMan uint256 nKey; //! Source of random numbers for randomization in inner loops - FastRandomContext insecure_rand; + mutable FastRandomContext insecure_rand; private: //! A mutex to protect the inner data structures. @@ -718,7 +718,7 @@ class CAddrMan void Attempt_(const CService &addr, bool fCountFailure, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs); //! Select an address to connect to, if newOnly is set to true, only the new table is selected from. - CAddrInfo Select_(bool newOnly) EXCLUSIVE_LOCKS_REQUIRED(cs); + CAddrInfo Select_(bool newOnly) const EXCLUSIVE_LOCKS_REQUIRED(cs); //! See if any to-be-evicted tried table entries have been tested and if so resolve the collisions. void ResolveCollisions_() EXCLUSIVE_LOCKS_REQUIRED(cs); @@ -727,7 +727,7 @@ class CAddrMan CAddrInfo SelectTriedCollision_() EXCLUSIVE_LOCKS_REQUIRED(cs); //! 
Consistency check - void Check() + void Check() const EXCLUSIVE_LOCKS_REQUIRED(cs) { #ifdef DEBUG_ADDRMAN @@ -741,7 +741,7 @@ class CAddrMan #ifdef DEBUG_ADDRMAN //! Perform consistency check. Returns an error code or zero. - int Check_() EXCLUSIVE_LOCKS_REQUIRED(cs); + int Check_() const EXCLUSIVE_LOCKS_REQUIRED(cs); #endif /** From fae0c79351ce34186249d44af0c5c9c7521f4b6c Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Thu, 13 May 2021 13:43:36 +0200 Subject: [PATCH 030/112] refactor: Mark CAddrMan::GetAddr const --- src/addrman.cpp | 19 +++++++++++-------- src/addrman.h | 12 +++++++----- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/src/addrman.cpp b/src/addrman.cpp index 6cbcbed839..db5166be44 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -138,7 +138,7 @@ CAddrInfo* CAddrMan::Create(const CAddress& addr, const CNetAddr& addrSource, in return &mapInfo[nId]; } -void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) +void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) const { AssertLockHeld(cs); @@ -150,11 +150,13 @@ void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) int nId1 = vRandom[nRndPos1]; int nId2 = vRandom[nRndPos2]; - assert(mapInfo.count(nId1) == 1); - assert(mapInfo.count(nId2) == 1); + const auto it_1{mapInfo.find(nId1)}; + const auto it_2{mapInfo.find(nId2)}; + assert(it_1 != mapInfo.end()); + assert(it_2 != mapInfo.end()); - mapInfo[nId1].nRandomPos = nRndPos2; - mapInfo[nId2].nRandomPos = nRndPos1; + it_1->second.nRandomPos = nRndPos2; + it_2->second.nRandomPos = nRndPos1; vRandom[nRndPos1] = nId2; vRandom[nRndPos2] = nId1; @@ -541,7 +543,7 @@ int CAddrMan::Check_() } #endif -void CAddrMan::GetAddr_(std::vector& vAddr, size_t max_addresses, size_t max_pct, std::optional network) +void CAddrMan::GetAddr_(std::vector& vAddr, size_t max_addresses, size_t max_pct, std::optional network) const { AssertLockHeld(cs); @@ -561,9 +563,10 @@ void CAddrMan::GetAddr_(std::vector& vAddr, size_t max_addresses, size int nRndPos = insecure_rand.randrange(vRandom.size() - n) + n; SwapRandom(n, nRndPos); - assert(mapInfo.count(vRandom[n]) == 1); + const auto it{mapInfo.find(vRandom[n])}; + assert(it != mapInfo.end()); - const CAddrInfo& ai = mapInfo[vRandom[n]]; + const CAddrInfo& ai{it->second}; // Filter by network (optional) if (network != std::nullopt && ai.GetNetClass() != network) continue; diff --git a/src/addrman.h b/src/addrman.h index 02b8c5d9a2..d4f67c2f5b 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -55,7 +55,7 @@ class CAddrInfo : public CAddress bool fInTried{false}; //! position in vRandom - int nRandomPos{-1}; + mutable int nRandomPos{-1}; friend class CAddrMan; @@ -596,7 +596,7 @@ class CAddrMan * @param[in] max_pct Maximum percentage of addresses to return (0 = all). * @param[in] network Select only addresses of this network (nullopt = all). */ - std::vector GetAddr(size_t max_addresses, size_t max_pct, std::optional network) + std::vector GetAddr(size_t max_addresses, size_t max_pct, std::optional network) const EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); @@ -669,7 +669,9 @@ class CAddrMan std::unordered_map mapAddr GUARDED_BY(cs); //! randomly-ordered vector of all nIds - std::vector vRandom GUARDED_BY(cs); + //! This is mutable because it is unobservable outside the class, so any + //! changes to it (even in const methods) are also unobservable. 
+ mutable std::vector vRandom GUARDED_BY(cs); // number of "tried" entries int nTried GUARDED_BY(cs); @@ -697,7 +699,7 @@ class CAddrMan CAddrInfo* Create(const CAddress &addr, const CNetAddr &addrSource, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs); //! Swap two elements in vRandom. - void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2) EXCLUSIVE_LOCKS_REQUIRED(cs); + void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2) const EXCLUSIVE_LOCKS_REQUIRED(cs); //! Move an entry from the "new" table(s) to the "tried" table void MakeTried(CAddrInfo& info, int nId) EXCLUSIVE_LOCKS_REQUIRED(cs); @@ -752,7 +754,7 @@ class CAddrMan * @param[in] max_pct Maximum percentage of addresses to return (0 = all). * @param[in] network Select only addresses of this network (nullopt = all). */ - void GetAddr_(std::vector& vAddr, size_t max_addresses, size_t max_pct, std::optional network) EXCLUSIVE_LOCKS_REQUIRED(cs); + void GetAddr_(std::vector& vAddr, size_t max_addresses, size_t max_pct, std::optional network) const EXCLUSIVE_LOCKS_REQUIRED(cs); /** We have successfully connected to this peer. Calling this function * updates the CAddress's nTime, which is used in our IsTerrible() From fab755b77f88873f01cbd988051de7ad3f0150de Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Mon, 14 Jun 2021 09:40:51 +0200 Subject: [PATCH 031/112] fuzz: Actually use const addrman --- src/test/fuzz/addrman.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp index 8513f1e6df..ee9485eba5 100644 --- a/src/test/fuzz/addrman.cpp +++ b/src/test/fuzz/addrman.cpp @@ -103,11 +103,11 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman) }); } const CAddrMan& const_addr_man{addr_man}; - (void)/*const_*/addr_man.GetAddr( + (void)const_addr_man.GetAddr( /* max_addresses */ fuzzed_data_provider.ConsumeIntegralInRange(0, 4096), /* max_pct */ fuzzed_data_provider.ConsumeIntegralInRange(0, 4096), /* network */ std::nullopt); - (void)/*const_*/addr_man.Select(fuzzed_data_provider.ConsumeBool()); + (void)const_addr_man.Select(fuzzed_data_provider.ConsumeBool()); (void)const_addr_man.size(); CDataStream data_stream(SER_NETWORK, PROTOCOL_VERSION); data_stream << const_addr_man; From 0a9129c588ab016eb0453b40a0cae918ca4aa6a2 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Wed, 21 Jul 2021 17:05:01 +0200 Subject: [PATCH 032/112] test: assert on the value of getblockchaininfo#time --- test/functional/rpc_blockchain.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index f7290ff229..01f5ce7eb0 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -49,6 +49,10 @@ from test_framework.wallet import MiniWallet +TIME_RANGE_STEP = 600 # ten-minute steps +TIME_RANGE_END = TIME_GENESIS_BLOCK + 200 * TIME_RANGE_STEP + + class BlockchainTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True @@ -71,9 +75,8 @@ def run_test(self): assert self.nodes[0].verifychain(4, 0) def mine_chain(self): - self.log.info('Create some old blocks') - for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600): - # ten-minute steps from genesis block time + self.log.info("Generate 200 blocks after the genesis block in ten-minute steps") + for t in range(TIME_GENESIS_BLOCK, TIME_RANGE_END, TIME_RANGE_STEP): self.nodes[0].setmocktime(t) self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_P2WSH_OP_TRUE) 
assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200) @@ -99,7 +102,7 @@ def _test_getblockchaininfo(self): ] res = self.nodes[0].getblockchaininfo() - assert isinstance(res['time'], int) + assert_equal(res['time'], TIME_RANGE_END - TIME_RANGE_STEP) # result should have these additional pruning keys if manual pruning is enabled assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys)) From 78c361086fc0bf27612e8142bd33e05e37a36af6 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Wed, 21 Jul 2021 17:07:18 +0200 Subject: [PATCH 033/112] test: assert on mediantime in getblockheader and getblockchaininfo --- test/functional/rpc_blockchain.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 01f5ce7eb0..524268ecba 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -50,6 +50,7 @@ TIME_RANGE_STEP = 600 # ten-minute steps +TIME_RANGE_MTP = TIME_GENESIS_BLOCK + 194 * TIME_RANGE_STEP TIME_RANGE_END = TIME_GENESIS_BLOCK + 200 * TIME_RANGE_STEP @@ -103,6 +104,7 @@ def _test_getblockchaininfo(self): res = self.nodes[0].getblockchaininfo() assert_equal(res['time'], TIME_RANGE_END - TIME_RANGE_STEP) + assert_equal(res['mediantime'], TIME_RANGE_MTP) # result should have these additional pruning keys if manual pruning is enabled assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys)) @@ -310,7 +312,7 @@ def _test_getblockheader(self): assert_is_hash_string(header['merkleroot']) assert_is_hash_string(header['bits'], length=None) assert isinstance(header['time'], int) - assert isinstance(header['mediantime'], int) + assert_equal(header['mediantime'], TIME_RANGE_MTP) assert isinstance(header['nonce'], int) assert isinstance(header['version'], int) assert isinstance(int(header['versionHex'], 16), int) From d548dc71e4849f638fccaea6be86ac4fa5304f01 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Wed, 21 Jul 2021 17:15:06 +0200 Subject: [PATCH 034/112] test: replace magic values by constants in rpc_blockchain.py --- test/functional/rpc_blockchain.py | 49 ++++++++++++++++--------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 524268ecba..2b604b7b23 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -49,9 +49,10 @@ from test_framework.wallet import MiniWallet +HEIGHT = 200 # blocks mined TIME_RANGE_STEP = 600 # ten-minute steps -TIME_RANGE_MTP = TIME_GENESIS_BLOCK + 194 * TIME_RANGE_STEP -TIME_RANGE_END = TIME_GENESIS_BLOCK + 200 * TIME_RANGE_STEP +TIME_RANGE_MTP = TIME_GENESIS_BLOCK + (HEIGHT - 6) * TIME_RANGE_STEP +TIME_RANGE_END = TIME_GENESIS_BLOCK + HEIGHT * TIME_RANGE_STEP class BlockchainTest(BitcoinTestFramework): @@ -76,11 +77,11 @@ def run_test(self): assert self.nodes[0].verifychain(4, 0) def mine_chain(self): - self.log.info("Generate 200 blocks after the genesis block in ten-minute steps") + self.log.info(f"Generate {HEIGHT} blocks after the genesis block in ten-minute steps") for t in range(TIME_GENESIS_BLOCK, TIME_RANGE_END, TIME_RANGE_STEP): self.nodes[0].setmocktime(t) self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_P2WSH_OP_TRUE) - assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200) + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], HEIGHT) def _test_getblockchaininfo(self): self.log.info("Test getblockchaininfo") @@ -153,8 +154,8 @@ def 
_test_getblockchaininfo(self): 'statistics': { 'period': 144, 'threshold': 108, - 'elapsed': 57, - 'count': 57, + 'elapsed': HEIGHT - 143, + 'count': HEIGHT - 143, 'possible': True, }, 'min_activation_height': 0, @@ -191,33 +192,33 @@ def _test_getchaintxstats(self): assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0') assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000') assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000') - blockhash = self.nodes[0].getblockhash(200) + blockhash = self.nodes[0].getblockhash(HEIGHT) self.nodes[0].invalidateblock(blockhash) assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash) self.nodes[0].reconsiderblock(blockhash) chaintxstats = self.nodes[0].getchaintxstats(nblocks=1) # 200 txs plus genesis tx - assert_equal(chaintxstats['txcount'], 201) + assert_equal(chaintxstats['txcount'], HEIGHT + 1) # tx rate should be 1 per 10 minutes, or 1/600 # we have to round because of binary math - assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1)) + assert_equal(round(chaintxstats['txrate'] * TIME_RANGE_STEP, 10), Decimal(1)) b1_hash = self.nodes[0].getblockhash(1) b1 = self.nodes[0].getblock(b1_hash) - b200_hash = self.nodes[0].getblockhash(200) + b200_hash = self.nodes[0].getblockhash(HEIGHT) b200 = self.nodes[0].getblock(b200_hash) time_diff = b200['mediantime'] - b1['mediantime'] chaintxstats = self.nodes[0].getchaintxstats() assert_equal(chaintxstats['time'], b200['time']) - assert_equal(chaintxstats['txcount'], 201) + assert_equal(chaintxstats['txcount'], HEIGHT + 1) assert_equal(chaintxstats['window_final_block_hash'], b200_hash) - assert_equal(chaintxstats['window_final_block_height'], 200) - assert_equal(chaintxstats['window_block_count'], 199) - assert_equal(chaintxstats['window_tx_count'], 199) + assert_equal(chaintxstats['window_final_block_height'], HEIGHT ) + assert_equal(chaintxstats['window_block_count'], HEIGHT - 1) + assert_equal(chaintxstats['window_tx_count'], HEIGHT - 1) assert_equal(chaintxstats['window_interval'], time_diff) - assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199)) + assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(HEIGHT - 1)) chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash) assert_equal(chaintxstats['time'], b1['time']) @@ -234,11 +235,11 @@ def _test_gettxoutsetinfo(self): res = node.gettxoutsetinfo() assert_equal(res['total_amount'], Decimal('8725.00000000')) - assert_equal(res['transactions'], 200) - assert_equal(res['height'], 200) - assert_equal(res['txouts'], 200) + assert_equal(res['transactions'], HEIGHT) + assert_equal(res['height'], HEIGHT) + assert_equal(res['txouts'], HEIGHT) assert_equal(res['bogosize'], 16800), - assert_equal(res['bestblock'], node.getblockhash(200)) + assert_equal(res['bestblock'], node.getblockhash(HEIGHT)) size = res['disk_size'] assert size > 6400 assert size < 64000 @@ -298,11 +299,11 @@ def _test_getblockheader(self): assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844") besthash = node.getbestblockhash() - secondbesthash = 
node.getblockhash(199) + secondbesthash = node.getblockhash(HEIGHT - 1) header = node.getblockheader(blockhash=besthash) assert_equal(header['hash'], besthash) - assert_equal(header['height'], 200) + assert_equal(header['height'], HEIGHT) assert_equal(header['confirmations'], 1) assert_equal(header['previousblockhash'], secondbesthash) assert_is_hex_string(header['chainwork']) @@ -341,9 +342,9 @@ def _test_getnetworkhashps(self): assert abs(hashes_per_second * 300 - 1) < 0.0001 def _test_stopatheight(self): - assert_equal(self.nodes[0].getblockcount(), 200) + assert_equal(self.nodes[0].getblockcount(), HEIGHT) self.nodes[0].generatetoaddress(6, ADDRESS_BCRT1_P2WSH_OP_TRUE) - assert_equal(self.nodes[0].getblockcount(), 206) + assert_equal(self.nodes[0].getblockcount(), HEIGHT + 6) self.log.debug('Node should not stop at this height') assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3)) try: @@ -353,7 +354,7 @@ def _test_stopatheight(self): self.log.debug('Node should stop at this height...') self.nodes[0].wait_until_stopped() self.start_node(0) - assert_equal(self.nodes[0].getblockcount(), 207) + assert_equal(self.nodes[0].getblockcount(), HEIGHT + 7) def _test_waitforblockheight(self): self.log.info("Test waitforblockheight") From ef5e9304cd407adab1563f24215da1b582274c20 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Wed, 21 Jul 2021 18:11:19 +0200 Subject: [PATCH 035/112] test: update logging and docstring in rpc_blockchain.py --- test/functional/rpc_blockchain.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 2b604b7b23..794b2e5e08 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -6,13 +6,15 @@ Test the following RPCs: - getblockchaininfo + - getchaintxstats - gettxoutsetinfo - - getdifficulty - - getbestblockhash - - getblockhash - getblockheader - - getchaintxstats + - getdifficulty - getnetworkhashps + - waitforblockheight + - getblock + - getblockhash + - getbestblockhash - verifychain Tests correspond to code in rpc/blockchain.cpp. @@ -246,7 +248,7 @@ def _test_gettxoutsetinfo(self): assert_equal(len(res['bestblock']), 64) assert_equal(len(res['hash_serialized_2']), 64) - self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block") + self.log.info("Test gettxoutsetinfo works for blockchain with just the genesis block") b1hash = node.getblockhash(1) node.invalidateblock(b1hash) @@ -259,7 +261,7 @@ def _test_gettxoutsetinfo(self): assert_equal(res2['bestblock'], node.getblockhash(0)) assert_equal(len(res2['hash_serialized_2']), 64) - self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block") + self.log.info("Test gettxoutsetinfo returns the same result after invalidate/reconsider block") node.reconsiderblock(b1hash) res3 = node.gettxoutsetinfo() @@ -268,7 +270,7 @@ def _test_gettxoutsetinfo(self): del res['disk_size'], res3['disk_size'] assert_equal(res, res3) - self.log.info("Test hash_type option for gettxoutsetinfo()") + self.log.info("Test gettxoutsetinfo hash_type option") # Adding hash_type 'hash_serialized_2', which is the default, should # not change the result. 
res4 = node.gettxoutsetinfo(hash_type='hash_serialized_2') @@ -292,6 +294,7 @@ def _test_gettxoutsetinfo(self): assert_raises_rpc_error(-8, "foohash is not a valid hash_type", node.gettxoutsetinfo, "foohash") def _test_getblockheader(self): + self.log.info("Test getblockheader") node = self.nodes[0] assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense") @@ -331,17 +334,20 @@ def _test_getblockheader(self): assert 'nextblockhash' not in node.getblockheader(node.getbestblockhash()) def _test_getdifficulty(self): + self.log.info("Test getdifficulty") difficulty = self.nodes[0].getdifficulty() # 1 hash in 2 should be valid, so difficulty should be 1/2**31 # binary => decimal => binary math is why we do this check assert abs(difficulty * 2**31 - 1) < 0.0001 def _test_getnetworkhashps(self): + self.log.info("Test getnetworkhashps") hashes_per_second = self.nodes[0].getnetworkhashps() # This should be 2 hashes every 10 minutes or 1/300 assert abs(hashes_per_second * 300 - 1) < 0.0001 def _test_stopatheight(self): + self.log.info("Test stopping at height") assert_equal(self.nodes[0].getblockcount(), HEIGHT) self.nodes[0].generatetoaddress(6, ADDRESS_BCRT1_P2WSH_OP_TRUE) assert_equal(self.nodes[0].getblockcount(), HEIGHT + 6) @@ -406,20 +412,20 @@ def _test_getblock(self): miniwallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node) blockhash = node.generate(1)[0] - self.log.info("Test that getblock with verbosity 1 doesn't include fee") + self.log.info("Test getblock with verbosity 1 doesn't include fee") block = node.getblock(blockhash, 1) assert 'fee' not in block['tx'][1] - self.log.info('Test that getblock with verbosity 2 includes expected fee') + self.log.info('Test getblock with verbosity 2 includes expected fee') block = node.getblock(blockhash, 2) tx = block['tx'][1] assert 'fee' in tx assert_equal(tx['fee'], tx['vsize'] * fee_per_byte) - self.log.info("Test that getblock with verbosity 2 still works with pruned Undo data") + self.log.info("Test getblock with verbosity 2 still works with pruned Undo data") datadir = get_datadir_path(self.options.tmpdir, 0) - self.log.info("Test that getblock with invalid verbosity type returns proper error message") + self.log.info("Test getblock with invalid verbosity type returns proper error message") assert_raises_rpc_error(-1, "JSON value is not an integer as expected", node.getblock, blockhash, "2") def move_block_file(old, new): From abc57e1f0882a1a2bb20474648419979af6e383d Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Thu, 22 Jul 2021 15:01:14 +0200 Subject: [PATCH 036/112] refactor: move `GetTransaction(...)` to node/transaction.cpp can be reviewed with --color-moved --- src/node/transaction.cpp | 35 +++++++++++++++++++++++++ src/node/transaction.h | 20 ++++++++++++++ src/validation.cpp | 33 ----------------------- src/validation.h | 15 +---------- test/lint/lint-circular-dependencies.sh | 1 - 5 files changed, 56 insertions(+), 48 deletions(-) diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index f21b390915..0227618edf 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -4,9 +4,12 @@ // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include +#include #include #include +#include #include +#include #include #include #include @@ -104,3 +107,35 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t return TransactionError::OK; } + +CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock) +{ + LOCK(cs_main); + + if (mempool && !block_index) { + CTransactionRef ptx = mempool->get(hash); + if (ptx) return ptx; + } + if (g_txindex) { + CTransactionRef tx; + uint256 block_hash; + if (g_txindex->FindTx(hash, block_hash, tx)) { + if (!block_index || block_index->GetBlockHash() == block_hash) { + hashBlock = block_hash; + return tx; + } + } + } + if (block_index) { + CBlock block; + if (ReadBlockFromDisk(block, block_index, consensusParams)) { + for (const auto& tx : block.vtx) { + if (tx->GetHash() == hash) { + hashBlock = block_index->GetBlockHash(); + return tx; + } + } + } + } + return nullptr; +} diff --git a/src/node/transaction.h b/src/node/transaction.h index 0c016ff04e..aed519cf7f 100644 --- a/src/node/transaction.h +++ b/src/node/transaction.h @@ -10,7 +10,12 @@ #include #include +class CBlockIndex; +class CTxMemPool; struct NodeContext; +namespace Consensus { +struct Params; +} /** Maximum fee rate for sendrawtransaction and testmempoolaccept RPC calls. * Also used by the GUI when broadcasting a completed PSBT. @@ -38,4 +43,19 @@ static const CFeeRate DEFAULT_MAX_RAW_TX_FEE_RATE{COIN / 10}; */ [[nodiscard]] TransactionError BroadcastTransaction(NodeContext& node, CTransactionRef tx, std::string& err_string, const CAmount& max_tx_fee, bool relay, bool wait_callback); +/** + * Return transaction with a given hash. + * If mempool is provided and block_index is not provided, check it first for the tx. + * If -txindex is available, check it next for the tx. + * Finally, if block_index is provided, check for tx by reading entire block from disk. 
+ * + * @param[in] block_index The block to read from disk, or nullptr + * @param[in] mempool If provided, check mempool for tx + * @param[in] hash The txid + * @param[in] consensusParams The params + * @param[out] hashBlock The block hash, if the tx was found via -txindex or block_index + * @returns The tx if found, otherwise nullptr + */ +CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock); + #endif // BITCOIN_NODE_TRANSACTION_H diff --git a/src/validation.cpp b/src/validation.cpp index 6a145a088a..3e5cc1e77c 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -1155,38 +1154,6 @@ PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTx return result; } -CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock) -{ - LOCK(cs_main); - - if (mempool && !block_index) { - CTransactionRef ptx = mempool->get(hash); - if (ptx) return ptx; - } - if (g_txindex) { - CTransactionRef tx; - uint256 block_hash; - if (g_txindex->FindTx(hash, block_hash, tx)) { - if (!block_index || block_index->GetBlockHash() == block_hash) { - hashBlock = block_hash; - return tx; - } - } - } - if (block_index) { - CBlock block; - if (ReadBlockFromDisk(block, block_index, consensusParams)) { - for (const auto& tx : block.vtx) { - if (tx->GetHash() == hash) { - hashBlock = block_index->GetBlockHash(); - return tx; - } - } - } - } - return nullptr; -} - CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams) { int halvings = nHeight / consensusParams.nSubsidyHalvingInterval; diff --git a/src/validation.h b/src/validation.h index 0cebf3967d..573246ddb6 100644 --- a/src/validation.h +++ b/src/validation.h @@ -141,20 +141,7 @@ void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman); void StartScriptCheckWorkerThreads(int threads_num); /** Stop all of the script checking worker threads */ void StopScriptCheckWorkerThreads(); -/** - * Return transaction with a given hash. - * If mempool is provided and block_index is not provided, check it first for the tx. - * If -txindex is available, check it next for the tx. - * Finally, if block_index is provided, check for tx by reading entire block from disk. 
- * - * @param[in] block_index The block to read from disk, or nullptr - * @param[in] mempool If provided, check mempool for tx - * @param[in] hash The txid - * @param[in] consensusParams The params - * @param[out] hashBlock The block hash, if the tx was found via -txindex or block_index - * @returns The tx if found, otherwise nullptr - */ -CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock); + CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams); bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str{}); diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh index f8f24bb1ff..df5051720b 100755 --- a/test/lint/lint-circular-dependencies.sh +++ b/test/lint/lint-circular-dependencies.sh @@ -10,7 +10,6 @@ export LC_ALL=C EXPECTED_CIRCULAR_DEPENDENCIES=( "chainparamsbase -> util/system -> chainparamsbase" - "index/txindex -> validation -> index/txindex" "node/blockstorage -> validation -> node/blockstorage" "index/blockfilterindex -> node/blockstorage -> validation -> index/blockfilterindex" "index/base -> validation -> index/blockfilterindex -> index/base" From f720cfa824f1be863349e7016080f8fb1c3c76c2 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Thu, 22 Jul 2021 17:37:37 +0200 Subject: [PATCH 037/112] test: verify number of categories returned by logging RPC --- test/functional/rpc_misc.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/test/functional/rpc_misc.py b/test/functional/rpc_misc.py index 52c8fa883d..9ad7827c8a 100755 --- a/test/functional/rpc_misc.py +++ b/test/functional/rpc_misc.py @@ -54,7 +54,12 @@ def run_test(self): assert_raises_rpc_error(-8, "unknown mode foobar", node.getmemoryinfo, mode="foobar") - self.log.info("test logging") + self.log.info("test logging rpc") + + # Test logging RPC returns the expected number of logging categories. + assert_equal(len(node.logging()), 24) + + # Test toggling a logging category on/off/on with the logging RPC. assert_equal(node.logging()['qt'], True) node.logging(exclude=['qt']) assert_equal(node.logging()['qt'], False) From f685a13bef0418663015ea6d8f448f075510c0ec Mon Sep 17 00:00:00 2001 From: John Newbery Date: Thu, 22 Jul 2021 20:32:04 +0200 Subject: [PATCH 038/112] doc: GetTransaction()/getrawtransaction follow-ups to #22383 --- src/node/transaction.cpp | 3 +++ src/rpc/rawtransaction.cpp | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index 0227618edf..4de0b302e6 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -121,6 +121,9 @@ CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMe uint256 block_hash; if (g_txindex->FindTx(hash, block_hash, tx)) { if (!block_index || block_index->GetBlockHash() == block_hash) { + // Don't return the transaction if the provided block hash doesn't match. + // The case where a transaction appears in multiple blocks (e.g. reorgs or + // BIP30) is handled by the block lookup below. 
hashBlock = block_hash; return tx; } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 30addf8af6..ed5e1d8c46 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -76,8 +76,8 @@ static RPCHelpMan getrawtransaction() "\nBy default, this call only returns a transaction if it is in the mempool. If -txindex is enabled\n" "and no blockhash argument is passed, it will return the transaction if it is in the mempool or any block.\n" - "If -txindex is not enabled and a blockhash argument is passed, it will return the transaction if\n" - "the specified block is available and the transaction is found in that block.\n" + "If a blockhash argument is passed, it will return the transaction if\n" + "the specified block is available and the transaction is in that block.\n" "\nHint: Use gettransaction for wallet transactions.\n" "\nIf verbose is 'true', returns an Object with information about 'txid'.\n" From 7c57297319bc386afaf06528778384fe58576ef9 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Thu, 22 Jul 2021 16:35:22 +0200 Subject: [PATCH 039/112] log: sort LogCategoriesList and LogCategoriesString alphabetically --- src/logging.cpp | 9 ++++++++- src/logging.h | 4 ++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/logging.cpp b/src/logging.cpp index e5187fd596..fcb46debf7 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -8,6 +8,8 @@ #include #include +#include +#include #include const char * const DEFAULT_DEBUGLOGFILE = "debug.log"; @@ -179,8 +181,13 @@ bool GetLogCategory(BCLog::LogFlags& flag, const std::string& str) std::vector BCLog::Logger::LogCategoriesList() const { + // Sort log categories by alphabetical order. + std::array categories; + std::copy(std::begin(LogCategories), std::end(LogCategories), categories.begin()); + std::sort(categories.begin(), categories.end(), [](auto a, auto b) { return a.category < b.category; }); + std::vector ret; - for (const CLogCategoryDesc& category_desc : LogCategories) { + for (const CLogCategoryDesc& category_desc : categories) { // Omit the special cases. if (category_desc.flag != BCLog::NONE && category_desc.flag != BCLog::ALL) { LogCategory catActive; diff --git a/src/logging.h b/src/logging.h index d04bc99268..38d73863e7 100644 --- a/src/logging.h +++ b/src/logging.h @@ -138,9 +138,9 @@ namespace BCLog { bool DisableCategory(const std::string& str); bool WillLogCategory(LogFlags category) const; - /** Returns a vector of the log categories */ + /** Returns a vector of the log categories in alphabetical order. */ std::vector LogCategoriesList() const; - /** Returns a string with the log categories */ + /** Returns a string with the log categories in alphabetical order. 
*/ std::string LogCategoriesString() const { return Join(LogCategoriesList(), ", ", [&](const LogCategory& i) { return i.category; }); From 17bbff3b88132c0c95b29b59100456b85e26df75 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Thu, 22 Jul 2021 12:01:21 +0200 Subject: [PATCH 040/112] log, refactor: use guard clause in LogCategoriesList() and minor formatting fixups --- src/logging.cpp | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/logging.cpp b/src/logging.cpp index fcb46debf7..b456108b61 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -126,8 +126,7 @@ bool BCLog::Logger::DefaultShrinkDebugFile() const return m_categories == BCLog::NONE; } -struct CLogCategoryDesc -{ +struct CLogCategoryDesc { BCLog::LogFlags flag; std::string category; }; @@ -188,13 +187,11 @@ std::vector BCLog::Logger::LogCategoriesList() const std::vector ret; for (const CLogCategoryDesc& category_desc : categories) { - // Omit the special cases. - if (category_desc.flag != BCLog::NONE && category_desc.flag != BCLog::ALL) { - LogCategory catActive; - catActive.category = category_desc.category; - catActive.active = WillLogCategory(category_desc.flag); - ret.push_back(catActive); - } + if (category_desc.flag == BCLog::NONE || category_desc.flag == BCLog::ALL) continue; + LogCategory catActive; + catActive.category = category_desc.category; + catActive.active = WillLogCategory(category_desc.flag); + ret.push_back(catActive); } return ret; } @@ -244,7 +241,7 @@ namespace BCLog { } return ret; } -} +} // namespace BCLog void BCLog::Logger::LogPrintStr(const std::string& str, const std::string& logging_function, const std::string& source_file, const int source_line) { From 33455c76964b9e27b33e970d9722cc47657b291b Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Thu, 22 Jul 2021 17:42:59 -0400 Subject: [PATCH 041/112] guix: Make all.SHA256SUMS rather than codesigned.SHA256SUMS --- contrib/guix/guix-attest | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/contrib/guix/guix-attest b/contrib/guix/guix-attest index 51d589c1de..396cb39895 100755 --- a/contrib/guix/guix-attest +++ b/contrib/guix/guix-attest @@ -207,8 +207,8 @@ mkdir -p "$outsigdir" exit 1 fi - temp_codesigned="$(mktemp)" - trap 'rm -rf -- "$temp_codesigned"' EXIT + temp_all="$(mktemp)" + trap 'rm -rf -- "$temp_all"' EXIT if (( ${#codesigned_fragments[@]} )); then # Note: all.SHA256SUMS attests to all of $sha256sum_fragments, but is @@ -218,18 +218,18 @@ mkdir -p "$outsigdir" | sort -k2 \ | sed 's/$/\r/' \ | rfc4880_normalize_document \ - > "$temp_codesigned" - if [ -e codesigned.SHA256SUMS ]; then + > "$temp_all" + if [ -e all.SHA256SUMS ]; then # The SHA256SUMS already exists, make sure it's exactly what we # expect, error out if not - if diff -u all.SHA256SUMS "$temp_codesigned"; then + if diff -u all.SHA256SUMS "$temp_all"; then echo "An all.SHA256SUMS file already exists for '${VERSION}' and is up-to-date." else shasum_already_exists all.SHA256SUMS exit 1 fi else - mv "$temp_codesigned" codesigned.SHA256SUMS + mv "$temp_all" all.SHA256SUMS fi else # It is fine to have the codesigned outputs be missing (perhaps the From 4a466388a0092fbdf5f8969c6bfb65bf8cc962e1 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Thu, 22 Jul 2021 13:25:57 -0400 Subject: [PATCH 042/112] guix: Allow changing the base manifest in guix-verify When verifying guix attestations, it is useful to set a particular signer's manifest as the base to compare against. 
--- contrib/guix/guix-verify | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/contrib/guix/guix-verify b/contrib/guix/guix-verify index a6e2c4065e..e4863f115b 100755 --- a/contrib/guix/guix-verify +++ b/contrib/guix/guix-verify @@ -28,7 +28,11 @@ cmd_usage() { cat < ./contrib/guix/guix-verify + env GUIX_SIGS_REPO= [ SIGNER= ] ./contrib/guix/guix-verify + +Example overriding signer's manifest to use as base + + env GUIX_SIGS_REPO=/home/dongcarl/guix.sigs SIGNER=achow101 ./contrib/guix/guix-verify EOF } @@ -92,6 +96,17 @@ echo "--------------------" echo "" if (( ${#all_noncodesigned[@]} )); then compare_noncodesigned="${all_noncodesigned[0]}" + if [[ -n "$SIGNER" ]]; then + signer_noncodesigned="$OUTSIGDIR_BASE/$SIGNER/noncodesigned.SHA256SUMS" + if [[ -f "$signer_noncodesigned" ]]; then + echo "Using $SIGNER's manifest as the base to compare against" + compare_noncodesigned="$signer_noncodesigned" + else + echo "Unable to find $SIGNER's manifest, using the first one found" + fi + else + echo "No SIGNER provided, using the first manifest found" + fi for current_manifest in "${all_noncodesigned[@]}"; do verify "$compare_noncodesigned" "$current_manifest" @@ -112,6 +127,17 @@ echo "--------------------" echo "" if (( ${#all_all[@]} )); then compare_all="${all_all[0]}" + if [[ -n "$SIGNER" ]]; then + signer_all="$OUTSIGDIR_BASE/$SIGNER/all.SHA256SUMS" + if [[ -f "$signer_all" ]]; then + echo "Using $SIGNER's manifest as the base to compare against" + compare_all="$signer_all" + else + echo "Unable to find $SIGNER's manifest, using the first one found" + fi + else + echo "No SIGNER provided, using the first manifest found" + fi for current_manifest in "${all_all[@]}"; do verify "$compare_all" "$current_manifest" From fa32024d51c098441623e246f304a80f011e29d1 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Thu, 22 Jul 2021 20:56:27 +0200 Subject: [PATCH 043/112] Add missing GUARDED_BY to CAddrMan::insecure_rand --- src/addrman.h | 4 ++-- src/test/addrman_tests.cpp | 3 ++- src/test/fuzz/addrman.cpp | 2 +- src/test/net_tests.cpp | 1 + 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/addrman.h b/src/addrman.h index d4f67c2f5b..6347a24d55 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -631,12 +631,12 @@ class CAddrMan uint256 nKey; //! Source of random numbers for randomization in inner loops - mutable FastRandomContext insecure_rand; + mutable FastRandomContext insecure_rand GUARDED_BY(cs); -private: //! A mutex to protect the inner data structures. mutable Mutex cs; +private: //! Serialization versions. enum Format : uint8_t { V0_HISTORICAL = 0, //!< historic format, before commit e6b343d88 diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp index 5e5c5eba69..79c7102c4f 100644 --- a/src/test/addrman_tests.cpp +++ b/src/test/addrman_tests.cpp @@ -34,6 +34,7 @@ class CAddrManTest : public CAddrMan //! Ensure that bucket placement is always the same for testing purposes. 
void MakeDeterministic() { + LOCK(cs); nKey.SetNull(); insecure_rand = FastRandomContext(true); } @@ -87,11 +88,11 @@ class CAddrManTest : public CAddrMan { CAddrMan::Clear(); if (deterministic) { + LOCK(cs); nKey.SetNull(); insecure_rand = FastRandomContext(true); } } - }; static CNetAddr ResolveIP(const std::string& ip) diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp index ee9485eba5..fb003ea3a8 100644 --- a/src/test/fuzz/addrman.cpp +++ b/src/test/fuzz/addrman.cpp @@ -27,7 +27,7 @@ class CAddrManDeterministic : public CAddrMan public: void MakeDeterministic(const uint256& random_seed) { - insecure_rand = FastRandomContext{random_seed}; + WITH_LOCK(cs, insecure_rand = FastRandomContext{random_seed}); Clear(); } }; diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 46f88c1282..acbbf357d2 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -37,6 +37,7 @@ class CAddrManSerializationMock : public CAddrMan //! Ensure that bucket placement is always the same for testing purposes. void MakeDeterministic() { + LOCK(cs); nKey.SetNull(); insecure_rand = FastRandomContext(true); } From fae108ceb53f61d7338ba205873623ede3c1d3be Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Thu, 22 Jul 2021 20:58:10 +0200 Subject: [PATCH 044/112] Fix incorrect whitespace in addrman Leaving it as-is would be annoying because some editor fix-up the spacing when opening a file or editing it. --- src/addrman.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/addrman.cpp b/src/addrman.cpp index db5166be44..91b6d3fc83 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -507,15 +507,15 @@ int CAddrMan::Check_() for (int n = 0; n < ADDRMAN_TRIED_BUCKET_COUNT; n++) { for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) { - if (vvTried[n][i] != -1) { - if (!setTried.count(vvTried[n][i])) - return -11; - if (mapInfo[vvTried[n][i]].GetTriedBucket(nKey, m_asmap) != n) - return -17; - if (mapInfo[vvTried[n][i]].GetBucketPosition(nKey, false, n) != i) - return -18; - setTried.erase(vvTried[n][i]); - } + if (vvTried[n][i] != -1) { + if (!setTried.count(vvTried[n][i])) + return -11; + if (mapInfo[vvTried[n][i]].GetTriedBucket(nKey, m_asmap) != n) + return -17; + if (mapInfo[vvTried[n][i]].GetBucketPosition(nKey, false, n) != i) + return -18; + setTried.erase(vvTried[n][i]); + } } } From d7b7f610a53aa62bd82f6704a9c518a93c896963 Mon Sep 17 00:00:00 2001 From: h Date: Fri, 23 Jul 2021 20:15:16 +0530 Subject: [PATCH 045/112] Updated Readme, Corrected the codesign typo --- contrib/guix/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/guix/README.md b/contrib/guix/README.md index 4680368a6f..fa7f63af87 100644 --- a/contrib/guix/README.md +++ b/contrib/guix/README.md @@ -87,7 +87,7 @@ crucial differences: An invocation with all default options would look like: ``` -env DETACHED_SIGS_REPO= ./contrib/guix-codesign +env DETACHED_SIGS_REPO= ./contrib/guix/guix-codesign ``` ## Cleaning intermediate work directories From 198ceb82f91bfdeac6e143ca7433f4a524f6f36f Mon Sep 17 00:00:00 2001 From: jonatack Date: Fri, 23 Jul 2021 20:59:54 +0530 Subject: [PATCH 046/112] script, doc: guix touchups --- contrib/guix/README.md | 7 +++---- doc/build-openbsd.md | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/contrib/guix/README.md b/contrib/guix/README.md index fa7f63af87..2bb464a40d 100644 --- a/contrib/guix/README.md +++ b/contrib/guix/README.md @@ -11,7 +11,7 @@ We achieve bootstrappability by using Guix as a 
functional package manager. # Requirements -Conservatively, a x86_64 machine with: +Conservatively, you will need an x86_64 machine with: - 16GB of free disk space on the partition that /gnu/store will reside in - 8GB of free disk space **per platform triple** you're planning on building @@ -437,9 +437,8 @@ In the extraordinarily rare case where you messed up your Guix installation in an irreversible way, you may want to completely purge Guix from your system and start over. -1. Uninstall Guix itself according to the way you installed it. (e.g. `sudo apt - purge guix` for Ubuntu packaging, `sudo make uninstall` for - built-from-source). +1. Uninstall Guix itself according to the way you installed it (e.g. `sudo apt + purge guix` for Ubuntu packaging, `sudo make uninstall` for a build from source). 2. Remove all build users and groups You may check for relevant users and groups using: diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md index 89fd506f13..6e54f67edc 100644 --- a/doc/build-openbsd.md +++ b/doc/build-openbsd.md @@ -68,7 +68,7 @@ export AUTOMAKE_VERSION=1.16 Make sure `BDB_PREFIX` is set to the appropriate path from the above steps. Note that building with external signer support currently fails on OpenBSD, -hence you have to explicitely disable it by passing the parameter +hence you have to explicitly disable it by passing the parameter `--disable-external-signer` to the configure script. (Background: the feature requires the header-only library boost::process, which is available on OpenBSD 6.9 via Boost 1.72.0, but contains certain system calls From a5f6791139554936d13f367660283899a37ff5c7 Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Mon, 24 May 2021 01:40:17 +0200 Subject: [PATCH 047/112] rpc: Add missing gettxoutsetinfo help docs --- src/rpc/blockchain.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index ee2f5a549b..86f6a4320e 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1117,13 +1117,13 @@ static RPCHelpMan gettxoutsetinfo() {RPCResult::Type::STR_AMOUNT, "total_unspendable_amount", "The total amount of coins permanently excluded from the UTXO set (only available if coinstatsindex is used)"}, {RPCResult::Type::OBJ, "block_info", "Info on amounts in the block at this block height (only available if coinstatsindex is used)", { - {RPCResult::Type::STR_AMOUNT, "prevout_spent", ""}, - {RPCResult::Type::STR_AMOUNT, "coinbase", ""}, - {RPCResult::Type::STR_AMOUNT, "new_outputs_ex_coinbase", ""}, - {RPCResult::Type::STR_AMOUNT, "unspendable", ""}, + {RPCResult::Type::STR_AMOUNT, "prevout_spent", "Total amount of all prevouts spent in this block"}, + {RPCResult::Type::STR_AMOUNT, "coinbase", "Coinbase subsidy amount of this block"}, + {RPCResult::Type::STR_AMOUNT, "new_outputs_ex_coinbase", "Total amount of new outputs created by this block"}, + {RPCResult::Type::STR_AMOUNT, "unspendable", "Total amount of unspendable outputs created in this block"}, {RPCResult::Type::OBJ, "unspendables", "Detailed view of the unspendable categories", { - {RPCResult::Type::STR_AMOUNT, "genesis_block", ""}, + {RPCResult::Type::STR_AMOUNT, "genesis_block", "The unspendable amount of the Genesis block subsidy"}, {RPCResult::Type::STR_AMOUNT, "bip30", "Transactions overridden by duplicates (no longer possible with BIP30)"}, {RPCResult::Type::STR_AMOUNT, "scripts", "Amounts sent to scripts that are unspendable (for example OP_RETURN outputs)"}, {RPCResult::Type::STR_AMOUNT, 
"unclaimed_rewards", "Fee rewards that miners did not claim in their coinbase transaction"}, From d4356d4e48f59c63894b68691cc21ed4892ee716 Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Mon, 24 May 2021 17:04:58 +0200 Subject: [PATCH 048/112] rpc: Block until synced if coinstatsindex is used in gettxoutsetinfo During initial sync after startup the gettxoutsetinfo RPC will still return an error while catching up. However, after the initial sync the index will not error immediately anymore when it's in the process of syncing to the tip while being called. Instead it will block until synced and then return the response. --- src/rpc/blockchain.cpp | 19 ++++++++++++------- test/functional/feature_coinstatsindex.py | 14 +------------- 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 86f6a4320e..2347fcbb73 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1175,6 +1175,18 @@ static RPCHelpMan gettxoutsetinfo() pindex = ParseHashOrHeight(request.params[1], chainman); } + if (stats.index_requested && g_coin_stats_index) { + if (!g_coin_stats_index->BlockUntilSyncedToCurrentChain()) { + const IndexSummary summary{g_coin_stats_index->GetSummary()}; + + // If a specific block was requested and the index has already synced past that height, we can return the + // data already even though the index is not fully synced yet. + if (pindex->nHeight > summary.best_block_height) { + throw JSONRPCError(RPC_INTERNAL_ERROR, strprintf("Unable to get data because coinstatsindex is still syncing. Current height: %d", summary.best_block_height)); + } + } + } + if (GetUTXOStats(coins_view, *blockman, stats, node.rpc_interruption_point, pindex)) { ret.pushKV("height", (int64_t)stats.nHeight); ret.pushKV("bestblock", stats.hashBlock.GetHex()); @@ -1215,13 +1227,6 @@ static RPCHelpMan gettxoutsetinfo() ret.pushKV("block_info", block_info); } } else { - if (g_coin_stats_index) { - const IndexSummary summary{g_coin_stats_index->GetSummary()}; - - if (!summary.synced) { - throw JSONRPCError(RPC_INTERNAL_ERROR, strprintf("Unable to read UTXO set because coinstatsindex is still syncing. 
Current height: %d", summary.best_block_height)); - } - } throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set"); } return ret; diff --git a/test/functional/feature_coinstatsindex.py b/test/functional/feature_coinstatsindex.py index d3adde5cc5..c2bc485d6b 100755 --- a/test/functional/feature_coinstatsindex.py +++ b/test/functional/feature_coinstatsindex.py @@ -32,7 +32,6 @@ from test_framework.util import ( assert_equal, assert_raises_rpc_error, - try_rpc, ) class CoinStatsIndexTest(BitcoinTestFramework): @@ -76,13 +75,11 @@ def _test_coin_stats_index(self): self.sync_blocks(timeout=120) self.log.info("Test that gettxoutsetinfo() output is consistent with or without coinstatsindex option") - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", node.gettxoutsetinfo)) res0 = node.gettxoutsetinfo('none') # The fields 'disk_size' and 'transactions' do not exist on the index del res0['disk_size'], res0['transactions'] - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) for hash_option in index_hash_options: res1 = index_node.gettxoutsetinfo(hash_option) # The fields 'block_info' and 'total_unspendable_amount' only exist on the index @@ -97,7 +94,6 @@ def _test_coin_stats_index(self): # Generate a new tip node.generate(5) - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) for hash_option in index_hash_options: # Fetch old stats by height res2 = index_node.gettxoutsetinfo(hash_option, 102) @@ -176,7 +172,6 @@ def _test_coin_stats_index(self): self.nodes[0].generate(1) self.sync_all() - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) for hash_option in index_hash_options: # Check all amounts were registered correctly res6 = index_node.gettxoutsetinfo(hash_option, 108) @@ -209,7 +204,6 @@ def _test_coin_stats_index(self): self.nodes[0].submitblock(ToHex(block)) self.sync_all() - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) for hash_option in index_hash_options: res7 = index_node.gettxoutsetinfo(hash_option, 109) assert_equal(res7['total_unspendable_amount'], Decimal('80.98999999')) @@ -235,7 +229,6 @@ def _test_coin_stats_index(self): assert_equal(res8, res9) index_node.generate(1) - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) res10 = index_node.gettxoutsetinfo('muhash') assert(res8['txouts'] < res10['txouts']) @@ -256,14 +249,12 @@ def _test_reorg_index(self): index_node = self.nodes[1] reorg_blocks = index_node.generatetoaddress(2, index_node.getnewaddress()) reorg_block = reorg_blocks[1] - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) res_invalid = index_node.gettxoutsetinfo('muhash') index_node.invalidateblock(reorg_blocks[0]) assert_equal(index_node.gettxoutsetinfo('muhash')['height'], 110) # Add two new blocks block = index_node.generate(2)[1] - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) res = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=False) # Test that the result of the reorged block is not returned for its old block height @@ -284,9 +275,7 @@ def _test_reorg_index(self): # Ensure that removing and re-adding blocks yields consistent results block = index_node.getblockhash(99) 
index_node.invalidateblock(block) - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) index_node.reconsiderblock(block) - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) res3 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=112) assert_equal(res2, res3) @@ -296,8 +285,7 @@ def _test_reorg_index(self): node.getblock(reorg_block) self.restart_node(0, ["-coinstatsindex"]) - self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", node.gettxoutsetinfo, 'muhash')) - assert_raises_rpc_error(-32603, "Unable to read UTXO set", node.gettxoutsetinfo, 'muhash', reorg_block) + assert_raises_rpc_error(-32603, "Unable to get data because coinstatsindex is still syncing.", node.gettxoutsetinfo, 'muhash', reorg_block) def _test_index_rejects_hash_serialized(self): self.log.info("Test that the rpc raises if the legacy hash is passed with the index") From 5b3d4e724f377834e24b1f014787cc7aa7fc30fe Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Mon, 24 May 2021 18:37:40 +0200 Subject: [PATCH 049/112] Index: Improve logging in coinstatsindex More accurate logging of a warning should make clear if the recovery condition was hit while catching the results of the previous block. --- src/index/coinstatsindex.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index b3f5d75fb3..849ea752f2 100644 --- a/src/index/coinstatsindex.cpp +++ b/src/index/coinstatsindex.cpp @@ -122,9 +122,12 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) uint256 expected_block_hash{pindex->pprev->GetBlockHash()}; if (read_out.first != expected_block_hash) { + LogPrintf("WARNING: previous block header belongs to unexpected block %s; expected %s\n", + read_out.first.ToString(), expected_block_hash.ToString()); + if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) { - return error("%s: previous block header belongs to unexpected block %s; expected %s", - __func__, read_out.first.ToString(), expected_block_hash.ToString()); + return error("%s: previous block header not found; expected %s", + __func__, expected_block_hash.ToString()); } } @@ -392,9 +395,12 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex uint256 expected_block_hash{pindex->pprev->GetBlockHash()}; if (read_out.first != expected_block_hash) { + LogPrintf("WARNING: previous block header belongs to unexpected block %s; expected %s\n", + read_out.first.ToString(), expected_block_hash.ToString()); + if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) { - return error("%s: previous block header belongs to unexpected block %s; expected %s", - __func__, read_out.first.ToString(), expected_block_hash.ToString()); + return error("%s: previous block header not found; expected %s", + __func__, expected_block_hash.ToString()); } } } From 779e638ca9b2b37c247577d225b93ac762b0602f Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Thu, 3 Jun 2021 02:06:00 +0200 Subject: [PATCH 050/112] coinstats: Add comments for new coinstatsindex values --- src/node/coinstats.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/node/coinstats.h b/src/node/coinstats.h index ae2e46e4d9..69e856dd15 100644 --- a/src/node/coinstats.h +++ b/src/node/coinstats.h @@ -45,14 +45,24 @@ struct CCoinsStats bool index_used{false}; // Following values are only available from coinstats 
index + + //! Total cumulative amount of block subsidies up to and including this block CAmount total_subsidy{0}; + //! Total cumulative amount of unspendable coins up to and including this block CAmount total_unspendable_amount{0}; + //! Total cumulative amount of prevouts spent up to and including this block CAmount total_prevout_spent_amount{0}; + //! Total cumulative amount of outputs created up to and including this block CAmount total_new_outputs_ex_coinbase_amount{0}; + //! Total cumulative amount of coinbase outputs up to and including this block CAmount total_coinbase_amount{0}; + //! The unspendable coinbase amount from the genesis block CAmount total_unspendables_genesis_block{0}; + //! The two unspendable coinbase outputs total amount caused by BIP30 CAmount total_unspendables_bip30{0}; + //! Total cumulative amount of outputs sent to unspendable scripts (OP_RETURN for example) up to and including this block CAmount total_unspendables_scripts{0}; + //! Total cumulative amount of coins lost due to unclaimed miner rewards up to and including this block CAmount total_unspendables_unclaimed_rewards{0}; CCoinsStats(CoinStatsHashType hash_type) : m_hash_type(hash_type) {} From e3237b1cd07a5099fbb0108218194eb653b6a9f3 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Sun, 25 Jul 2021 23:42:27 +0200 Subject: [PATCH 051/112] test: check that CSV/CLTV are active in rpc_signrawtransaction Without this check, the tests would also pass if the CSV and CLTV activation heights are not reached yet (e.g. if the .generate() calls before are removed), as the operations OP_CSV and OP_CLTV simply behave as NOPs. Also fixes a comment in the sub-test `test_signing_with_cltv()`. --- test/functional/rpc_signrawtransaction.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py index f3627d1e37..71933fe1ba 100755 --- a/test/functional/rpc_signrawtransaction.py +++ b/test/functional/rpc_signrawtransaction.py @@ -271,6 +271,7 @@ def test_signing_with_csv(self): # Make sure CSV is active self.nodes[0].generate(500) + assert self.nodes[0].getblockchaininfo()['softforks']['csv']['active'] # Create a P2WSH script with CSV script = CScript([1, OP_CHECKSEQUENCEVERIFY, OP_DROP]) @@ -304,8 +305,9 @@ def test_signing_with_cltv(self): self.nodes[0].walletpassphrase("password", 9999) getcontext().prec = 8 - # Make sure CSV is active + # Make sure CLTV is active self.nodes[0].generate(1500) + assert self.nodes[0].getblockchaininfo()['softforks']['bip65']['active'] # Create a P2WSH script with CLTV script = CScript([1000, OP_CHECKLOCKTIMEVERIFY, OP_DROP]) From 8858e88c840197cdcabea07dd1380ef2aa4ece02 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Mon, 12 Jul 2021 23:53:45 +0200 Subject: [PATCH 052/112] p2p: refactor: tidy up `PeerManagerImpl::Misbehaving(...)` - introduce constant variables `score_before` and `score_after` in order to improve readability - deduplicate calls to LogPrint(), eliminates else-branch --- src/net_processing.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 9c4544df21..d23e546330 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1280,14 +1280,20 @@ void PeerManagerImpl::Misbehaving(const NodeId pnode, const int howmuch, const s if (peer == nullptr) return; LOCK(peer->m_misbehavior_mutex); + const int score_before{peer->m_misbehavior_score}; peer->m_misbehavior_score 
+= howmuch; + const int score_now{peer->m_misbehavior_score}; + const std::string message_prefixed = message.empty() ? "" : (": " + message); - if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD && peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) { - LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) DISCOURAGE THRESHOLD EXCEEDED%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed); + std::string warning; + + if (score_now >= DISCOURAGEMENT_THRESHOLD && score_before < DISCOURAGEMENT_THRESHOLD) { + warning = " DISCOURAGE THRESHOLD EXCEEDED"; peer->m_should_discourage = true; - } else { - LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed); } + + LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n", + pnode, score_before, score_now, warning, message_prefixed); } bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, From 8ca51af1ece371b6f1bdb88b96f16020cc787d13 Mon Sep 17 00:00:00 2001 From: Martin Zumsande Date: Mon, 26 Jul 2021 17:36:01 +0200 Subject: [PATCH 053/112] test: Disable automatic connections by default This prevents the node from trying to connect to random IPs on the internet while running the functional tests. Exceptions are added when required for the test to pass. --- test/functional/feature_anchors.py | 1 + test/functional/feature_config_args.py | 1 + test/functional/test_framework/test_framework.py | 9 ++++++--- test/functional/test_framework/util.py | 8 +++++--- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/test/functional/feature_anchors.py b/test/functional/feature_anchors.py index 24bb02bc90..c39f6e6d4b 100755 --- a/test/functional/feature_anchors.py +++ b/test/functional/feature_anchors.py @@ -23,6 +23,7 @@ def check_node_connections(*, node, num_in, num_out): class AnchorsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 + self.disable_autoconnect = False def setup_network(self): self.setup_nodes() diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index de9d0d2e80..5a23df8a13 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -17,6 +17,7 @@ def set_test_params(self): self.num_nodes = 1 self.supports_cli = False self.wallet_names = [] + self.disable_autoconnect = False def test_config_file_parser(self): self.stop_node(0) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 40360c54a0..6d8e6ef45c 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -112,6 +112,9 @@ def __init__(self): # By default the wallet is not required. Set to true by skip_if_no_wallet(). # When False, we ignore wallet_names regardless of what it is. self.requires_wallet = False + # Disable ThreadOpenConnections by default, so that adding entries to + # addrman will not result in automatic connections to them. 
+ self.disable_autoconnect = True self.set_test_params() assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes if self.options.timeout_factor == 0 : @@ -711,7 +714,7 @@ def _initialize_chain(self): if not os.path.isdir(cache_node_dir): self.log.debug("Creating cache directory {}".format(cache_node_dir)) - initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain) + initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain, self.disable_autoconnect) self.nodes.append( TestNode( CACHE_NODE_ID, @@ -769,7 +772,7 @@ def cache_path(*paths): self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i)) to_dir = get_datadir_path(self.options.tmpdir, i) shutil.copytree(cache_node_dir, to_dir) - initialize_datadir(self.options.tmpdir, i, self.chain) # Overwrite port/rpcport in bitcoin.conf + initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect) # Overwrite port/rpcport in bitcoin.conf def _initialize_chain_clean(self): """Initialize empty blockchain for use by the test. @@ -777,7 +780,7 @@ def _initialize_chain_clean(self): Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization.""" for i in range(self.num_nodes): - initialize_datadir(self.options.tmpdir, i, self.chain) + initialize_datadir(self.options.tmpdir, i, self.chain, self.disable_autoconnect) def skip_if_no_py3_zmq(self): """Attempt to import the zmq package and skip the test if the import fails.""" diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 35dbfbba8d..96839abb0e 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -338,17 +338,17 @@ def rpc_url(datadir, i, chain, rpchost): ################ -def initialize_datadir(dirname, n, chain): +def initialize_datadir(dirname, n, chain, disable_autoconnect=True): datadir = get_datadir_path(dirname, n) if not os.path.isdir(datadir): os.makedirs(datadir) - write_config(os.path.join(datadir, "bitcoin.conf"), n=n, chain=chain) + write_config(os.path.join(datadir, "bitcoin.conf"), n=n, chain=chain, disable_autoconnect=disable_autoconnect) os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True) os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True) return datadir -def write_config(config_path, *, n, chain, extra_config=""): +def write_config(config_path, *, n, chain, extra_config="", disable_autoconnect=True): # Translate chain subdirectory name to config name if chain == 'testnet3': chain_name_conf_arg = 'testnet' @@ -376,6 +376,8 @@ def write_config(config_path, *, n, chain, extra_config=""): f.write("shrinkdebugfile=0\n") # To improve SQLite wallet performance so that the tests don't timeout, use -unsafesqlitesync f.write("unsafesqlitesync=1\n") + if disable_autoconnect: + f.write("connect=0\n") f.write(extra_config) From 746f203f1950a7df50b9a7de87a361cc7354ffb4 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Mon, 26 Jul 2021 00:26:55 +0200 Subject: [PATCH 054/112] test: introduce `generate_to_height` helper, use in rpc_signrawtransaction This will speed up the test a bit and avoid potential .generate() RPC timeouts (in sub-test `test_signing_with_cltv()`) on slower machines. 
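For illustration only (not part of this patch): the chunked-generation approach described above, as a minimal self-contained Python sketch. `FakeNode` is a hypothetical stand-in for a test framework node; the real helper added to `test_framework/util.py` appears in the diff below.

```python
# Sketch only; not part of the patch. FakeNode stands in for a test node.
class FakeNode:
    def __init__(self):
        self._height = 0

    def getblockcount(self):
        return self._height

    def generate(self, nblocks):
        # The real RPC returns the hashes of the generated blocks.
        self._height += nblocks
        return ["dummy_hash"] * nblocks


def generate_to_height_sketch(node, target_height, max_blocks_per_call=200):
    """Generate blocks in bounded chunks so no single RPC call risks a timeout."""
    current_height = node.getblockcount()
    while current_height < target_height:
        nblocks = min(max_blocks_per_call, target_height - current_height)
        current_height += len(node.generate(nblocks))
    assert node.getblockcount() == target_height


node = FakeNode()
generate_to_height_sketch(node, 1500)  # e.g. past a soft-fork activation height
print(node.getblockcount())  # 1500
```

Capping each call (here at 200 blocks, matching the helper below) keeps every individual `generate` RPC request short, which is what avoids the timeouts mentioned above.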
--- test/functional/rpc_signrawtransaction.py | 5 +++-- test/functional/test_framework/util.py | 11 +++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py index 71933fe1ba..dacd7b9c5e 100755 --- a/test/functional/rpc_signrawtransaction.py +++ b/test/functional/rpc_signrawtransaction.py @@ -15,6 +15,7 @@ assert_equal, assert_raises_rpc_error, find_vout_for_address, + generate_to_height, hex_str_to_bytes, ) from test_framework.messages import ( @@ -270,7 +271,7 @@ def test_signing_with_csv(self): getcontext().prec = 8 # Make sure CSV is active - self.nodes[0].generate(500) + generate_to_height(self.nodes[0], 500) assert self.nodes[0].getblockchaininfo()['softforks']['csv']['active'] # Create a P2WSH script with CSV @@ -306,7 +307,7 @@ def test_signing_with_cltv(self): getcontext().prec = 8 # Make sure CLTV is active - self.nodes[0].generate(1500) + generate_to_height(self.nodes[0], 1500) assert self.nodes[0].getblockchaininfo()['softforks']['bip65']['active'] # Create a P2WSH script with CLTV diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 35dbfbba8d..fcaf3b2c29 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -559,6 +559,17 @@ def mine_large_block(node, utxos=None): node.generate(1) +def generate_to_height(node, target_height): + """Generates blocks until a given target block height has been reached. + To prevent timeouts, only up to 200 blocks are generated per RPC call. + Can be used to activate certain soft-forks (e.g. CSV, CLTV).""" + current_height = node.getblockcount() + while current_height < target_height: + nblocks = min(200, target_height - current_height) + current_height += len(node.generate(nblocks)) + assert_equal(node.getblockcount(), target_height) + + def find_vout_for_address(node, txid, addr): """ Locate the vout index of the given transaction sending to the From 12f094ec215aacf30e4e380c0399f80d4e45c345 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Mon, 26 Jul 2021 00:33:58 +0200 Subject: [PATCH 055/112] test: use constants for CSV/CLTV activation heights in rpc_signrawtransaction --- test/functional/feature_cltv.py | 3 +-- test/functional/feature_csv_activation.py | 2 +- test/functional/rpc_signrawtransaction.py | 10 +++++++--- test/functional/test_framework/blocktools.py | 4 ++++ 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py index 10d2072dba..7c14f5d5a6 100755 --- a/test/functional/feature_cltv.py +++ b/test/functional/feature_cltv.py @@ -9,6 +9,7 @@ """ from test_framework.blocktools import ( + CLTV_HEIGHT, create_block, create_coinbase, ) @@ -31,8 +32,6 @@ MiniWalletMode, ) -CLTV_HEIGHT = 1351 - # Helper function to modify a transaction by # 1) prepending a given script to the scriptSig of vin 0 and diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py index 5081867319..1ac1a0563f 100755 --- a/test/functional/feature_csv_activation.py +++ b/test/functional/feature_csv_activation.py @@ -41,6 +41,7 @@ import time from test_framework.blocktools import ( + CSV_ACTIVATION_HEIGHT, create_block, create_coinbase, ) @@ -63,7 +64,6 @@ TESTING_TX_COUNT = 83 # Number of testing transactions: 1 BIP113 tx, 16 BIP68 txs, 66 BIP112 txs (see comments above) COINBASE_BLOCK_COUNT = TESTING_TX_COUNT # Number of coinbase blocks we need to 
generate as inputs for our txs BASE_RELATIVE_LOCKTIME = 10 -CSV_ACTIVATION_HEIGHT = 432 SEQ_DISABLE_FLAG = 1 << 31 SEQ_RANDOM_HIGH_BIT = 1 << 25 SEQ_TYPE_FLAG = 1 << 22 diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py index dacd7b9c5e..571029155e 100755 --- a/test/functional/rpc_signrawtransaction.py +++ b/test/functional/rpc_signrawtransaction.py @@ -4,7 +4,11 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test transaction signing using the signrawtransaction* RPCs.""" -from test_framework.blocktools import COINBASE_MATURITY +from test_framework.blocktools import ( + CLTV_HEIGHT, + COINBASE_MATURITY, + CSV_ACTIVATION_HEIGHT, +) from test_framework.address import ( script_to_p2sh, script_to_p2wsh, @@ -271,7 +275,7 @@ def test_signing_with_csv(self): getcontext().prec = 8 # Make sure CSV is active - generate_to_height(self.nodes[0], 500) + generate_to_height(self.nodes[0], CSV_ACTIVATION_HEIGHT) assert self.nodes[0].getblockchaininfo()['softforks']['csv']['active'] # Create a P2WSH script with CSV @@ -307,7 +311,7 @@ def test_signing_with_cltv(self): getcontext().prec = 8 # Make sure CLTV is active - generate_to_height(self.nodes[0], 1500) + generate_to_height(self.nodes[0], CLTV_HEIGHT) assert self.nodes[0].getblockchaininfo()['softforks']['bip65']['active'] # Create a P2WSH script with CLTV diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 833a215993..2ab720aafb 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -55,6 +55,10 @@ # Coinbase transaction outputs can only be spent after this number of new blocks (network rule) COINBASE_MATURITY = 100 +# Soft-fork activation heights +CLTV_HEIGHT = 1351 +CSV_ACTIVATION_HEIGHT = 432 + # From BIP141 WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed" From fbeb8c43bc5bce131e15eb9e162ea457bfe2b83e Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 28 May 2021 12:03:37 +0800 Subject: [PATCH 056/112] test: add type annotations to util.get_rpc_proxy Remove proxy.url assignment: error: "AuthServiceProxy" has no attribute "url" --- test/functional/test_framework/coverage.py | 21 ++++++++++++--------- test/functional/test_framework/test_node.py | 2 +- test/functional/test_framework/util.py | 13 ++++++------- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py index 7705dd3e4d..ad8cfe5c9a 100644 --- a/test/functional/test_framework/coverage.py +++ b/test/functional/test_framework/coverage.py @@ -10,6 +10,7 @@ import os +from .authproxy import AuthServiceProxy REFERENCE_FILENAME = 'rpc_interface.txt' @@ -19,16 +20,17 @@ class AuthServiceProxyWrapper(): An object that wraps AuthServiceProxy to record specific RPC calls. """ - def __init__(self, auth_service_proxy_instance, coverage_logfile=None): + def __init__(self, auth_service_proxy_instance: AuthServiceProxy, rpc_url: str, coverage_logfile: str=None): """ Kwargs: - auth_service_proxy_instance (AuthServiceProxy): the instance - being wrapped. - coverage_logfile (str): if specified, write each service_name + auth_service_proxy_instance: the instance being wrapped. + rpc_url: url of the RPC instance being wrapped + coverage_logfile: if specified, write each service_name out to a file when called. 
""" self.auth_service_proxy_instance = auth_service_proxy_instance + self.rpc_url = rpc_url self.coverage_logfile = coverage_logfile def __getattr__(self, name): @@ -36,7 +38,7 @@ def __getattr__(self, name): if not isinstance(return_val, type(self.auth_service_proxy_instance)): # If proxy getattr returned an unwrapped value, do the same here. return return_val - return AuthServiceProxyWrapper(return_val, self.coverage_logfile) + return AuthServiceProxyWrapper(return_val, self.rpc_url, self.coverage_logfile) def __call__(self, *args, **kwargs): """ @@ -57,6 +59,7 @@ def _log_call(self): def __truediv__(self, relative_uri): return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri, + self.rpc_url, self.coverage_logfile) def get_request(self, *args, **kwargs): @@ -74,18 +77,18 @@ def get_filename(dirname, n_node): dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node))) -def write_all_rpc_commands(dirname, node): +def write_all_rpc_commands(dirname: str, node: AuthServiceProxy) -> bool: """ Write out a list of all RPC functions available in `bitcoin-cli` for coverage comparison. This will only happen once per coverage directory. Args: - dirname (str): temporary test dir - node (AuthServiceProxy): client + dirname: temporary test dir + node: client Returns: - bool. if the RPC interface file was written. + if the RPC interface file was written. """ filename = os.path.join(dirname, REFERENCE_FILENAME) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index afa904c8d7..f9e2cfa2f5 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -258,7 +258,7 @@ def wait_for_rpc_connection(self): return self.rpc = rpc self.rpc_connected = True - self.url = self.rpc.url + self.url = self.rpc.rpc_url return except JSONRPCException as e: # Initialization phase # -28 RPC in warmup diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 35dbfbba8d..285a1bf52c 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -286,15 +286,15 @@ class PortSeed: n = None -def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None): +def get_rpc_proxy(url: str, node_number: int, *, timeout: int=None, coveragedir: str=None) -> coverage.AuthServiceProxyWrapper: """ Args: - url (str): URL of the RPC server to call - node_number (int): the node number (or id) that this calls to + url: URL of the RPC server to call + node_number: the node number (or id) that this calls to Kwargs: - timeout (int): HTTP timeout in seconds - coveragedir (str): Directory + timeout: HTTP timeout in seconds + coveragedir: Directory Returns: AuthServiceProxy. convenience object for making RPC calls. @@ -305,11 +305,10 @@ def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None): proxy_kwargs['timeout'] = int(timeout) proxy = AuthServiceProxy(url, **proxy_kwargs) - proxy.url = url # store URL on proxy for info coverage_logfile = coverage.get_filename(coveragedir, node_number) if coveragedir else None - return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) + return coverage.AuthServiceProxyWrapper(proxy, url, coverage_logfile) def p2p_port(n): From 4148c5228fe26175141c84af3e0465088dcfd869 Mon Sep 17 00:00:00 2001 From: "Cuong V. 
Nguyen" Date: Tue, 27 Jul 2021 14:01:08 +0700 Subject: [PATCH 057/112] Fix typo in comment --- contrib/verifybinaries/verify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/verifybinaries/verify.py b/contrib/verifybinaries/verify.py index 6cbaf2dc0c..51c151add8 100755 --- a/contrib/verifybinaries/verify.py +++ b/contrib/verifybinaries/verify.py @@ -2,7 +2,7 @@ # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Script for verifying Bitoin Core release binaries +"""Script for verifying Bitcoin Core release binaries This script attempts to download the signature file SHA256SUMS.asc from bitcoincore.org and bitcoin.org and compares them. From e4c8bb62e4a6873c45f42d0d2a24927cb241a0ea Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Fri, 7 May 2021 19:25:31 +0300 Subject: [PATCH 058/112] build: Fix undefined reference to __mulodi4 When compiling with clang on 32-bit systems the __mulodi4 symbol is defined in compiler-rt only. --- build-aux/m4/bitcoin_runtime_lib.m4 | 42 +++++++++++++++++++++++ configure.ac | 4 +++ src/Makefile.test.include | 2 +- src/test/fuzz/multiplication_overflow.cpp | 12 +++---- 4 files changed, 51 insertions(+), 9 deletions(-) create mode 100644 build-aux/m4/bitcoin_runtime_lib.m4 diff --git a/build-aux/m4/bitcoin_runtime_lib.m4 b/build-aux/m4/bitcoin_runtime_lib.m4 new file mode 100644 index 0000000000..1a6922deca --- /dev/null +++ b/build-aux/m4/bitcoin_runtime_lib.m4 @@ -0,0 +1,42 @@ +# On some platforms clang builtin implementations +# require compiler-rt as a runtime library to use. +# +# See: +# - https://bugs.llvm.org/show_bug.cgi?id=28629 + +m4_define([_CHECK_RUNTIME_testbody], [[ + bool f(long long x, long long y, long long* p) + { + return __builtin_mul_overflow(x, y, p); + } + int main() { return 0; } +]]) + +AC_DEFUN([CHECK_RUNTIME_LIB], [ + + AC_LANG_PUSH([C++]) + + AC_MSG_CHECKING([for __builtin_mul_overflow]) + AC_LINK_IFELSE( + [AC_LANG_SOURCE([_CHECK_RUNTIME_testbody])], + [ + AC_MSG_RESULT([yes]) + AC_DEFINE([HAVE_BUILTIN_MUL_OVERFLOW], [1], [Define if you have a working __builtin_mul_overflow]) + ], + [ + ax_check_save_flags="$LDFLAGS" + LDFLAGS="$LDFLAGS --rtlib=compiler-rt -lgcc_s" + AC_LINK_IFELSE( + [AC_LANG_SOURCE([_CHECK_RUNTIME_testbody])], + [ + AC_MSG_RESULT([yes, with additional linker flags]) + RUNTIME_LDFLAGS="--rtlib=compiler-rt -lgcc_s" + AC_DEFINE([HAVE_BUILTIN_MUL_OVERFLOW], [1], [Define if you have a working __builtin_mul_overflow]) + ], + [AC_MSG_RESULT([no])]) + LDFLAGS="$ax_check_save_flags" + ]) + + AC_LANG_POP + AC_SUBST([RUNTIME_LDFLAGS]) +]) diff --git a/configure.ac b/configure.ac index 2bc404250d..0a5456acff 100644 --- a/configure.ac +++ b/configure.ac @@ -1773,6 +1773,10 @@ if test x$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_ AC_MSG_ERROR([No targets! 
Please specify at least one of: --with-utils --with-libs --with-daemon --with-gui --enable-bench or --enable-tests]) fi +if test x$enable_fuzz_binary = xyes; then + CHECK_RUNTIME_LIB +fi + AM_CONDITIONAL([TARGET_DARWIN], [test x$TARGET_OS = xdarwin]) AM_CONDITIONAL([BUILD_DARWIN], [test x$BUILD_OS = xdarwin]) AM_CONDITIONAL([TARGET_LINUX], [test x$TARGET_OS = xlinux]) diff --git a/src/Makefile.test.include b/src/Makefile.test.include index d55f5e1850..15929e7352 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -200,7 +200,7 @@ if ENABLE_FUZZ_BINARY test_fuzz_fuzz_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_fuzz_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_fuzz_LDADD = $(FUZZ_SUITE_LD_COMMON) -test_fuzz_fuzz_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) +test_fuzz_fuzz_LDFLAGS = $(FUZZ_SUITE_LDFLAGS_COMMON) $(RUNTIME_LDFLAGS) test_fuzz_fuzz_SOURCES = \ test/fuzz/addition_overflow.cpp \ test/fuzz/addrdb.cpp \ diff --git a/src/test/fuzz/multiplication_overflow.cpp b/src/test/fuzz/multiplication_overflow.cpp index 0f054529a6..c7251650c2 100644 --- a/src/test/fuzz/multiplication_overflow.cpp +++ b/src/test/fuzz/multiplication_overflow.cpp @@ -2,6 +2,10 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#if defined(HAVE_CONFIG_H) +#include +#endif + #include #include #include @@ -10,14 +14,6 @@ #include #include -#if defined(__has_builtin) -#if __has_builtin(__builtin_mul_overflow) -#define HAVE_BUILTIN_MUL_OVERFLOW -#endif -#elif defined(__GNUC__) -#define HAVE_BUILTIN_MUL_OVERFLOW -#endif - namespace { template void TestMultiplicationOverflow(FuzzedDataProvider& fuzzed_data_provider) From 787296eb6744b15ab693c053e4030ff68dfc95e0 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Mon, 26 Jul 2021 18:00:46 +0200 Subject: [PATCH 059/112] fuzz: silence a compiler warning about unused CBanEntry comparator ``` test/fuzz/banman.cpp:35:13: warning: unused function 'operator==' [-Wunused-function] static bool operator==(const CBanEntry& lhs, const CBanEntry& rhs) ^ 1 warning generated. ``` --- src/test/fuzz/banman.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/test/fuzz/banman.cpp b/src/test/fuzz/banman.cpp index 85a2ac990c..1986b5e4c8 100644 --- a/src/test/fuzz/banman.cpp +++ b/src/test/fuzz/banman.cpp @@ -109,8 +109,9 @@ FUZZ_TARGET_INIT(banman, initialize_banman) BanMan ban_man_read{banlist_file, /* client_interface */ nullptr, /* default_ban_time */ 0}; banmap_t banmap_read; ban_man_read.GetBanned(banmap_read); - // Temporarily disabled to allow the remainder of the fuzz test to run while a fix is being worked on: - // assert(banmap == banmap_read); + // Assert temporarily disabled to allow the remainder of the fuzz test to run while a + // fix is being worked on. See https://github.com/bitcoin/bitcoin/pull/22517 + (void)(banmap == banmap_read); } } fs::remove(banlist_file.string() + ".dat"); From 84ace9aef116a05e034730f2bb2f109d1d77aac7 Mon Sep 17 00:00:00 2001 From: 0xb10c <0xb10c@gmail.com> Date: Thu, 20 May 2021 15:21:34 +0200 Subject: [PATCH 060/112] doc: Add initial USDT documentation Both added files are extended in the following commits. doc/usdt.md is based on earlier work by laanwj. Co-authored-by: W. J. 
van der Laan --- contrib/tracing/README.md | 45 +++++++++ doc/tracing.md | 204 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 249 insertions(+) create mode 100644 contrib/tracing/README.md create mode 100644 doc/tracing.md diff --git a/contrib/tracing/README.md b/contrib/tracing/README.md new file mode 100644 index 0000000000..e4da724750 --- /dev/null +++ b/contrib/tracing/README.md @@ -0,0 +1,45 @@ +Example scripts for User-space, Statically Defined Tracing (USDT) +================================================================= + +This directory contains scripts showcasing User-space, Statically Defined +Tracing (USDT) support for Bitcoin Core on Linux using. For more information on +USDT support in Bitcoin Core see the [USDT documentation]. + +[USDT documentation]: ../../doc/tracing.md + + +Examples for the two main eBPF front-ends, [bpftrace] and +[BPF Compiler Collection (BCC)], with support for USDT, are listed. BCC is used +for complex tools and daemons and `bpftrace` is preferred for one-liners and +shorter scripts. + +[bpftrace]: https://github.com/iovisor/bpftrace +[BPF Compiler Collection (BCC)]: https://github.com/iovisor/bcc + + +To develop and run bpftrace and BCC scripts you need to install the +corresponding packages. See [installing bpftrace] and [installing BCC] for more +information. For development there exist a [bpftrace Reference Guide], a +[BCC Reference Guide], and a [bcc Python Developer Tutorial]. + +[installing bpftrace]: https://github.com/iovisor/bpftrace/blob/master/INSTALL.md +[installing BCC]: https://github.com/iovisor/bcc/blob/master/INSTALL.md +[bpftrace Reference Guide]: https://github.com/iovisor/bpftrace/blob/master/docs/reference_guide.md +[BCC Reference Guide]: https://github.com/iovisor/bcc/blob/master/docs/reference_guide.md +[bcc Python Developer Tutorial]: https://github.com/iovisor/bcc/blob/master/docs/tutorial_bcc_python_developer.md + +## Examples + +The bpftrace examples contain a relative path to the `bitcoind` binary. By +default, the scripts should be run from the repository-root and assume a +self-compiled `bitcoind` binary. The paths in the examples can be changed, for +example, to point to release builds if needed. See the +[Bitcoin Core USDT documentation] on how to list available tracepoints in your +`bitcoind` binary. + +[Bitcoin Core USDT documentation]: ../../doc/tracing.md#listing-available-tracepoints + +**WARNING: eBPF programs require root privileges to be loaded into a Linux +kernel VM. This means the bpftrace and BCC examples must be executed with root +privileges. Make sure to carefully review any scripts that you run with root +privileges first!** diff --git a/doc/tracing.md b/doc/tracing.md new file mode 100644 index 0000000000..4c472b4154 --- /dev/null +++ b/doc/tracing.md @@ -0,0 +1,204 @@ +# User-space, Statically Defined Tracing (USDT) for Bitcoin Core + +Bitcoin Core includes statically defined tracepoints to allow for more +observability during development, debugging, code review, and production usage. +These tracepoints make it possible to keep track of custom statistics and +enable detailed monitoring of otherwise hidden internals. They have +little to no performance impact when unused. + +``` +eBPF and USDT Overview +====================== + + ┌──────────────────┐ ┌──────────────┐ + │ tracing script │ │ bitcoind │ + │==================│ 2. │==============│ + │ eBPF │ tracing │ hooks │ │ + │ code │ logic │ into┌─┤►tracepoint 1─┼───┐ 3. 
+ └────┬───┴──▲──────┘ ├─┤►tracepoint 2 │ │ pass args + 1. │ │ 4. │ │ ... │ │ to eBPF + User compiles │ │ pass data to │ └──────────────┘ │ program + Space & loads │ │ tracing script │ │ + ─────────────────┼──────┼─────────────────┼────────────────────┼─── + Kernel │ │ │ │ + Space ┌──┬─▼──────┴─────────────────┴────────────┐ │ + │ │ eBPF program │◄──────┘ + │ └───────────────────────────────────────┤ + │ eBPF kernel Virtual Machine (sandboxed) │ + └──────────────────────────────────────────┘ + +1. The tracing script compiles the eBPF code and loads the eBPF program into a kernel VM +2. The eBPF program hooks into one or more tracepoints +3. When the tracepoint is called, the arguments are passed to the eBPF program +4. The eBPF program processes the arguments and returns data to the tracing script +``` + +The Linux kernel can hook into the tracepoints during runtime and pass data to +sandboxed [eBPF] programs running in the kernel. These eBPF programs can, for +example, collect statistics or pass data back to user-space scripts for further +processing. + +[eBPF]: https://ebpf.io/ + +The two main eBPF front-ends with support for USDT are [bpftrace] and +[BPF Compiler Collection (BCC)]. BCC is used for complex tools and daemons and +`bpftrace` is preferred for one-liners and shorter scripts. Examples for both can +be found in [contrib/tracing]. + +[bpftrace]: https://github.com/iovisor/bpftrace +[BPF Compiler Collection (BCC)]: https://github.com/iovisor/bcc +[contrib/tracing]: ../contrib/tracing/ + +## Tracepoint documentation + +The currently available tracepoints are listed here. + +## Adding tracepoints to Bitcoin Core + +To add a new tracepoint, `#include ` in the compilation unit where +the tracepoint is inserted. Use one of the `TRACEx` macros listed below +depending on the number of arguments passed to the tracepoint. Up to 12 +arguments can be provided. The `context` and `event` specify the names by which +the tracepoint is referred to. Please use `snake_case` and try to make sure that +the tracepoint names make sense even without detailed knowledge of the +implementation details. Do not forget to update the tracepoint list in this +document. + +```c +#define TRACE(context, event) +#define TRACE1(context, event, a) +#define TRACE2(context, event, a, b) +#define TRACE3(context, event, a, b, c) +#define TRACE4(context, event, a, b, c, d) +#define TRACE5(context, event, a, b, c, d, e) +#define TRACE6(context, event, a, b, c, d, e, f) +#define TRACE7(context, event, a, b, c, d, e, f, g) +#define TRACE8(context, event, a, b, c, d, e, f, g, h) +#define TRACE9(context, event, a, b, c, d, e, f, g, h, i) +#define TRACE10(context, event, a, b, c, d, e, f, g, h, i, j) +#define TRACE11(context, event, a, b, c, d, e, f, g, h, i, j, k) +#define TRACE12(context, event, a, b, c, d, e, f, g, h, i, j, k, l) +``` + +For example: + +```C++ +TRACE6(net, inbound_message, + pnode->GetId(), + pnode->GetAddrName().c_str(), + pnode->ConnectionTypeAsString().c_str(), + sanitizedType.c_str(), + msg.data.size(), + msg.data.data() +); +``` + +### Guidelines and best practices + +#### Clear motivation and use-case +Tracepoints need a clear motivation and use-case. The motivation should +outweigh the impact on, for example, code readability. There is no point in +adding tracepoints that don't end up being used. + +#### Provide an example +When adding a new tracepoint, provide an example. Examples can show the use case +and help reviewers testing that the tracepoint works as intended. 
The examples +can be kept simple but should give others a starting point when working with +the tracepoint. See existing examples in [contrib/tracing/]. + +[contrib/tracing/]: ../contrib/tracing/ + +#### No expensive computations for tracepoints +Data passed to the tracepoint should be inexpensive to compute. Although the +tracepoint itself only has overhead when enabled, the code to compute arguments +is always run - even if the tracepoint is not used. For example, avoid +serialization and parsing. + +#### Semi-stable API +Tracepoints should have a semi-stable API. Users should be able to rely on the +tracepoints for scripting. This means tracepoints need to be documented, and the +argument order ideally should not change. If there is an important reason to +change argument order, make sure to document the change and update the examples +using the tracepoint. + +#### eBPF Virtual Machine limits +Keep the eBPF Virtual Machine limits in mind. eBPF programs receiving data from +the tracepoints run in a sandboxed Linux kernel VM. This VM has a limited stack +size of 512 bytes. Check if it makes sense to pass larger amounts of data, for +example, with a tracing script that can handle the passed data. + +#### `bpftrace` argument limit +While tracepoints can have up to 12 arguments, bpftrace scripts currently only +support reading from the first six arguments (`arg0` till `arg5`) on `x86_64`. +bpftrace currently lacks real support for handling and printing binary data, +like block header hashes and txids. When a tracepoint passes more than six +arguments, then string and integer arguments should preferably be placed in the +first six argument fields. Binary data can be placed in later arguments. The BCC +supports reading from all 12 arguments. + +#### Strings as C-style String +Generally, strings should be passed into the `TRACEx` macros as pointers to +C-style strings (a null-terminated sequence of characters). For C++ +`std::strings`, [`c_str()`] can be used. It's recommended to document the +maximum expected string size if known. + + +[`c_str()`]: https://www.cplusplus.com/reference/string/string/c_str/ + + +## Listing available tracepoints + +Multiple tools can list the available tracepoints in a `bitcoind` binary with +USDT support. + +### GDB - GNU Project Debugger + +To list probes in Bitcoin Core, use `info probes` in `gdb`: + +``` +$ gdb ./src/bitcoind +… +(gdb) info probes +Type Provider Name Where Semaphore Object +stap net inbound_message 0x000000000014419e /src/bitcoind +stap net outbound_message 0x0000000000107c05 /src/bitcoind +stap validation block_connected 0x00000000002fb10c /src/bitcoind +… +``` + +### With `readelf` + +The `readelf` tool can be used to display the USDT tracepoints in Bitcoin Core. +Look for the notes with the description `NT_STAPSDT`. + +``` +$ readelf -n ./src/bitcoind | grep NT_STAPSDT -A 4 -B 2 +Displaying notes found in: .note.stapsdt + Owner Data size Description + stapsdt 0x0000005d NT_STAPSDT (SystemTap probe descriptors) + Provider: net + Name: outbound_message + Location: 0x0000000000107c05, Base: 0x0000000000579c90, Semaphore: 0x0000000000000000 + Arguments: -8@%r12 8@%rbx 8@%rdi 8@192(%rsp) 8@%rax 8@%rdx +… +``` + +### With `tplist` + +The `tplist` tool is provided by BCC (see [Installing BCC]). It displays kernel +tracepoints or USDT probes and their formats (for more information, see the +[`tplist` usage demonstration]). There are slight binary naming differences +between distributions. 
For example, on +[Ubuntu the binary is called `tplist-bpfcc`][ubuntu binary]. + +[Installing BCC]: https://github.com/iovisor/bcc/blob/master/INSTALL.md +[`tplist` usage demonstration]: https://github.com/iovisor/bcc/blob/master/tools/tplist_example.txt +[ubuntu binary]: https://github.com/iovisor/bcc/blob/master/INSTALL.md#ubuntu---binary + +``` +$ tplist -l ./src/bitcoind -v +b'net':b'outbound_message' [sema 0x0] + 1 location(s) + 6 argument(s) +… +``` From 469b71ae629228b2591a55831817a0e5fad89360 Mon Sep 17 00:00:00 2001 From: 0xb10c <0xb10c@gmail.com> Date: Mon, 7 Jun 2021 18:24:11 +0200 Subject: [PATCH 061/112] doc: document systemtap dependency --- doc/build-unix.md | 9 +++++++++ doc/dependencies.md | 2 ++ 2 files changed, 11 insertions(+) diff --git a/doc/build-unix.md b/doc/build-unix.md index 73c0bf8779..4a56114109 100644 --- a/doc/build-unix.md +++ b/doc/build-unix.md @@ -48,6 +48,7 @@ Optional dependencies: univalue | Utility | JSON parsing and encoding (bundled version will be used unless --with-system-univalue passed to configure) libzmq3 | ZMQ notification | Optional, allows generating ZMQ notifications (requires ZMQ version >= 4.0.0) sqlite3 | SQLite DB | Optional, wallet storage (only needed when wallet enabled) + systemtap | Tracing (USDT) | Optional, statically defined tracepoints For the versions used, see [dependencies.md](dependencies.md) @@ -107,6 +108,10 @@ ZMQ dependencies (provides ZMQ API): sudo apt-get install libzmq3-dev +User-Space, Statically Defined Tracing (USDT) dependencies: + + sudo apt install systemtap-sdt-dev + GUI dependencies: If you want to build bitcoin-qt, make sure that the required packages for Qt development @@ -162,6 +167,10 @@ ZMQ dependencies (provides ZMQ API): sudo dnf install zeromq-devel +User-Space, Statically Defined Tracing (USDT) dependencies: + + sudo dnf install systemtap + GUI dependencies: If you want to build bitcoin-qt, make sure that the required packages for Qt development diff --git a/doc/dependencies.md b/doc/dependencies.md index 66c5a76b3b..b7634718e8 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -24,6 +24,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct | Qt | [5.12.11](https://download.qt.io/official_releases/qt/) | [5.9.5](https://github.com/bitcoin/bitcoin/issues/20104) | No | | | | SQLite | [3.32.1](https://sqlite.org/download.html) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | | | | | XCB | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Linux only) | +| systemtap ([tracing](tracing.md))| | | | | | | xkbcommon | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Linux only) | | ZeroMQ | [4.3.1](https://github.com/zeromq/libzmq/releases) | 4.0.0 | No | | | | zlib | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) | @@ -41,6 +42,7 @@ Some dependencies are not needed in all configurations. The following are some f * SQLite is not needed with `--disable-wallet` or `--without-sqlite`. * Qt is not needed with `--without-gui`. * If the qrencode dependency is absent, QR support won't be added. To force an error when that happens, pass `--with-qrencode`. +* If the systemtap dependency is absent, USDT support won't be compiled in. * ZeroMQ is needed only with the `--with-zmq` option. 
#### Other From 4224dec22baa66547303840707cf1d4f15a49b20 Mon Sep 17 00:00:00 2001 From: 0xb10c <0xb10c@gmail.com> Date: Thu, 20 May 2021 16:54:54 +0200 Subject: [PATCH 062/112] tracing: Tracepoints for in- and outbound P2P msgs Can be used to monitor in- and outbound node traffic. Based on ealier work by jb55. Co-authored-by: William Casarin --- contrib/tracing/README.md | 120 +++++++++++++ contrib/tracing/log_p2p_traffic.bt | 28 ++++ contrib/tracing/log_raw_p2p_msgs.py | 180 ++++++++++++++++++++ contrib/tracing/p2p_monitor.py | 250 ++++++++++++++++++++++++++++ doc/tracing.md | 40 +++++ src/net.cpp | 12 +- src/net_processing.cpp | 10 ++ 7 files changed, 639 insertions(+), 1 deletion(-) create mode 100755 contrib/tracing/log_p2p_traffic.bt create mode 100755 contrib/tracing/log_raw_p2p_msgs.py create mode 100755 contrib/tracing/p2p_monitor.py diff --git a/contrib/tracing/README.md b/contrib/tracing/README.md index e4da724750..2d81e375dc 100644 --- a/contrib/tracing/README.md +++ b/contrib/tracing/README.md @@ -43,3 +43,123 @@ example, to point to release builds if needed. See the kernel VM. This means the bpftrace and BCC examples must be executed with root privileges. Make sure to carefully review any scripts that you run with root privileges first!** + +### log_p2p_traffic.bt + +A bpftrace script logging information about inbound and outbound P2P network +messages. Based on the `net:inbound_message` and `net:outbound_message` +tracepoints. + +By default, `bpftrace` limits strings to 64 bytes due to the limited stack size +in the eBPF VM. For example, Tor v3 addresses exceed the string size limit which +results in the port being cut off during logging. The string size limit can be +increased with the `BPFTRACE_STRLEN` environment variable (`BPFTRACE_STRLEN=70` +works fine). + +``` +$ bpftrace contrib/tracing/log_p2p_traffic.bt +``` + +Output +``` +outbound 'ping' msg to peer 11 (outbound-full-relay, [2a02:b10c:f747:1:ef:fake:ipv6:addr]:8333) with 8 bytes +inbound 'pong' msg from peer 11 (outbound-full-relay, [2a02:b10c:f747:1:ef:fake:ipv6:addr]:8333) with 8 bytes +inbound 'inv' msg from peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 37 bytes +outbound 'getdata' msg to peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 37 bytes +inbound 'tx' msg from peer 16 (outbound-full-relay, XX.XX.XXX.121:8333) with 222 bytes +outbound 'inv' msg to peer 9 (outbound-full-relay, faketorv3addressa2ufa6odvoi3s77j4uegey0xb10csyfyve2t33curbyd.onion:8333) with 37 bytes +outbound 'inv' msg to peer 7 (outbound-full-relay, XX.XX.XXX.242:8333) with 37 bytes +… +``` + +### p2p_monitor.py + +A BCC Python script using curses for an interactive P2P message monitor. Based +on the `net:inbound_message` and `net:outbound_message` tracepoints. + +Inbound and outbound traffic is listed for each peer together with information +about the connection. Peers can be selected individually to view recent P2P +messages. + +``` +$ python3 contrib/tracing/p2p_monitor.py ./src/bitcoind +``` + +Lists selectable peers and traffic and connection information. 
+``` + P2P Message Monitor + Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages + + PEER OUTBOUND INBOUND TYPE ADDR + 0 46 398 byte 61 1407590 byte block-relay-only XX.XX.XXX.196:8333 + 11 1156 253570 byte 3431 2394924 byte outbound-full-relay XXX.X.XX.179:8333 + 13 3425 1809620 byte 1236 305458 byte inbound XXX.X.X.X:60380 + 16 1046 241633 byte 1589 1199220 byte outbound-full-relay 4faketorv2pbfu7x.onion:8333 + 19 577 181679 byte 390 148951 byte outbound-full-relay kfake4vctorjv2o2.onion:8333 + 20 11 1248 byte 13 1283 byte block-relay-only [2600:fake:64d9:b10c:4436:aaaa:fe:bb]:8333 + 21 11 1248 byte 13 1299 byte block-relay-only XX.XXX.X.155:8333 + 22 5 103 byte 1 102 byte feeler XX.XX.XXX.173:8333 + 23 11 1248 byte 12 1255 byte block-relay-only XX.XXX.XXX.220:8333 + 24 3 103 byte 1 102 byte feeler XXX.XXX.XXX.64:8333 +… +``` + +Showing recent P2P messages between our node and a selected peer. + +``` + ---------------------------------------------------------------------- + | PEER 16 (4faketorv2pbfu7x.onion:8333) | + | OUR NODE outbound-full-relay PEER | + | <--- sendcmpct (9 bytes) | + | inv (37 byte) ---> | + | <--- ping (8 bytes) | + | pong (8 byte) ---> | + | inv (37 byte) ---> | + | <--- addr (31 bytes) | + | inv (37 byte) ---> | + | <--- getheaders (1029 bytes) | + | headers (1 byte) ---> | + | <--- feefilter (8 bytes) | + | <--- pong (8 bytes) | + | <--- headers (82 bytes) | + | <--- addr (30003 bytes) | + | inv (1261 byte) ---> | + | … | + +``` + +### log_raw_p2p_msgs.py + +A BCC Python script showcasing eBPF and USDT limitations when passing data +larger than about 32kb. Based on the `net:inbound_message` and +`net:outbound_message` tracepoints. + +Bitcoin P2P messages can be larger than 32kb (e.g. `tx`, `block`, ...). The +eBPF VM's stack is limited to 512 bytes, and we can't allocate more than about +32kb for a P2P message in the eBPF VM. The **message data is cut off** when the +message is larger than MAX_MSG_DATA_LENGTH (see script). This can be detected +in user-space by comparing the data length to the message length variable. The +message is cut off when the data length is smaller than the message length. +A warning is included with the printed message data. + +Data is submitted to user-space (i.e. to this script) via a ring buffer. The +throughput of the ring buffer is limited. Each p2p_message is about 32kb in +size. In- or outbound messages submitted to the ring buffer in rapid +succession fill the ring buffer faster than it can be read. Some messages are +lost. BCC prints: `Possibly lost 2 samples` on lost messages. + + +``` +$ python3 contrib/tracing/log_raw_p2p_msgs.py ./src/bitcoind +``` + +``` +Logging raw P2P messages. +Messages larger that about 32kb will be cut off! +Some messages might be lost! + outbound msg 'inv' from peer 4 (outbound-full-relay, XX.XXX.XX.4:8333) with 253 bytes: 0705000000be2245c8f844c9f763748e1a7… +… +Warning: incomplete message (only 32568 out of 53552 bytes)! 
inbound msg 'tx' from peer 32 (outbound-full-relay, XX.XXX.XXX.43:8333) with 53552 bytes: 020000000001fd3c01939c85ad6756ed9fc… +… +Possibly lost 2 samples +``` diff --git a/contrib/tracing/log_p2p_traffic.bt b/contrib/tracing/log_p2p_traffic.bt new file mode 100755 index 0000000000..f62956aa5e --- /dev/null +++ b/contrib/tracing/log_p2p_traffic.bt @@ -0,0 +1,28 @@ +#!/usr/bin/env bpftrace + +BEGIN +{ + printf("Logging P2P traffic\n") +} + +usdt:./src/bitcoind:net:inbound_message +{ + $peer_id = (int64) arg0; + $peer_addr = str(arg1); + $peer_type = str(arg2); + $msg_type = str(arg3); + $msg_len = arg4; + printf("inbound '%s' msg from peer %d (%s, %s) with %d bytes\n", $msg_type, $peer_id, $peer_type, $peer_addr, $msg_len); +} + +usdt:./src/bitcoind:net:outbound_message +{ + $peer_id = (int64) arg0; + $peer_addr = str(arg1); + $peer_type = str(arg2); + $msg_type = str(arg3); + $msg_len = arg4; + + printf("outbound '%s' msg to peer %d (%s, %s) with %d bytes\n", $msg_type, $peer_id, $peer_type, $peer_addr, $msg_len); +} + diff --git a/contrib/tracing/log_raw_p2p_msgs.py b/contrib/tracing/log_raw_p2p_msgs.py new file mode 100755 index 0000000000..b5b5755632 --- /dev/null +++ b/contrib/tracing/log_raw_p2p_msgs.py @@ -0,0 +1,180 @@ +#!/usr/bin/env python3 + +""" Demonstration of eBPF limitations and the effect on USDT with the + net:inbound_message and net:outbound_message tracepoints. """ + +# This script shows a limitation of eBPF when data larger than 32kb is passed to +# user-space. It uses BCC (https://github.com/iovisor/bcc) to load a sandboxed +# eBPF program into the Linux kernel (root privileges are required). The eBPF +# program attaches to two statically defined tracepoints. The tracepoint +# 'net:inbound_message' is called when a new P2P message is received, and +# 'net:outbound_message' is called on outbound P2P messages. The eBPF program +# submits the P2P messages to this script via a BPF ring buffer. The submitted +# messages are printed. + +# eBPF Limitations: +# +# Bitcoin P2P messages can be larger than 32kb (e.g. tx, block, ...). The eBPF +# VM's stack is limited to 512 bytes, and we can't allocate more than about 32kb +# for a P2P message in the eBPF VM. The message data is cut off when the message +# is larger than MAX_MSG_DATA_LENGTH (see definition below). This can be detected +# in user-space by comparing the data length to the message length variable. The +# message is cut off when the data length is smaller than the message length. +# A warning is included with the printed message data. +# +# Data is submitted to user-space (i.e. to this script) via a ring buffer. The +# throughput of the ring buffer is limited. Each p2p_message is about 32kb in +# size. In- or outbound messages submitted to the ring buffer in rapid +# succession fill the ring buffer faster than it can be read. Some messages are +# lost. +# +# BCC prints: "Possibly lost 2 samples" on lost messages. + +import sys +from bcc import BPF, USDT + +# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into +# a sandboxed Linux kernel VM. +program = """ +#include + +#define MIN(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a < _b ? _a : _b; }) + +// Maximum possible allocation size +// from include/linux/percpu.h in the Linux kernel +#define PCPU_MIN_UNIT_SIZE (32 << 10) + +// Tor v3 addresses are 62 chars + 6 chars for the port (':12345'). 
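+//
+// The defines below bound the fixed-size fields of struct p2p_message.
+// MAX_MSG_DATA_LENGTH subtracts 200 bytes from the 32kb allocation to leave
+// room for the metadata fields (peer id, address, connection type, message
+// type, message size) and padding, so the whole struct still fits into the
+// BPF array element allocated further down.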
+#define MAX_PEER_ADDR_LENGTH 62 + 6 +#define MAX_PEER_CONN_TYPE_LENGTH 20 +#define MAX_MSG_TYPE_LENGTH 20 +#define MAX_MSG_DATA_LENGTH PCPU_MIN_UNIT_SIZE - 200 + +struct p2p_message +{ + u64 peer_id; + char peer_addr[MAX_PEER_ADDR_LENGTH]; + char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH]; + char msg_type[MAX_MSG_TYPE_LENGTH]; + u64 msg_size; + u8 msg[MAX_MSG_DATA_LENGTH]; +}; + +// We can't store the p2p_message struct on the eBPF stack as it is limited to +// 512 bytes and P2P message can be bigger than 512 bytes. However, we can use +// an BPF-array with a length of 1 to allocate up to 32768 bytes (this is +// defined by PCPU_MIN_UNIT_SIZE in include/linux/percpu.h in the Linux kernel). +// Also see https://github.com/iovisor/bcc/issues/2306 +BPF_ARRAY(msg_arr, struct p2p_message, 1); + +// Two BPF perf buffers for pushing data (here P2P messages) to user-space. +BPF_PERF_OUTPUT(inbound_messages); +BPF_PERF_OUTPUT(outbound_messages); + +int trace_inbound_message(struct pt_regs *ctx) { + int idx = 0; + struct p2p_message *msg = msg_arr.lookup(&idx); + + // lookup() does not return a NULL pointer. However, the BPF verifier + // requires an explicit check that that the `msg` pointer isn't a NULL + // pointer. See https://github.com/iovisor/bcc/issues/2595 + if (msg == NULL) return 1; + + bpf_usdt_readarg(1, ctx, &msg->peer_id); + bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg->msg_size); + bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH)); + + inbound_messages.perf_submit(ctx, msg, sizeof(*msg)); + return 0; +}; + +int trace_outbound_message(struct pt_regs *ctx) { + int idx = 0; + struct p2p_message *msg = msg_arr.lookup(&idx); + + // lookup() does not return a NULL pointer. However, the BPF verifier + // requires an explicit check that that the `msg` pointer isn't a NULL + // pointer. See https://github.com/iovisor/bcc/issues/2595 + if (msg == NULL) return 1; + + bpf_usdt_readarg(1, ctx, &msg->peer_id); + bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg->msg_size); + bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH)); + + outbound_messages.perf_submit(ctx, msg, sizeof(*msg)); + return 0; +}; +""" + + +def print_message(event, inbound): + print(f"%s %s msg '%s' from peer %d (%s, %s) with %d bytes: %s" % + ( + f"Warning: incomplete message (only %d out of %d bytes)!" 
% ( + len(event.msg), event.msg_size) if len(event.msg) < event.msg_size else "", + "inbound" if inbound else "outbound", + event.msg_type.decode("utf-8"), + event.peer_id, + event.peer_conn_type.decode("utf-8"), + event.peer_addr.decode("utf-8"), + event.msg_size, + bytes(event.msg[:event.msg_size]).hex(), + ) + ) + + +def main(bitcoind_path): + bitcoind_with_usdts = USDT(path=str(bitcoind_path)) + + # attaching the trace functions defined in the BPF program to the tracepoints + bitcoind_with_usdts.enable_probe( + probe="inbound_message", fn_name="trace_inbound_message") + bitcoind_with_usdts.enable_probe( + probe="outbound_message", fn_name="trace_outbound_message") + bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) + + # BCC: perf buffer handle function for inbound_messages + def handle_inbound(_, data, size): + """ Inbound message handler. + + Called each time a message is submitted to the inbound_messages BPF table.""" + + event = bpf["inbound_messages"].event(data) + print_message(event, True) + + # BCC: perf buffer handle function for outbound_messages + + def handle_outbound(_, data, size): + """ Outbound message handler. + + Called each time a message is submitted to the outbound_messages BPF table.""" + + event = bpf["outbound_messages"].event(data) + print_message(event, False) + + # BCC: add handlers to the inbound and outbound perf buffers + bpf["inbound_messages"].open_perf_buffer(handle_inbound) + bpf["outbound_messages"].open_perf_buffer(handle_outbound) + + print("Logging raw P2P messages.") + print("Messages larger that about 32kb will be cut off!") + print("Some messages might be lost!") + while True: + try: + bpf.perf_buffer_poll() + except KeyboardInterrupt: + exit() + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("USAGE:", sys.argv[0], "path/to/bitcoind") + exit() + path = sys.argv[1] + main(path) diff --git a/contrib/tracing/p2p_monitor.py b/contrib/tracing/p2p_monitor.py new file mode 100755 index 0000000000..14e3e3a801 --- /dev/null +++ b/contrib/tracing/p2p_monitor.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python3 + +""" Interactive bitcoind P2P network traffic monitor utilizing USDT and the + net:inbound_message and net:outbound_message tracepoints. """ + +# This script demonstrates what USDT for Bitcoin Core can enable. It uses BCC +# (https://github.com/iovisor/bcc) to load a sandboxed eBPF program into the +# Linux kernel (root privileges are required). The eBPF program attaches to two +# statically defined tracepoints. The tracepoint 'net:inbound_message' is called +# when a new P2P message is received, and 'net:outbound_message' is called on +# outbound P2P messages. The eBPF program submits the P2P messages to +# this script via a BPF ring buffer. + +import sys +import curses +from curses import wrapper, panel +from bcc import BPF, USDT + +# BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into +# a sandboxed Linux kernel VM. +program = """ +#include + +// Tor v3 addresses are 62 chars + 6 chars for the port (':12345'). +// I2P addresses are 60 chars + 6 chars for the port (':12345'). +#define MAX_PEER_ADDR_LENGTH 62 + 6 +#define MAX_PEER_CONN_TYPE_LENGTH 20 +#define MAX_MSG_TYPE_LENGTH 20 + +struct p2p_message +{ + u64 peer_id; + char peer_addr[MAX_PEER_ADDR_LENGTH]; + char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH]; + char msg_type[MAX_MSG_TYPE_LENGTH]; + u64 msg_size; +}; + + +// Two BPF perf buffers for pushing data (here P2P messages) to user space. 
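+// Using one buffer per direction lets the Python side attach a dedicated
+// callback to each (see handle_inbound() and handle_outbound() below).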
+BPF_PERF_OUTPUT(inbound_messages); +BPF_PERF_OUTPUT(outbound_messages); + +int trace_inbound_message(struct pt_regs *ctx) { + struct p2p_message msg = {}; + + bpf_usdt_readarg(1, ctx, &msg.peer_id); + bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg.msg_size); + + inbound_messages.perf_submit(ctx, &msg, sizeof(msg)); + return 0; +}; + +int trace_outbound_message(struct pt_regs *ctx) { + struct p2p_message msg = {}; + + bpf_usdt_readarg(1, ctx, &msg.peer_id); + bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); + bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); + bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); + bpf_usdt_readarg(5, ctx, &msg.msg_size); + + outbound_messages.perf_submit(ctx, &msg, sizeof(msg)); + return 0; +}; +""" + + +class Message: + """ A P2P network message. """ + msg_type = "" + size = 0 + data = bytes() + inbound = False + + def __init__(self, msg_type, size, inbound): + self.msg_type = msg_type + self.size = size + self.inbound = inbound + + +class Peer: + """ A P2P network peer. """ + id = 0 + address = "" + connection_type = "" + last_messages = list() + + total_inbound_msgs = 0 + total_inbound_bytes = 0 + total_outbound_msgs = 0 + total_outbound_bytes = 0 + + def __init__(self, id, address, connection_type): + self.id = id + self.address = address + self.connection_type = connection_type + self.last_messages = list() + + def add_message(self, message): + self.last_messages.append(message) + if len(self.last_messages) > 25: + self.last_messages.pop(0) + if message.inbound: + self.total_inbound_bytes += message.size + self.total_inbound_msgs += 1 + else: + self.total_outbound_bytes += message.size + self.total_outbound_msgs += 1 + + +def main(bitcoind_path): + peers = dict() + + bitcoind_with_usdts = USDT(path=str(bitcoind_path)) + + # attaching the trace functions defined in the BPF program to the tracepoints + bitcoind_with_usdts.enable_probe( + probe="inbound_message", fn_name="trace_inbound_message") + bitcoind_with_usdts.enable_probe( + probe="outbound_message", fn_name="trace_outbound_message") + bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) + + # BCC: perf buffer handle function for inbound_messages + def handle_inbound(_, data, size): + """ Inbound message handler. + + Called each time a message is submitted to the inbound_messages BPF table.""" + event = bpf["inbound_messages"].event(data) + if event.peer_id not in peers: + peer = Peer(event.peer_id, event.peer_addr.decode( + "utf-8"), event.peer_conn_type.decode("utf-8")) + peers[peer.id] = peer + peers[event.peer_id].add_message( + Message(event.msg_type.decode("utf-8"), event.msg_size, True)) + + # BCC: perf buffer handle function for outbound_messages + def handle_outbound(_, data, size): + """ Outbound message handler. 
+ + Called each time a message is submitted to the outbound_messages BPF table.""" + event = bpf["outbound_messages"].event(data) + if event.peer_id not in peers: + peer = Peer(event.peer_id, event.peer_addr.decode( + "utf-8"), event.peer_conn_type.decode("utf-8")) + peers[peer.id] = peer + peers[event.peer_id].add_message( + Message(event.msg_type.decode("utf-8"), event.msg_size, False)) + + # BCC: add handlers to the inbound and outbound perf buffers + bpf["inbound_messages"].open_perf_buffer(handle_inbound) + bpf["outbound_messages"].open_perf_buffer(handle_outbound) + + wrapper(loop, bpf, peers) + + +def loop(screen, bpf, peers): + screen.nodelay(1) + cur_list_pos = 0 + win = curses.newwin(30, 70, 2, 7) + win.erase() + win.border(ord("|"), ord("|"), ord("-"), ord("-"), + ord("-"), ord("-"), ord("-"), ord("-")) + info_panel = panel.new_panel(win) + info_panel.hide() + + ROWS_AVALIABLE_FOR_LIST = curses.LINES - 5 + scroll = 0 + + while True: + try: + # BCC: poll the perf buffers for new events or timeout after 50ms + bpf.perf_buffer_poll(timeout=50) + + ch = screen.getch() + if (ch == curses.KEY_DOWN or ch == ord("j")) and cur_list_pos < len( + peers.keys()) -1 and info_panel.hidden(): + cur_list_pos += 1 + if cur_list_pos >= ROWS_AVALIABLE_FOR_LIST: + scroll += 1 + if (ch == curses.KEY_UP or ch == ord("k")) and cur_list_pos > 0 and info_panel.hidden(): + cur_list_pos -= 1 + if scroll > 0: + scroll -= 1 + if ch == ord('\n') or ch == ord(' '): + if info_panel.hidden(): + info_panel.show() + else: + info_panel.hide() + screen.erase() + render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel) + curses.panel.update_panels() + screen.refresh() + except KeyboardInterrupt: + exit() + + +def render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel): + """ renders the list of peers and details panel + + This code is unrelated to USDT, BCC and BPF. 
+ """ + header_format = "%6s %-20s %-20s %-22s %-67s" + row_format = "%6s %-5d %9d byte %-5d %9d byte %-22s %-67s" + + screen.addstr(0, 1, (" P2P Message Monitor "), curses.A_REVERSE) + screen.addstr( + 1, 0, (" Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages"), curses.A_NORMAL) + screen.addstr(3, 0, + header_format % ("PEER", "OUTBOUND", "INBOUND", "TYPE", "ADDR"), curses.A_BOLD | curses.A_UNDERLINE) + peer_list = sorted(peers.keys())[scroll:ROWS_AVALIABLE_FOR_LIST+scroll] + for i, peer_id in enumerate(peer_list): + peer = peers[peer_id] + screen.addstr(i + 4, 0, + row_format % (peer.id, peer.total_outbound_msgs, peer.total_outbound_bytes, + peer.total_inbound_msgs, peer.total_inbound_bytes, + peer.connection_type, peer.address), + curses.A_REVERSE if i + scroll == cur_list_pos else curses.A_NORMAL) + if i + scroll == cur_list_pos: + info_window = info_panel.window() + info_window.erase() + info_window.border( + ord("|"), ord("|"), ord("-"), ord("-"), + ord("-"), ord("-"), ord("-"), ord("-")) + + info_window.addstr( + 1, 1, f"PEER {peer.id} ({peer.address})".center(68), curses.A_REVERSE | curses.A_BOLD) + info_window.addstr( + 2, 1, f" OUR NODE{peer.connection_type:^54}PEER ", + curses.A_BOLD) + for i, msg in enumerate(peer.last_messages): + if msg.inbound: + info_window.addstr( + i + 3, 1, "%68s" % + (f"<--- {msg.msg_type} ({msg.size} bytes) "), curses.A_NORMAL) + else: + info_window.addstr( + i + 3, 1, " %s (%d byte) --->" % + (msg.msg_type, msg.size), curses.A_NORMAL) + + +if __name__ == "__main__": + if len(sys.argv) < 2: + print("USAGE:", sys.argv[0], "path/to/bitcoind") + exit() + path = sys.argv[1] + main(path) diff --git a/doc/tracing.md b/doc/tracing.md index 4c472b4154..026946b018 100644 --- a/doc/tracing.md +++ b/doc/tracing.md @@ -53,6 +53,46 @@ be found in [contrib/tracing]. The currently available tracepoints are listed here. +### Context `net` + +#### Tracepoint `net:inbound_message` + +Is called when a message is received from a peer over the P2P network. Passes +information about our peer, the connection and the message as arguments. + +Arguments passed: +1. Peer ID as `int64` +2. Peer Address and Port (IPv4, IPv6, Tor v3, I2P, ...) as `pointer to C-style String` (max. length 68 characters) +3. Connection Type (inbound, feeler, outbound-full-relay, ...) as `pointer to C-style String` (max. length 20 characters) +4. Message Type (inv, ping, getdata, addrv2, ...) as `pointer to C-style String` (max. length 20 characters) +5. Message Size in bytes as `uint64` +6. Message Bytes as `pointer to unsigned chars` (i.e. bytes) + +Note: The message is passed to the tracepoint in full, however, due to space +limitations in the eBPF kernel VM it might not be possible to pass the message +to user-space in full. Messages longer than a 32kb might be cut off. This can +be detected in tracing scripts by comparing the message size to the length of +the passed message. + +#### Tracepoint `net:outbound_message` + +Is called when a message is send to a peer over the P2P network. Passes +information about our peer, the connection and the message as arguments. + +Arguments passed: +1. Peer ID as `int64` +2. Peer Address and Port (IPv4, IPv6, Tor v3, I2P, ...) as `pointer to C-style String` (max. length 68 characters) +3. Connection Type (inbound, feeler, outbound-full-relay, ...) as `pointer to C-style String` (max. length 20 characters) +4. Message Type (inv, ping, getdata, addrv2, ...) as `pointer to C-style String` (max. 
length 20 characters) +5. Message Size in bytes as `uint64` +6. Message Bytes as `pointer to unsigned chars` (i.e. bytes) + +Note: The message is passed to the tracepoint in full, however, due to space +limitations in the eBPF kernel VM it might not be possible to pass the message +to user-space in full. Messages longer than a 32kb might be cut off. This can +be detected in tracing scripts by comparing the message size to the length of +the passed message. + ## Adding tracepoints to Bitcoin Core To add a new tracepoint, `#include ` in the compilation unit where diff --git a/src/net.cpp b/src/net.cpp index 3a1bb138ab..8ef770ede2 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #ifdef WIN32 @@ -3017,11 +3018,20 @@ bool CConnman::NodeFullyConnected(const CNode* pnode) void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) { size_t nMessageSize = msg.data.size(); - LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.m_type), nMessageSize, pnode->GetId()); + LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", msg.m_type, nMessageSize, pnode->GetId()); if (gArgs.GetBoolArg("-capturemessages", false)) { CaptureMessage(pnode->addr, msg.m_type, msg.data, /* incoming */ false); } + TRACE6(net, outbound_message, + pnode->GetId(), + pnode->GetAddrName().c_str(), + pnode->ConnectionTypeAsString().c_str(), + msg.m_type.c_str(), + msg.data.size(), + msg.data.data() + ); + // make sure we use the appropriate network transport format std::vector serializedHeader; pnode->m_serializer->prepareForTransport(msg, serializedHeader); diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 70eac468f3..dc36b69802 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -34,6 +34,7 @@ #include // For NDEBUG compile time check #include #include +#include #include #include @@ -4052,6 +4053,15 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic& interrupt } CNetMessage& msg(msgs.front()); + TRACE6(net, inbound_message, + pfrom->GetId(), + pfrom->GetAddrName().c_str(), + pfrom->ConnectionTypeAsString().c_str(), + msg.m_command.c_str(), + msg.m_recv.size(), + msg.m_recv.data() + ); + if (gArgs.GetBoolArg("-capturemessages", false)) { CaptureMessage(pfrom->addr, msg.m_command, MakeUCharSpan(msg.m_recv), /* incoming */ true); } From 8f37f5c2a562c38c83fc40234ade9c301fc4e685 Mon Sep 17 00:00:00 2001 From: 0xb10c <0xb10c@gmail.com> Date: Thu, 20 May 2021 17:53:24 +0200 Subject: [PATCH 063/112] tracing: Tracepoint for connected blocks Can, for example, be used to benchmark block connections. --- contrib/tracing/README.md | 76 +++++++++++ contrib/tracing/connectblock_benchmark.bt | 150 ++++++++++++++++++++++ doc/tracing.md | 22 ++++ src/validation.cpp | 11 ++ 4 files changed, 259 insertions(+) create mode 100755 contrib/tracing/connectblock_benchmark.bt diff --git a/contrib/tracing/README.md b/contrib/tracing/README.md index 2d81e375dc..047354cda1 100644 --- a/contrib/tracing/README.md +++ b/contrib/tracing/README.md @@ -163,3 +163,79 @@ Warning: incomplete message (only 32568 out of 53552 bytes)! inbound msg 'tx' fr … Possibly lost 2 samples ``` + +### connectblock_benchmark.bt + +A `bpftrace` script to benchmark the `ConnectBlock()` function during, for +example, a blockchain re-index. Based on the `validation:block_connected` USDT +tracepoint. + +The script takes three positional arguments. 
The first two arguments, the start, +and end height indicate between which blocks the benchmark should be run. The +third acts as a duration threshold in milliseconds. When the `ConnectBlock()` +function takes longer than the threshold, information about the block, is +printed. For more details, see the header comment in the script. + +By default, `bpftrace` limits strings to 64 bytes due to the limited stack size +in the kernel VM. Block hashes as zero-terminated hex strings are 65 bytes which +exceed the string limit. The string size limit can be set to 65 bytes with the +environment variable `BPFTRACE_STRLEN`. + +The following command can be used to benchmark, for example, `ConnectBlock()` +between height 20000 and 38000 on SigNet while logging all blocks that take +longer than 25ms to connect. + +``` +$ BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 20000 38000 25 +``` + +In a different terminal, starting Bitcoin Core in SigNet mode and with +re-indexing enabled. + +``` +$ ./src/bitcoind -signet -reindex +``` + +This produces the following output. +``` +Attaching 5 probes... +ConnectBlock Benchmark between height 20000 and 38000 inclusive +Logging blocks taking longer than 25 ms to connect. +Starting Connect Block Benchmark between height 20000 and 38000. +BENCH 39 blk/s 59 tx/s 59 inputs/s 20 sigops/s (height 20038) +Block 20492 (000000f555653bb05e2f3c6e79925e01a20dd57033f4dc7c354b46e34735d32b) 20 tx 2319 ins 2318 sigops took 38 ms +BENCH 1840 blk/s 2117 tx/s 4478 inputs/s 2471 sigops/s (height 21879) +BENCH 1816 blk/s 4972 tx/s 4982 inputs/s 125 sigops/s (height 23695) +BENCH 2095 blk/s 2890 tx/s 2910 inputs/s 152 sigops/s (height 25790) +BENCH 1684 blk/s 3979 tx/s 4053 inputs/s 288 sigops/s (height 27474) +BENCH 1155 blk/s 3216 tx/s 3252 inputs/s 115 sigops/s (height 28629) +BENCH 1797 blk/s 2488 tx/s 2503 inputs/s 111 sigops/s (height 30426) +BENCH 1849 blk/s 6318 tx/s 6569 inputs/s 12189 sigops/s (height 32275) +BENCH 946 blk/s 20209 tx/s 20775 inputs/s 83809 sigops/s (height 33221) +Block 33406 (0000002adfe4a15cfcd53bd890a89bbae836e5bb7f38bac566f61ad4548c87f6) 25 tx 2045 ins 2090 sigops took 29 ms +Block 33687 (00000073231307a9828e5607ceb8156b402efe56747271a4442e75eb5b77cd36) 52 tx 1797 ins 1826 sigops took 26 ms +BENCH 582 blk/s 21581 tx/s 27673 inputs/s 60345 sigops/s (height 33803) +BENCH 1035 blk/s 19735 tx/s 19776 inputs/s 51355 sigops/s (height 34838) +Block 35625 (0000006b00b347390c4768ea9df2655e9ff4b120f29d78594a2a702f8a02c997) 20 tx 3374 ins 3371 sigops took 49 ms +BENCH 887 blk/s 17857 tx/s 22191 inputs/s 24404 sigops/s (height 35725) +Block 35937 (000000d816d13d6e39b471cd4368db60463a764ba1f29168606b04a22b81ea57) 75 tx 3943 ins 3940 sigops took 61 ms +BENCH 823 blk/s 16298 tx/s 21031 inputs/s 18440 sigops/s (height 36548) +Block 36583 (000000c3e260556dbf42968aae3f904dba8b8c1ff96a6f6e3aa5365d2e3ad317) 24 tx 2198 ins 2194 sigops took 34 ms +Block 36700 (000000b3b173de9e65a3cfa738d976af6347aaf83fa17ab3f2a4d2ede3ddfac4) 73 tx 1615 ins 1611 sigops took 31 ms +Block 36832 (0000007859578c02c1ac37dabd1b9ec19b98f350b56935f5dd3a41e9f79f836e) 34 tx 1440 ins 1436 sigops took 26 ms +BENCH 613 blk/s 16718 tx/s 25074 inputs/s 23022 sigops/s (height 37161) +Block 37870 (000000f5c1086291ba2d943fb0c3bc82e71c5ee341ee117681d1456fbf6c6c38) 25 tx 1517 ins 1514 sigops took 29 ms +BENCH 811 blk/s 16031 tx/s 20921 inputs/s 18696 sigops/s (height 37972) + +Took 14055 ms to connect the blocks between height 20000 and 38000. 
+ +Histogram of block connection times in milliseconds (ms). +@durations: +[0] 16838 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@| +[1] 882 |@@ | +[2, 4) 236 | | +[4, 8) 23 | | +[8, 16) 9 | | +[16, 32) 9 | | +[32, 64) 4 | | +``` diff --git a/contrib/tracing/connectblock_benchmark.bt b/contrib/tracing/connectblock_benchmark.bt new file mode 100755 index 0000000000..d268eff7f8 --- /dev/null +++ b/contrib/tracing/connectblock_benchmark.bt @@ -0,0 +1,150 @@ +#!/usr/bin/env bpftrace + +/* + + USAGE: + + BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt + + - The environment variable BPFTRACE_STRLEN needs to be set to 65 chars as + strings are limited to 64 chars by default. Hex strings with Bitcoin block + hashes are 64 hex chars + 1 null-termination char. + - sets the height at which the benchmark should start. Setting + the start height to 0 starts the benchmark immediately, even before the + first block is connected. + - sets the height after which the benchmark should end. Setting + the end height to 0 disables the benchmark. The script only logs blocks + over . + - Threshold + + This script requires a 'bitcoind' binary compiled with eBPF support and the + 'validation:block_connected' USDT. By default, it's assumed that 'bitcoind' is + located in './src/bitcoind'. This can be modified in the script below. + + EXAMPLES: + + BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 300000 680000 1000 + + When run together 'bitcoind -reindex', this benchmarks the time it takes to + connect the blocks between height 300.000 and 680.000 (inclusive) and prints + details about all blocks that take longer than 1000ms to connect. Prints a + histogram with block connection times when the benchmark is finished. + + + BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 0 0 500 + + When running together 'bitcoind', all newly connected blocks that + take longer than 500ms to connect are logged. A histogram with block + connection times is shown when the script is terminated. + +*/ + +BEGIN +{ + $start_height = $1; + $end_height = $2; + $logging_threshold_ms = $3; + + if ($end_height < $start_height) { + printf("Error: start height (%d) larger than end height (%d)!\n", $start_height, $end_height); + exit(); + } + + if ($end_height > 0) { + printf("ConnectBlock benchmark between height %d and %d inclusive\n", $start_height, $end_height); + } else { + printf("ConnectBlock logging starting at height %d\n", $start_height); + } + + if ($logging_threshold_ms > 0) { + printf("Logging blocks taking longer than %d ms to connect.\n", $3); + } + + if ($start_height == 0) { + @start = nsecs; + } +} + +/* + Attaches to the 'validation:block_connected' USDT and collects stats when the + connected block is between the start and end height (or the end height is + unset). 
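+
+  For reference, the argument mapping used below is: arg0 = block hash (as a
+  C-style string), arg1 = height, arg2 = transactions, arg3 = inputs,
+  arg4 = sigops, arg5 = connection time in microseconds. See the
+  'validation:block_connected' section in doc/tracing.md.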
+*/ +usdt:./src/bitcoind:validation:block_connected /arg1 >= $1 && (arg1 <= $2 || $2 == 0 )/ +{ + $height = arg1; + $transactions = arg2; + $inputs = arg3; + $sigops = arg4; + $duration = (uint64) arg5; + + @height = $height; + + @blocks = @blocks + 1; + @transactions = @transactions + $transactions; + @inputs = @inputs + $inputs; + @sigops = @sigops + $sigops; + + @durations = hist($duration / 1000); + + if ($height == $1 && $height != 0) { + @start = nsecs; + printf("Starting Connect Block Benchmark between height %d and %d.\n", $1, $2); + } + + if ($2 > 0 && $height >= $2) { + @end = nsecs; + $duration = @end - @start; + printf("\nTook %d ms to connect the blocks between height %d and %d.\n", $duration / 1000000, $1, $2); + exit(); + } +} + +/* + Attaches to the 'validation:block_connected' USDT and logs information about + blocks where the time it took to connect the block is above the + . +*/ +usdt:./src/bitcoind:validation:block_connected / (uint64) arg5 / 1000> $3 / +{ + $hash_str = str(arg0); + $height = (int32) arg1; + $transactions = (uint64) arg2; + $inputs = (int32) arg3; + $sigops = (int64) arg4; + $duration = (int64) arg5; + + printf("Block %d (%s) %4d tx %5d ins %5d sigops took %4d ms\n", $height, $hash_str, $transactions, $inputs, $sigops, (uint64) $duration / 1000); +} + + +/* + Prints stats about the blocks, transactions, inputs, and sigops processed in + the last second (if any). +*/ +interval:s:1 { + if (@blocks > 0) { + printf("BENCH %4d blk/s %6d tx/s %7d inputs/s %8d sigops/s (height %d)\n", @blocks, @transactions, @inputs, @sigops, @height); + + zero(@blocks); + zero(@transactions); + zero(@inputs); + zero(@sigops); + } +} + +END +{ + printf("\nHistogram of block connection times in milliseconds (ms).\n"); + print(@durations); + + clear(@durations); + clear(@blocks); + clear(@transactions); + clear(@inputs); + clear(@sigops); + clear(@height); + clear(@start); + clear(@end); +} + diff --git a/doc/tracing.md b/doc/tracing.md index 026946b018..1242a0d250 100644 --- a/doc/tracing.md +++ b/doc/tracing.md @@ -93,6 +93,28 @@ to user-space in full. Messages longer than a 32kb might be cut off. This can be detected in tracing scripts by comparing the message size to the length of the passed message. +### Context `validation` + +#### Tracepoint `validation:block_connected` + +Is called *after* a block is connected to the chain. Can, for example, be used +to benchmark block connections together with `-reindex`. + +Arguments passed: +1. Block Header Hash as `pointer to C-style String` (64 characters) +2. Block Height as `int32` +3. Transactions in the Block as `uint64` +4. Inputs spend in the Block as `int32` +5. SigOps in the Block (excluding coinbase SigOps) `uint64` +6. Time it took to connect the Block in microseconds (µs) as `uint64` +7. Block Header Hash as `pointer to unsigned chars` (i.e. 32 bytes in little-endian) + +Note: The 7th argument can't be accessed by bpftrace and is purposefully chosen +to be the block header hash as bytes. See [bpftrace argument limit] for more +details. 
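+
+As an illustrative sketch only: a minimal `bpftrace` program hooking this
+tracepoint could look like the following. It assumes a `bitcoind` binary at
+`./src/bitcoind` and reads just the integer arguments; see
+`contrib/tracing/connectblock_benchmark.bt` for a complete example.
+
+```
+usdt:./src/bitcoind:validation:block_connected
+{
+  printf("height %d connected in %d us\n", arg1, arg5);
+}
+```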
+ +[bpftrace argument limit]: #bpftrace-argument-limit + ## Adding tracepoints to Bitcoin Core To add a new tracepoint, `#include ` in the compilation unit where diff --git a/src/validation.cpp b/src/validation.cpp index 4e7bc635da..20d641bf40 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -1997,6 +1998,16 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, int64_t nTime6 = GetTimeMicros(); nTimeCallbacks += nTime6 - nTime5; LogPrint(BCLog::BENCH, " - Callbacks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime6 - nTime5), nTimeCallbacks * MICRO, nTimeCallbacks * MILLI / nBlocksTotal); + TRACE7(validation, block_connected, + block.GetHash().ToString().c_str(), + pindex->nHeight, + block.vtx.size(), + nInputs, + nSigOpsCost, + GetTimeMicros() - nTimeStart, // in microseconds (µs) + block.GetHash().data() + ); + return true; } From a3f6397c7390ca033311f2219094ac90f6e582f9 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Wed, 23 Jun 2021 17:05:44 +0200 Subject: [PATCH 064/112] test: feature_rbf.py: make MiniWallet instance available for all sub-tests also document on why we start scanning blocks at height 76 --- test/functional/feature_rbf.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index dc6ebcb227..2db11b26f0 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -84,6 +84,12 @@ def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): + self.wallet = MiniWallet(self.nodes[0]) + # the pre-mined test framework chain contains coinbase outputs to the + # MiniWallet's default address ADDRESS_BCRT1_P2WSH_OP_TRUE in blocks + # 76-100 (see method BitcoinTestFramework._initialize_chain()) + self.wallet.scan_blocks(start=76, num=1) + self.log.info("Running test simple doublespend...") self.test_simple_doublespend() @@ -569,12 +575,10 @@ def test_rpc(self): assert_equal(json1["vin"][0]["sequence"], 4294967294) def test_no_inherited_signaling(self): - wallet = MiniWallet(self.nodes[0]) - wallet.scan_blocks(start=76, num=1) - confirmed_utxo = wallet.get_utxo() + confirmed_utxo = self.wallet.get_utxo() # Create an explicitly opt-in parent transaction - optin_parent_tx = wallet.send_self_transfer( + optin_parent_tx = self.wallet.send_self_transfer( from_node=self.nodes[0], utxo_to_spend=confirmed_utxo, sequence=BIP125_SEQUENCE_NUMBER, @@ -582,7 +586,7 @@ def test_no_inherited_signaling(self): ) assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable']) - replacement_parent_tx = wallet.create_self_transfer( + replacement_parent_tx = self.wallet.create_self_transfer( from_node=self.nodes[0], utxo_to_spend=confirmed_utxo, sequence=BIP125_SEQUENCE_NUMBER, @@ -596,8 +600,8 @@ def test_no_inherited_signaling(self): assert_equal(res['allowed'], True) # Create an opt-out child tx spending the opt-in parent - parent_utxo = wallet.get_utxo(txid=optin_parent_tx['txid']) - optout_child_tx = wallet.send_self_transfer( + parent_utxo = self.wallet.get_utxo(txid=optin_parent_tx['txid']) + optout_child_tx = self.wallet.send_self_transfer( from_node=self.nodes[0], utxo_to_spend=parent_utxo, sequence=0xffffffff, @@ -607,7 +611,7 @@ def test_no_inherited_signaling(self): # Reports true due to inheritance assert_equal(True, 
self.nodes[0].getmempoolentry(optout_child_tx['txid'])['bip125-replaceable']) - replacement_child_tx = wallet.create_self_transfer( + replacement_child_tx = self.wallet.create_self_transfer( from_node=self.nodes[0], utxo_to_spend=parent_utxo, sequence=0xffffffff, @@ -626,9 +630,7 @@ def test_no_inherited_signaling(self): assert_raises_rpc_error(-26, 'txn-mempool-conflict', self.nodes[0].sendrawtransaction, replacement_child_tx["hex"], 0) def test_replacement_relay_fee(self): - wallet = MiniWallet(self.nodes[0]) - wallet.scan_blocks(start=77, num=1) - tx = wallet.send_self_transfer(from_node=self.nodes[0])['tx'] + tx = self.wallet.send_self_transfer(from_node=self.nodes[0])['tx'] # Higher fee, higher feerate, different txid, but the replacement does not provide a relay # fee conforming to node's `incrementalrelayfee` policy of 1000 sat per KB. From d596dba9877e7ead3fb5426cbe7e608fbcbfe3eb Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Thu, 22 Jul 2021 13:29:39 +0200 Subject: [PATCH 065/112] test: assert logging categories are sorted in rpc and help --- test/functional/rpc_misc.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test/functional/rpc_misc.py b/test/functional/rpc_misc.py index 9ad7827c8a..563f2ea43e 100755 --- a/test/functional/rpc_misc.py +++ b/test/functional/rpc_misc.py @@ -54,7 +54,7 @@ def run_test(self): assert_raises_rpc_error(-8, "unknown mode foobar", node.getmemoryinfo, mode="foobar") - self.log.info("test logging rpc") + self.log.info("test logging rpc and help") # Test logging RPC returns the expected number of logging categories. assert_equal(len(node.logging()), 24) @@ -66,6 +66,15 @@ def run_test(self): node.logging(include=['qt']) assert_equal(node.logging()['qt'], True) + # Test logging RPC returns the logging categories in alphabetical order. + sorted_logging_categories = sorted(node.logging()) + assert_equal(list(node.logging()), sorted_logging_categories) + + # Test logging help returns the logging categories string in alphabetical order. + categories = ', '.join(sorted_logging_categories) + logging_help = self.nodes[0].help('logging') + assert f"valid logging categories are: {categories}" in logging_help + self.log.info("test echoipc (testing spawned process in multiprocess build)") assert_equal(node.echoipc("hello"), "hello") From aa02c64540cd77199c2834549786b9bc91fd4bc9 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Wed, 23 Jun 2021 17:38:14 +0200 Subject: [PATCH 066/112] test: use MiniWallet for simple doublespend test in feature_rbf.py --- test/functional/feature_rbf.py | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index 2db11b26f0..65929704eb 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -4,6 +4,7 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the RBF code.""" +from copy import deepcopy from decimal import Decimal from test_framework.blocktools import COINBASE_MATURITY @@ -88,7 +89,7 @@ def run_test(self): # the pre-mined test framework chain contains coinbase outputs to the # MiniWallet's default address ADDRESS_BCRT1_P2WSH_OP_TRUE in blocks # 76-100 (see method BitcoinTestFramework._initialize_chain()) - self.wallet.scan_blocks(start=76, num=1) + self.wallet.scan_blocks(start=76, num=2) self.log.info("Running test simple doublespend...") self.test_simple_doublespend() @@ -130,24 +131,17 @@ def run_test(self): def test_simple_doublespend(self): """Simple doublespend""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) - - # make_utxo may have generated a bunch of blocks, so we need to sync - # before we can spend the coins generated, or else the resulting - # transactions might not be accepted by our peers. - self.sync_all() + # we use MiniWallet to create a transaction template with inputs correctly set, + # and modify the output (amount, scriptPubKey) according to our needs + tx_template = self.wallet.create_self_transfer(from_node=self.nodes[0])['tx'] - tx1a = CTransaction() - tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1a = deepcopy(tx_template) tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] tx1a_hex = tx1a.serialize().hex() tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0) - self.sync_all() - # Should fail because we haven't changed the fee - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] + tx1b = deepcopy(tx_template) tx1b.vout = [CTxOut(1 * COIN, DUMMY_2_P2WPKH_SCRIPT)] tx1b_hex = tx1b.serialize().hex() @@ -155,9 +149,7 @@ def test_simple_doublespend(self): assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0) # Extra 0.1 BTC fee - tx1b = CTransaction() - tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)] + tx1b.vout[0].nValue -= int(0.1 * COIN) tx1b_hex = tx1b.serialize().hex() # Works when enabled tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0) From d080c27066449f76bc8709fc50e422757971d2cf Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Thu, 22 Jul 2021 18:25:06 -0400 Subject: [PATCH 067/112] guix, doc: Add a note that codesigners need to rebuild after tagging One of the issues observed during the 22.0rc1 release process was that a codesigner's attestation mismatched non-codesigner attestations because the guix-codesign step was performed prior to tagging the version in bitcoin-detached-sigs. --- doc/release-process.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/release-process.md b/doc/release-process.md index e375ae976a..c57fa5b23a 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -160,6 +160,9 @@ Codesigner only: Sign the windows binaries: Enter the passphrase for the key when prompted signature-win.tar.gz will be created +Code-signer only: It is advised to test that the code signature attaches properly prior to tagging by performing the `guix-codesign` step. +However if this is done, once the release has been tagged in the bitcoin-detached-sigs repo, the `guix-codesign` step must be performed again in order for the guix attestation to be valid when compared against the attestations of non-codesigner builds. 
+ Codesigner only: Commit the detached codesign payloads: ```sh From 43225f0a2a517ccd79dc49279b979ffd2eca6b85 Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Thu, 22 Jul 2021 19:10:40 -0400 Subject: [PATCH 068/112] guix: Remove extra \r from all.SHA256SUMS line ending guix-attest mistakenly added an extra \r to the line endings in all.SHA256SUMS, causing guix-verify to erroneously fail. Co-Authored-By: Carl Dong --- contrib/guix/guix-attest | 1 - 1 file changed, 1 deletion(-) diff --git a/contrib/guix/guix-attest b/contrib/guix/guix-attest index 396cb39895..dcf709b542 100755 --- a/contrib/guix/guix-attest +++ b/contrib/guix/guix-attest @@ -216,7 +216,6 @@ mkdir -p "$outsigdir" cat "${sha256sum_fragments[@]}" \ | sort -u \ | sort -k2 \ - | sed 's/$/\r/' \ | rfc4880_normalize_document \ > "$temp_all" if [ -e all.SHA256SUMS ]; then From 9b313dfef18792fcc36e78ef3caa693fafcce04e Mon Sep 17 00:00:00 2001 From: Andrew Chow Date: Fri, 23 Jul 2021 15:15:36 -0400 Subject: [PATCH 069/112] guix: Ensure EPOCH_SOURCE_DATE does not include GPG information If the user has set log.showSignature=true in their git config, then the git log will always output GPG signature information. Since git log is used to set EPOCH_SOURCE_DATE, this will mistakenly have GPG signature information in it which causes issues for the build. To avoid this issue, we override the config and force log.showSignature=false. --- contrib/guix/guix-build | 2 +- contrib/guix/guix-codesign | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/guix/guix-build b/contrib/guix/guix-build index f6da8435e9..3d9335b4b0 100755 --- a/contrib/guix/guix-build +++ b/contrib/guix/guix-build @@ -232,7 +232,7 @@ host_to_commonname() { } # Determine the reference time used for determinism (overridable by environment) -SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}" +SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}" # Execute "$@" in a pinned, possibly older version of Guix, for reproducibility # across time. diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign index 11610a92e1..3f464f89e6 100755 --- a/contrib/guix/guix-codesign +++ b/contrib/guix/guix-codesign @@ -220,7 +220,7 @@ fi JOBS="${JOBS:-$(nproc)}" # Determine the reference time used for determinism (overridable by environment) -SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}" +SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --format=%at -1)}" # Execute "$@" in a pinned, possibly older version of Guix, for reproducibility # across time. From 8a4f0fcd3fc1a35c1482975114555b0fed75a1c0 Mon Sep 17 00:00:00 2001 From: Alex Groce Date: Wed, 28 Jul 2021 13:03:08 -0700 Subject: [PATCH 070/112] Document faster throughput configuration --- doc/fuzzing.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/fuzzing.md b/doc/fuzzing.md index 6fc9077e4c..6605749557 100644 --- a/doc/fuzzing.md +++ b/doc/fuzzing.md @@ -83,6 +83,10 @@ INFO: seed corpus: files: 991 min: 1b max: 1858b total: 288291b rss: 150Mb … ``` +## Run without sanitizers for increased throughput + +Fuzzing on a harness compiled with `--with-sanitizers=address,fuzzer,undefined` is good for finding bugs. However, the very slow execution even under libFuzzer will limit the ability to find new coverage. 
A good approach is to perform occasional long runs without the additional bug-detectors (configure `--with-sanitizers=fuzzer`) and then merge new inputs into a corpus as described in the qa-assets repo (https://github.com/bitcoin-core/qa-assets/blob/main/.github/PULL_REQUEST_TEMPLATE.md). Patience is useful; even with improved throughput, libFuzzer may need days and 10s of millions of executions to reach deep/hard targets. + ## Reproduce a fuzzer crash reported by the CI - `cd` into the `qa-assets` directory and update it with `git pull qa-assets` From f8f772dc495f7d3ee02d75d42ee22ceb90c3a693 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Wed, 21 Jul 2021 12:31:04 +0200 Subject: [PATCH 071/112] macdeploy: alternative info to download the macOS SDK The previous link wasn't accessible for me, this adds some instructions given to me by Hebasto on #bitcoin-core-builds as well as a shasum for the archive to quickly check the downloaded one is the right one before processing with the entire Guix build. This also corrects a link to an older version of the SDK currently in use. Signed-off-by: Antoine Poinsot --- contrib/macdeploy/README.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md index 21f6ba2eb3..1bb8b2aa17 100644 --- a/contrib/macdeploy/README.md +++ b/contrib/macdeploy/README.md @@ -16,7 +16,10 @@ Our current macOS SDK (`Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`) can be extracted from [Xcode_12.1.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip). -An Apple ID is needed to download this. +Alternatively, after logging in to your account go to 'Downloads', then 'More' +and look for [`Xcode_12.1`](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip). +An Apple ID and cookies enabled for the hostname are needed to download this. +The `sha256sum` of the archive should be `612443b1894b39368a596ea1607f30cbb0481ad44d5e29c75edb71a6d2cf050f`. After Xcode version 7.x, Apple started shipping the `Xcode.app` in a `.xip` archive. This makes the SDK less-trivial to extract on non-macOS machines. One @@ -76,7 +79,7 @@ and its `libLTO.so` rather than those from `llvmgcc`, as it was originally done To complicate things further, all builds must target an Apple SDK. These SDKs are free to download, but not redistributable. To obtain it, register for an Apple Developer Account, -then download [Xcode_11.3.1](https://download.developer.apple.com/Developer_Tools/Xcode_11.3.1/Xcode_11.3.1.xip). +then download [Xcode_12.1](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip). 
This file is many gigabytes in size, but most (but not all) of what we need is contained only in a single directory: From fa865287e5f35e0a376785834e966dd202d2959e Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Thu, 29 Jul 2021 18:59:11 +0200 Subject: [PATCH 072/112] test: Add temporary sanitizer suppression implicit-signed-integer-truncation:netaddress.cpp --- test/sanitizer_suppressions/ubsan | 1 + 1 file changed, 1 insertion(+) diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan index 2850cfcea5..b52e105a33 100644 --- a/test/sanitizer_suppressions/ubsan +++ b/test/sanitizer_suppressions/ubsan @@ -89,6 +89,7 @@ implicit-signed-integer-truncation:leveldb/ implicit-signed-integer-truncation:miner.cpp implicit-signed-integer-truncation:net.cpp implicit-signed-integer-truncation:net_processing.cpp +implicit-signed-integer-truncation:netaddress.cpp implicit-signed-integer-truncation:streams.h implicit-signed-integer-truncation:test/arith_uint256_tests.cpp implicit-signed-integer-truncation:test/skiplist_tests.cpp From 2fcaec7bbb96d6fe72a7e3a5744b0c35c79733e8 Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 22 Mar 2021 15:48:17 -0700 Subject: [PATCH 073/112] [net_processing] Introduce SetupAddressRelay Idempotent function that initializes m_addr_known for connections that support address relay (anything other than block-relay-only). Unused until the next commit. --- src/net_processing.cpp | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index a0c346b99f..1d53d1a1b7 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -226,7 +226,7 @@ struct Peer { std::vector m_addrs_to_send; /** Probabilistic filter of addresses that this peer already knows. * Used to avoid relaying addresses to this peer more than once. */ - const std::unique_ptr m_addr_known; + std::unique_ptr m_addr_known; /** Whether a getaddr request to this peer is outstanding. */ bool m_getaddr_sent{false}; /** Guards address sending timers. */ @@ -612,6 +612,14 @@ class PeerManagerImpl final : public PeerManager * @param[in] vRecv The raw message received */ void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv); + + /** Checks if address relay is permitted with peer. Initializes + * `m_addr_known` bloom filter if needed. + * + * @return True if address relay is enabled with peer + * False if address relay is disallowed + */ + bool SetupAddressRelay(CNode& node, Peer& peer); }; } // namespace @@ -4423,6 +4431,22 @@ class CompareInvMempoolOrder }; } +bool PeerManagerImpl::SetupAddressRelay(CNode& node, Peer& peer) +{ + // We don't participate in addr relay with outbound block-relay-only + // connections to prevent providing adversaries with the additional + // information of addr traffic to infer the link. + if (node.IsBlockOnlyConn()) return false; + + if (!RelayAddrsWithPeer(peer)) { + // First addr message we have received from the peer, initialize + // m_addr_known + peer.m_addr_known = std::make_unique(5000, 0.001); + } + + return true; +} + bool PeerManagerImpl::SendMessages(CNode* pto) { PeerRef peer = GetPeerRef(pto->GetId()); From 6653fa3328b5608fcceda1c6ea8e68c5d58739ec Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 21 Jun 2021 19:15:07 -0700 Subject: [PATCH 074/112] [test] Update p2p_addr_relay test to prepare Use an init param to make clear whether a getaddr message should be sent when the P2PConnection receives a version message. 
These changes are in preparation for upcoming commits that modify the behavior of a bitcoind node and the test framework. --- test/functional/p2p_addr_relay.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index ff1d85a9be..7b7bcfc917 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -11,7 +11,8 @@ NODE_NETWORK, NODE_WITNESS, msg_addr, - msg_getaddr + msg_getaddr, + msg_verack ) from test_framework.p2p import ( P2PInterface, @@ -27,10 +28,12 @@ class AddrReceiver(P2PInterface): num_ipv4_received = 0 test_addr_contents = False _tokens = 1 + send_getaddr = True - def __init__(self, test_addr_contents=False): + def __init__(self, test_addr_contents=False, send_getaddr=True): super().__init__() self.test_addr_contents = test_addr_contents + self.send_getaddr = send_getaddr def on_addr(self, message): for addr in message.addrs: @@ -60,6 +63,11 @@ def increment_tokens(self, n): def addr_received(self): return self.num_ipv4_received != 0 + def on_version(self, message): + self.send_message(msg_verack()) + if (self.send_getaddr): + self.send_message(msg_getaddr()) + def getaddr_received(self): return self.message_count['getaddr'] > 0 @@ -156,7 +164,7 @@ def relay_tests(self): self.nodes[0].disconnect_p2ps() self.log.info('Check relay of addresses received from outbound peers') - inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)) + inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True, send_getaddr=False)) full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay") msg = self.setup_addr_msg(2) self.send_addr_msg(full_outbound_peer, msg, [inbound_peer]) @@ -185,6 +193,12 @@ def relay_tests(self): self.nodes[0].disconnect_p2ps() def getaddr_tests(self): + # In the previous tests, the node answered GETADDR requests with an + # empty addrman. Due to GETADDR response caching (see + # CConnman::GetAddresses), the node would continue to provide 0 addrs + # in response until enough time has passed or the node is restarted. + self.restart_node(0) + self.log.info('Test getaddr behavior') self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer') full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay") @@ -197,7 +211,7 @@ def getaddr_tests(self): assert_equal(block_relay_peer.getaddr_received(), False) self.log.info('Check that we answer getaddr messages only from inbound peers') - inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver()) + inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(send_getaddr=False)) inbound_peer.sync_with_ping() # Add some addresses to addrman From 1d1ef2db7ea0d93c7dab4a9800ec74afa7a019eb Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Wed, 24 Mar 2021 15:01:05 -0700 Subject: [PATCH 075/112] [net_processing] Defer initializing m_addr_known Use SetupAddressRelay to only initialize `m_addr_known` as needed. For outbound peers, we initialize the filter before sending our self announcement (not applicable for block-relay-only connections). For inbound peers, we initialize the filter when we get an addr related message (ADDR, ADDRV2, GETADDR). These changes intend to mitigate address blackholes. 
Since an inbound peer has to send us an addr related message to become eligible as a candidate for addr relay, this should reduce our likelihood of sending them self-announcements. --- src/net_processing.cpp | 29 +++++++++++++++++---------- test/functional/p2p_addr_relay.py | 3 +++ test/functional/test_framework/p2p.py | 1 + 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 1d53d1a1b7..fe20421a21 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -224,8 +224,13 @@ struct Peer { /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */ std::vector m_addrs_to_send; - /** Probabilistic filter of addresses that this peer already knows. - * Used to avoid relaying addresses to this peer more than once. */ + /** Probabilistic filter to track recent addr messages relayed with this + * peer. Used to avoid relaying redundant addresses to this peer. + * + * We initialize this filter for outbound peers (other than + * block-relay-only connections) or when an inbound peer sends us an + * address related message (ADDR, ADDRV2, GETADDR). + **/ std::unique_ptr m_addr_known; /** Whether a getaddr request to this peer is outstanding. */ bool m_getaddr_sent{false}; @@ -258,9 +263,8 @@ struct Peer { /** Work queue of items requested by this peer **/ std::deque m_getdata_requests GUARDED_BY(m_getdata_requests_mutex); - explicit Peer(NodeId id, bool addr_relay) + explicit Peer(NodeId id) : m_id(id) - , m_addr_known{addr_relay ? std::make_unique(5000, 0.001) : nullptr} {} }; @@ -1125,9 +1129,7 @@ void PeerManagerImpl::InitializeNode(CNode *pnode) assert(m_txrequest.Count(nodeid) == 0); } { - // Addr relay is disabled for outbound block-relay-only peers to - // prevent adversaries from inferring these links from addr traffic. - PeerRef peer = std::make_shared(nodeid, /* addr_relay = */ !pnode->IsBlockOnlyConn()); + PeerRef peer = std::make_shared(nodeid); LOCK(m_peer_mutex); m_peer_map.emplace_hint(m_peer_map.end(), nodeid, std::move(peer)); } @@ -2580,7 +2582,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, UpdatePreferredDownload(pfrom, State(pfrom.GetId())); } - if (!pfrom.IsInboundConn() && !pfrom.IsBlockOnlyConn()) { + // Self advertisement & GETADDR logic + if (!pfrom.IsInboundConn() && SetupAddressRelay(pfrom, *peer)) { // For outbound peers, we try to relay our address (so that other // nodes can try to find us more quickly, as we have no guarantee // that an outbound peer is even aware of how to reach us) and do a @@ -2589,8 +2592,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // empty and no one will know who we are, so these mechanisms are // important to help us connect to the network. // - // We skip this for block-relay-only peers to avoid potentially leaking - // information about our block-relay-only connections via address relay. + // We skip this for block-relay-only peers. We want to avoid + // potentially leaking addr information and we do not want to + // indicate to the peer that we will participate in addr relay. 
if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload()) { CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices()); @@ -2788,10 +2792,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, s >> vAddr; - if (!RelayAddrsWithPeer(*peer)) { + if (!SetupAddressRelay(pfrom, *peer)) { LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId()); return; } + if (vAddr.size() > MAX_ADDR_TO_SEND) { Misbehaving(pfrom.GetId(), 20, strprintf("%s message size = %u", msg_type, vAddr.size())); @@ -3725,6 +3730,8 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } + SetupAddressRelay(pfrom, *peer); + // Only send one GetAddr response per connection to reduce resource waste // and discourage addr stamping of INV announcements. if (peer->m_getaddr_recvd) { diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index 7b7bcfc917..c8c1120462 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -175,6 +175,9 @@ def relay_tests(self): # of the outbound peer which is often sent before the GETADDR response. assert_equal(inbound_peer.num_ipv4_received, 0) + # Send an empty ADDR message to intialize address relay on this connection. + inbound_peer.send_and_ping(msg_addr()) + self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed') msg2 = self.setup_addr_msg(2) self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer]) diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index cc80b543cd..b7d5bd8fab 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -438,6 +438,7 @@ def on_version(self, message): self.send_message(msg_sendaddrv2()) self.send_message(msg_verack()) self.nServices = message.nServices + self.send_message(msg_getaddr()) # Connection helper methods From 201e4964816f8896cfe7b4f6d8ddbfffe7102f87 Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 26 Jul 2021 11:48:58 -0700 Subject: [PATCH 076/112] [net_processing] Introduce new field to indicate if addr relay is enabled --- src/net_processing.cpp | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index fe20421a21..e9bb4f2211 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -230,8 +230,25 @@ struct Peer { * We initialize this filter for outbound peers (other than * block-relay-only connections) or when an inbound peer sends us an * address related message (ADDR, ADDRV2, GETADDR). + * + * Presence of this filter must correlate with m_addr_relay_enabled. **/ std::unique_ptr m_addr_known; + /** Whether we are participating in address relay with this connection. + * + * We set this bool to true for outbound peers (other than + * block-relay-only connections), or when an inbound peer sends us an + * address related message (ADDR, ADDRV2, GETADDR). + * + * We use this bool to decide whether a peer is eligible for gossiping + * addr messages. This avoids relaying to peers that are unlikely to + * forward them, effectively blackholing self announcements. Reasons + * peers might support addr relay on the link include that they connected + * to us as a block-relay-only peer or they are a light client. 
+ * + * This field must correlate with whether m_addr_known has been + * initialized.*/ + std::atomic_bool m_addr_relay_enabled{false}; /** Whether a getaddr request to this peer is outstanding. */ bool m_getaddr_sent{false}; /** Guards address sending timers. */ @@ -617,8 +634,8 @@ class PeerManagerImpl final : public PeerManager */ void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv); - /** Checks if address relay is permitted with peer. Initializes - * `m_addr_known` bloom filter if needed. + /** Checks if address relay is permitted with peer. If needed, initializes + * the m_addr_known bloom filter and sets m_addr_relay_enabled to true. * * @return True if address relay is enabled with peer * False if address relay is disallowed @@ -746,7 +763,7 @@ static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { static bool RelayAddrsWithPeer(const Peer& peer) { - return peer.m_addr_known != nullptr; + return peer.m_addr_relay_enabled; } /** @@ -4449,6 +4466,7 @@ bool PeerManagerImpl::SetupAddressRelay(CNode& node, Peer& peer) // First addr message we have received from the peer, initialize // m_addr_known peer.m_addr_known = std::make_unique(5000, 0.001); + peer.m_addr_relay_enabled = true; } return true; From c061599e40dc3d379c10b914765061a7a8449dd7 Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 26 Jul 2021 11:54:07 -0700 Subject: [PATCH 077/112] [net_processing] Remove RelayAddrsWithPeer function Now that we have a simple boolean stored on the field, the wrapper function is no longer necessary. --- src/net_processing.cpp | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index e9bb4f2211..5103824ee1 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -761,11 +761,6 @@ static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return &it->second; } -static bool RelayAddrsWithPeer(const Peer& peer) -{ - return peer.m_addr_relay_enabled; -} - /** * Whether the peer supports the address. For example, a peer that does not * implement BIP155 cannot receive Tor v3 addresses because it requires @@ -1708,7 +1703,7 @@ void PeerManagerImpl::RelayAddress(NodeId originator, LOCK(m_peer_mutex); for (auto& [id, peer] : m_peer_map) { - if (RelayAddrsWithPeer(*peer) && id != originator && IsAddrCompatible(*peer, addr)) { + if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) { uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize(); for (unsigned int i = 0; i < nRelayNodes; i++) { if (hashKey > best[i].first) { @@ -4327,7 +4322,7 @@ void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::mic void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) { // Nothing to do for non-address-relay peers - if (!RelayAddrsWithPeer(peer)) return; + if (!peer.m_addr_relay_enabled) return; LOCK(peer.m_addr_send_times_mutex); // Periodically advertise our local address to the peer. @@ -4462,11 +4457,10 @@ bool PeerManagerImpl::SetupAddressRelay(CNode& node, Peer& peer) // information of addr traffic to infer the link. 
if (node.IsBlockOnlyConn()) return false; - if (!RelayAddrsWithPeer(peer)) { + if (!peer.m_addr_relay_enabled.exchange(true)) { // First addr message we have received from the peer, initialize // m_addr_known peer.m_addr_known = std::make_unique(5000, 0.001); - peer.m_addr_relay_enabled = true; } return true; From 0980ca78cd930a00c9985d7f00083a3b8e8be89e Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Sun, 13 Jun 2021 22:08:54 -0700 Subject: [PATCH 078/112] [test] Test that we intentionally select addr relay peers. This test checks that we only relay addresses with inbound peers who have sent us an addr related message. Uses a combination of GETADDR and ADDR to verify when peers are eligible. --- test/functional/p2p_addr_relay.py | 49 ++++++++++++++++++++++++++++++- 1 file changed, 48 insertions(+), 1 deletion(-) diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index c8c1120462..4233d8d010 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -19,7 +19,7 @@ p2p_lock, ) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal +from test_framework.util import assert_equal, assert_greater_than import random import time @@ -83,6 +83,10 @@ def set_test_params(self): def run_test(self): self.oversized_addr_test() self.relay_tests() + self.inbound_blackhole_tests() + + # This test populates the addrman, which can impact the node's behavior + # in subsequent tests self.getaddr_tests() self.blocksonly_mode_tests() self.rate_limit_tests() @@ -195,6 +199,49 @@ def relay_tests(self): self.nodes[0].disconnect_p2ps() + def sum_addr_messages(self, msgs_dict): + return sum(bytes_received for (msg, bytes_received) in msgs_dict.items() if msg in ['addr', 'addrv2', 'getaddr']) + + def inbound_blackhole_tests(self): + self.log.info('Check that we only relay addresses to inbound peers who have previously sent us addr related messages') + + addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) + receiver_peer = self.nodes[0].add_p2p_connection(AddrReceiver()) + blackhole_peer = self.nodes[0].add_p2p_connection(AddrReceiver(send_getaddr=False)) + initial_addrs_received = receiver_peer.num_ipv4_received + + # addr_source sends 2 addresses to node0 + msg = self.setup_addr_msg(2) + addr_source.send_and_ping(msg) + self.mocktime += 30 * 60 + self.nodes[0].setmocktime(self.mocktime) + receiver_peer.sync_with_ping() + blackhole_peer.sync_with_ping() + + peerinfo = self.nodes[0].getpeerinfo() + + # Confirm node received addr-related messages from receiver peer + assert_greater_than(self.sum_addr_messages(peerinfo[1]['bytesrecv_per_msg']), 0) + # And that peer received addresses + assert_equal(receiver_peer.num_ipv4_received - initial_addrs_received, 2) + + # Confirm node has not received addr-related messages from blackhole peer + assert_equal(self.sum_addr_messages(peerinfo[2]['bytesrecv_per_msg']), 0) + # And that peer did not receive addresses + assert_equal(blackhole_peer.num_ipv4_received, 0) + + self.log.info("After blackhole peer sends addr message, it becomes eligible for addr gossip") + blackhole_peer.send_and_ping(msg_addr()) + msg = self.setup_addr_msg(2) + self.send_addr_msg(addr_source, msg, [receiver_peer, blackhole_peer]) + + # Confirm node has now received addr-related messages from blackhole peer + assert_greater_than(self.sum_addr_messages(peerinfo[1]['bytesrecv_per_msg']), 0) + # And that peer received addresses + assert_equal(blackhole_peer.num_ipv4_received, 2) + + 
self.nodes[0].disconnect_p2ps() + def getaddr_tests(self): # In the previous tests, the node answered GETADDR requests with an # empty addrman. Due to GETADDR response caching (see From 3893da06db1eb622f540605700f8663f8d87b2df Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 26 Jul 2021 12:08:07 -0700 Subject: [PATCH 079/112] [RPC] Add field to getpeerinfo to indicate if addr relay is enabled --- src/net_processing.cpp | 1 + src/net_processing.h | 1 + src/rpc/net.cpp | 2 ++ 3 files changed, 4 insertions(+) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 5103824ee1..24b55eb5cf 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1279,6 +1279,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c stats.m_ping_wait = ping_wait; stats.m_addr_processed = peer->m_addr_processed.load(); stats.m_addr_rate_limited = peer->m_addr_rate_limited.load(); + stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load(); return true; } diff --git a/src/net_processing.h b/src/net_processing.h index c537efb5db..4532a0505e 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -31,6 +31,7 @@ struct CNodeStateStats { std::vector vHeightInFlight; uint64_t m_addr_processed = 0; uint64_t m_addr_rate_limited = 0; + bool m_addr_relay_enabled{false}; }; class PeerManager : public CValidationInterface, public NetEventsInterface diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index dba0f971b2..3962a13924 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -118,6 +118,7 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR, "addr", "(host:port) The IP address and port of the peer"}, {RPCResult::Type::STR, "addrbind", "(ip:port) Bind address of the connection to the peer"}, {RPCResult::Type::STR, "addrlocal", "(ip:port) Local address as reported by the peer"}, + {RPCResult::Type::BOOL, "addr_relay_enabled", "Whether we participate in address relay with this peer"}, {RPCResult::Type::STR, "network", "Network (" + Join(GetNetworkNames(/* append_unroutable */ true), ", ") + ")"}, {RPCResult::Type::NUM, "mapped_as", "The AS in the BGP route to the peer used for diversifying\n" "peer selection (only available if the asmap config flag is set)"}, @@ -201,6 +202,7 @@ static RPCHelpMan getpeerinfo() if (!(stats.addrLocal.empty())) { obj.pushKV("addrlocal", stats.addrLocal); } + obj.pushKV("addr_relay_enabled", statestats.m_addr_relay_enabled); obj.pushKV("network", GetNetworkName(stats.m_network)); if (stats.m_mapped_as != 0) { obj.pushKV("mapped_as", uint64_t(stats.m_mapped_as)); From 3f7250b328b8b2f5d63f323702445ac5c989b73d Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Fri, 23 Jul 2021 18:19:32 -0700 Subject: [PATCH 080/112] [test] Use the new endpoint to improve tests --- test/functional/p2p_addr_relay.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index 4233d8d010..95743a1bf5 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -210,6 +210,11 @@ def inbound_blackhole_tests(self): blackhole_peer = self.nodes[0].add_p2p_connection(AddrReceiver(send_getaddr=False)) initial_addrs_received = receiver_peer.num_ipv4_received + peerinfo = self.nodes[0].getpeerinfo() + assert_equal(peerinfo[0]['addr_relay_enabled'], True) # addr_source + assert_equal(peerinfo[1]['addr_relay_enabled'], True) # receiver_peer + assert_equal(peerinfo[2]['addr_relay_enabled'], False) # blackhole_peer + # addr_source sends 
2 addresses to node0 msg = self.setup_addr_msg(2) addr_source.send_and_ping(msg) @@ -232,11 +237,14 @@ def inbound_blackhole_tests(self): self.log.info("After blackhole peer sends addr message, it becomes eligible for addr gossip") blackhole_peer.send_and_ping(msg_addr()) - msg = self.setup_addr_msg(2) - self.send_addr_msg(addr_source, msg, [receiver_peer, blackhole_peer]) # Confirm node has now received addr-related messages from blackhole peer assert_greater_than(self.sum_addr_messages(peerinfo[1]['bytesrecv_per_msg']), 0) + assert_equal(self.nodes[0].getpeerinfo()[2]['addr_relay_enabled'], True) + + msg = self.setup_addr_msg(2) + self.send_addr_msg(addr_source, msg, [receiver_peer, blackhole_peer]) + # And that peer received addresses assert_equal(blackhole_peer.num_ipv4_received, 2) From 2962640c49cf38f76345e45e63045a8f0eed5c61 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Fri, 30 Jul 2021 11:03:44 +0200 Subject: [PATCH 081/112] contrib, p2p: update I2P hardcoded seeds --- contrib/seeds/nodes_main.txt | 14 +++++++++++--- src/chainparamsseeds.h | 12 ++++++++++-- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/contrib/seeds/nodes_main.txt b/contrib/seeds/nodes_main.txt index f7bfb6eb0a..b9dfdb4b0a 100644 --- a/contrib/seeds/nodes_main.txt +++ b/contrib/seeds/nodes_main.txt @@ -677,12 +677,20 @@ tddeij4qigtjr6jfnrmq6btnirmq5msgwcsdpcdjr7atftm7cxlqztid.onion:8333 vi5bnbxkleeqi6hfccjochnn65lcxlfqs4uwgmhudph554zibiusqnad.onion:8333 xqt25cobm5zqucac3634zfght72he6u3eagfyej5ellbhcdgos7t2had.onion:8333 -# manually added 2021-05 for minimal i2p bootstrap support -72l3ucjkuscrbiiepoehuwqgknyzgo7zuix5ty4puwrkyhtmnsga.b32.i2p:0 +# manually added 2021-08 for minimal i2p bootstrap support +4hllr6w55mbtemb3ebvlzl4zj6qke4si7zcob5qdyg63mjgq624a.b32.i2p:0 +6s33jtpvwzkiej3nff5qm72slgqljxhxn62hdt6m7nvynqsxqdda.b32.i2p:0 +a5qsnv3maw77mlmmzlcglu6twje6ttctd3fhpbfwcbpmewx6fczq.b32.i2p:0 +bitcornrd36coazsbzsz4pdebyzvaplmsalq4kpoljmn6cg6x5zq.b32.i2p:0 c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p:0 +dhtq2p76tyhi442aidb3vd2bv7yxxjuddpb2jydnnrl2ons5bhha.b32.i2p:0 gehtac45oaghz54ypyopim64mql7oad2bqclla74l6tfeolzmodq.b32.i2p:0 h3r6bkn46qxftwja53pxiykntegfyfjqtnzbm6iv6r5mungmqgmq.b32.i2p:0 hnbbyjpxx54623l555sta7pocy3se4sdgmuebi5k6reesz5rjp6q.b32.i2p:0 -pjs7or2ctvteeo5tu4bwyrtydeuhqhvdprtujn4daxr75jpebjxa.b32.i2p:0 +i3hcdakiz2tyvggkwefvdjoi7444kgvd2mbdfizjvv43q7zukezq.b32.i2p:0 +jz3s4eurm5vzjresf4mwo7oni4bk36daolwxh4iqtewakylgkxmq.b32.i2p:0 +kokkmpquqlkptu5hkmzqlttsmtwxicldr4so7wqsufk6bwf32nma.b32.i2p:0 +kvrde7mcgjhz3xzeltwy4gs2rxdfbnbs2wc67mh2pt43wjmjnmbq.b32.i2p:0 +shh2ewyegnuwnmdse5kl5toybdvzkvk2yj4zcowz6iwhhh3ykdfa.b32.i2p:0 wwbw7nqr3ahkqv62cuqfwgtneekvvpnuc4i4f6yo7tpoqjswvcwa.b32.i2p:0 zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:0 diff --git a/src/chainparamsseeds.h b/src/chainparamsseeds.h index a22529c386..953a09d5e7 100644 --- a/src/chainparamsseeds.h +++ b/src/chainparamsseeds.h @@ -683,12 +683,20 @@ static const uint8_t chainparams_seed_main[] = { 0x04,0x20,0x98,0xc6,0x44,0x27,0x90,0x41,0xa6,0x98,0xf9,0x25,0x6c,0x59,0x0f,0x06,0x6d,0x44,0x59,0x0e,0xb2,0x46,0xb0,0xa4,0x37,0x88,0x69,0x8f,0xc1,0x32,0xcd,0x9f,0x15,0xd7,0x20,0x8d, 0x04,0x20,0xaa,0x3a,0x16,0x86,0xea,0x59,0x09,0x04,0x78,0xe5,0x10,0x92,0xe1,0x1d,0xad,0xf7,0x56,0x2b,0xac,0xb0,0x97,0x29,0x63,0x30,0xf4,0x1b,0xcf,0xde,0xf3,0x28,0x0a,0x29,0x20,0x8d, 
0x04,0x20,0xbc,0x27,0xae,0x89,0xc1,0x67,0x73,0x0a,0x08,0x02,0xdf,0xb7,0xcc,0x94,0xc7,0x9f,0xf4,0x72,0x7a,0x9b,0x20,0x0c,0x5c,0x11,0x3d,0x22,0xd6,0x13,0x88,0x66,0x74,0xbf,0x20,0x8d, - 0x05,0x20,0xfe,0x97,0xba,0x09,0x2a,0xa4,0x85,0x10,0xa1,0x04,0x7b,0x88,0x7a,0x5a,0x06,0x53,0x71,0x93,0x3b,0xf9,0xa2,0x2f,0xd9,0xe3,0x8f,0xa5,0xa2,0xac,0x1e,0x6c,0x6c,0x8c,0x00,0x00, + 0x05,0x20,0xe1,0xd6,0xb8,0xfa,0xdd,0xeb,0x03,0x32,0x30,0x3b,0x20,0x6a,0xbc,0xaf,0x99,0x4f,0xa0,0xa2,0x72,0x48,0xfe,0x44,0xe0,0xf6,0x03,0xc1,0xbd,0xb6,0x24,0xd0,0xf6,0xb8,0x00,0x00, + 0x05,0x20,0xf4,0xb7,0xb4,0xcd,0xf5,0xb6,0x54,0x82,0x27,0x6d,0x29,0x7b,0x06,0x7f,0x52,0x59,0xa0,0xb4,0xdc,0xf7,0x6f,0xb4,0x71,0xcf,0xcc,0xfb,0x6b,0x86,0xc2,0x57,0x80,0xc6,0x00,0x00, + 0x05,0x20,0x07,0x61,0x26,0xd7,0x6c,0x05,0xbf,0xf6,0x2d,0x8c,0xca,0xc4,0x65,0xd3,0xd3,0xb2,0x49,0xe9,0xcc,0x53,0x1e,0xca,0x77,0x84,0xb6,0x10,0x5e,0xc2,0x5a,0xfe,0x28,0xb3,0x00,0x00, + 0x05,0x20,0x0a,0x26,0x27,0x45,0xb1,0x1e,0xfc,0x27,0x03,0x32,0x0e,0x65,0x9e,0x3c,0x64,0x0e,0x33,0x50,0x3d,0x6c,0x90,0x17,0x0e,0x29,0xee,0x5a,0x58,0xdf,0x08,0xde,0xbf,0x73,0x00,0x00, 0x05,0x20,0x17,0x0c,0x56,0xce,0x72,0xa5,0xa0,0xe6,0x23,0x06,0xa3,0xc7,0x08,0x43,0x18,0xee,0x3a,0x46,0x35,0x5d,0x17,0xf6,0x78,0x96,0xa0,0x9c,0x51,0xef,0xbe,0x23,0xfd,0x71,0x00,0x00, + 0x05,0x20,0x19,0xe7,0x0d,0x3f,0xfe,0x9e,0x0e,0x8e,0x73,0x40,0x40,0xc3,0xba,0x8f,0x41,0xaf,0xf1,0x7b,0xa6,0x83,0x1b,0xc3,0xa4,0xe0,0x6d,0x6c,0x57,0xa7,0x36,0x5d,0x09,0xce,0x00,0x00, 0x05,0x20,0x31,0x0f,0x30,0x0b,0x9d,0x70,0x0c,0x7c,0xf7,0x98,0x7e,0x1c,0xf4,0x33,0xdc,0x64,0x17,0xf7,0x00,0x7a,0x0c,0x04,0xb5,0x83,0xfc,0x5f,0xa6,0x52,0x39,0x79,0x63,0x87,0x00,0x00, 0x05,0x20,0x3e,0xe3,0xe0,0xa9,0xbc,0xf4,0x2e,0x59,0xd9,0x20,0xee,0xdf,0x74,0x61,0x4d,0x99,0x0c,0x5c,0x15,0x30,0x9b,0x72,0x16,0x79,0x15,0xf4,0x7a,0xca,0x34,0xcc,0x81,0x99,0x00,0x00, 0x05,0x20,0x3b,0x42,0x1c,0x25,0xf7,0xbf,0x79,0xed,0x6d,0x7d,0xef,0x65,0x30,0x7d,0xee,0x16,0x37,0x22,0x72,0x43,0x33,0x28,0x40,0xa3,0xaa,0xf4,0x48,0x49,0x67,0xb1,0x4b,0xfd,0x00,0x00, - 0x05,0x20,0x7a,0x65,0xf7,0x47,0x42,0x9d,0x66,0x42,0x3b,0xb3,0xa7,0x03,0x6c,0x46,0x78,0x19,0x28,0x78,0x1e,0xa3,0x7c,0x67,0x44,0xb7,0x83,0x05,0xe3,0xfe,0xa5,0xe4,0x0a,0x6e,0x00,0x00, + 0x05,0x20,0x46,0xce,0x21,0x81,0x48,0xce,0xa7,0x8a,0x98,0xca,0xb1,0x0b,0x51,0xa5,0xc8,0xff,0x39,0xc5,0x1a,0xa3,0xd3,0x02,0x32,0xa3,0x29,0xad,0x79,0xb8,0x7f,0x34,0x51,0x33,0x00,0x00, + 0x05,0x20,0x4e,0x77,0x2e,0x12,0x91,0x67,0x6b,0x94,0xc4,0x92,0x2f,0x19,0x67,0x7d,0xcd,0x47,0x02,0xad,0xf8,0x60,0x72,0xed,0x73,0xf1,0x10,0x99,0x2c,0x05,0x61,0x66,0x55,0xd9,0x00,0x00, + 0x05,0x20,0x53,0x94,0xa6,0x3e,0x14,0x82,0xd4,0xf9,0xd3,0xa7,0x53,0x33,0x05,0xce,0x72,0x64,0xed,0x74,0x09,0x63,0x8f,0x24,0xef,0xda,0x12,0xa1,0x55,0xe0,0xd8,0xbb,0xd3,0x58,0x00,0x00, + 0x05,0x20,0x55,0x62,0x32,0x7d,0x82,0x32,0x4f,0x9d,0xdf,0x24,0x5c,0xed,0x8e,0x1a,0x5a,0x8d,0xc6,0x50,0xb4,0x32,0xd5,0x85,0xef,0xb0,0xfa,0x7c,0xf9,0xbb,0x25,0x89,0x6b,0x03,0x00,0x00, + 0x05,0x20,0x91,0xcf,0xa2,0x5b,0x04,0x33,0x69,0x66,0xb0,0x72,0x27,0x54,0xbe,0xcd,0xd8,0x08,0xeb,0x95,0x55,0x5a,0xc2,0x79,0x91,0x3a,0xd9,0xf2,0x2c,0x73,0x9f,0x78,0x50,0xca,0x00,0x00, 0x05,0x20,0xb5,0x83,0x6f,0xb6,0x11,0xd8,0x0e,0xa8,0x57,0xda,0x15,0x20,0x5b,0x1a,0x6d,0x21,0x15,0x5a,0xbd,0xb4,0x17,0x11,0xc2,0xfb,0x0e,0xfc,0xde,0xe8,0x26,0x56,0xa8,0xac,0x00,0x00, 0x05,0x20,0xcc,0xaf,0x6c,0x3b,0xd0,0x13,0x76,0x23,0xc3,0x36,0xbb,0x64,0x4a,0x4a,0x06,0x93,0x69,0x6d,0xb0,0x10,0x6e,0x66,0xa4,0x61,0xf8,0x2d,0xe7,0x80,0x72,0x4d,0x53,0x94,0x00,0x00, }; From fa384fdd0b7af73d81fa9619c5fba779452cd2af Mon Sep 17 00:00:00 2001 From: MarcoFalke 
Date: Wed, 28 Jul 2021 20:00:23 +0200 Subject: [PATCH 082/112] Ignore banlist.dat This also allows to remove the "dirty" argument, which can now be deduced from the return value of Read(). --- doc/files.md | 2 +- src/addrdb.cpp | 13 ++++++------- src/addrdb.h | 6 ++---- src/banman.cpp | 2 +- src/banman.h | 2 +- src/test/fuzz/banman.cpp | 4 +--- 6 files changed, 12 insertions(+), 17 deletions(-) diff --git a/doc/files.md b/doc/files.md index e670d77ae5..f88d3f91a1 100644 --- a/doc/files.md +++ b/doc/files.md @@ -56,7 +56,6 @@ Subdirectory | File(s) | Description `indexes/coinstats/db/` | LevelDB database | Coinstats index; *optional*, used if `-coinstatsindex=1` `wallets/` | | [Contains wallets](#multi-wallet-environment); can be specified by `-walletdir` option; if `wallets/` subdirectory does not exist, wallets reside in the [data directory](#data-directory-location) `./` | `anchors.dat` | Anchor IP address database, created on shutdown and deleted at startup. Anchors are last known outgoing block-relay-only peers that are tried to re-connect to on startup -`./` | `banlist.dat` | Stores the addresses/subnets of banned nodes (deprecated). `bitcoind` or `bitcoin-qt` no longer save the banlist to this file, but read it on startup if `banlist.json` is not present. `./` | `banlist.json` | Stores the addresses/subnets of banned nodes. `./` | `bitcoin.conf` | User-defined [configuration settings](bitcoin-conf.md) for `bitcoind` or `bitcoin-qt`. File is not written to by the software and must be created manually. Path can be specified by `-conf` option `./` | `bitcoind.pid` | Stores the process ID (PID) of `bitcoind` or `bitcoin-qt` while running; created at start and deleted on shutdown; can be specified by `-pid` option @@ -114,6 +113,7 @@ These subdirectories and files are no longer used by Bitcoin Core: Path | Description | Repository notes ---------------|-------------|----------------- +`banlist.dat` | Stores the addresses/subnets of banned nodes; superseded by `banlist.json` in 22.0 and completely ignored in 23.0 | [PR #20966](https://github.com/bitcoin/bitcoin/pull/20966), [PR #22570](https://github.com/bitcoin/bitcoin/pull/22570) `blktree/` | Blockchain index; replaced by `blocks/index/` in [0.8.0](https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-0.8.0.md#improvements) | [PR #2231](https://github.com/bitcoin/bitcoin/pull/2231), [`8fdc94cc`](https://github.com/bitcoin/bitcoin/commit/8fdc94cc8f0341e96b1edb3a5b56811c0b20bd15) `coins/` | Unspent transaction output database; replaced by `chainstate/` in 0.8.0 | [PR #2231](https://github.com/bitcoin/bitcoin/pull/2231), [`8fdc94cc`](https://github.com/bitcoin/bitcoin/commit/8fdc94cc8f0341e96b1edb3a5b56811c0b20bd15) `blkindex.dat` | Blockchain index BDB database; replaced by {`chainstate/`, `blocks/index/`, `blocks/revNNNNN.dat`[\[2\]](#note2)} in 0.8.0 | [PR #1677](https://github.com/bitcoin/bitcoin/pull/1677) diff --git a/src/addrdb.cpp b/src/addrdb.cpp index b8fd019bab..c3e224ee83 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -197,17 +197,16 @@ bool CBanDB::Write(const banmap_t& banSet) return false; } -bool CBanDB::Read(banmap_t& banSet, bool& dirty) +bool CBanDB::Read(banmap_t& banSet) { - // If the JSON banlist does not exist, then try to read the non-upgraded banlist.dat. + if (fs::exists(m_banlist_dat)) { + LogPrintf("banlist.dat ignored because it can only be read by " PACKAGE_NAME " version 22.x. 
Remove %s to silence this warning.\n", m_banlist_dat); + } + // If the JSON banlist does not exist, then recreate it if (!fs::exists(m_banlist_json)) { - // If this succeeds then we need to flush to disk in order to create the JSON banlist. - dirty = true; - return DeserializeFileDB(m_banlist_dat, banSet, CLIENT_VERSION); + return false; } - dirty = false; - std::map settings; std::vector errors; diff --git a/src/addrdb.h b/src/addrdb.h index 399103c991..1e0ccb1f60 100644 --- a/src/addrdb.h +++ b/src/addrdb.h @@ -76,7 +76,7 @@ class CAddrDB static bool Read(CAddrMan& addr, CDataStream& ssPeers); }; -/** Access to the banlist databases (banlist.json and banlist.dat) */ +/** Access to the banlist database (banlist.json) */ class CBanDB { private: @@ -95,11 +95,9 @@ class CBanDB * Read the banlist from disk. * @param[out] banSet The loaded list. Set if `true` is returned, otherwise it is left * in an undefined state. - * @param[out] dirty Indicates whether the loaded list needs flushing to disk. Set if - * `true` is returned, otherwise it is left in an undefined state. * @return true on success */ - bool Read(banmap_t& banSet, bool& dirty); + bool Read(banmap_t& banSet); }; /** diff --git a/src/banman.cpp b/src/banman.cpp index d2437e6733..c64a48a05a 100644 --- a/src/banman.cpp +++ b/src/banman.cpp @@ -18,7 +18,7 @@ BanMan::BanMan(fs::path ban_file, CClientUIInterface* client_interface, int64_t if (m_client_interface) m_client_interface->InitMessage(_("Loading banlist…").translated); int64_t n_start = GetTimeMillis(); - if (m_ban_db.Read(m_banned, m_is_dirty)) { + if (m_ban_db.Read(m_banned)) { SweepBanned(); // sweep out unused entries LogPrint(BCLog::NET, "Loaded %d banned node addresses/subnets %dms\n", m_banned.size(), diff --git a/src/banman.h b/src/banman.h index 8c75d4037e..8a03a9e3fc 100644 --- a/src/banman.h +++ b/src/banman.h @@ -88,7 +88,7 @@ class BanMan RecursiveMutex m_cs_banned; banmap_t m_banned GUARDED_BY(m_cs_banned); - bool m_is_dirty GUARDED_BY(m_cs_banned); + bool m_is_dirty GUARDED_BY(m_cs_banned){false}; CClientUIInterface* m_client_interface = nullptr; CBanDB m_ban_db; const int64_t m_default_ban_time; diff --git a/src/test/fuzz/banman.cpp b/src/test/fuzz/banman.cpp index 1986b5e4c8..de211f601f 100644 --- a/src/test/fuzz/banman.cpp +++ b/src/test/fuzz/banman.cpp @@ -52,8 +52,7 @@ FUZZ_TARGET_INIT(banman, initialize_banman) const bool start_with_corrupted_banlist{fuzzed_data_provider.ConsumeBool()}; bool force_read_and_write_to_err{false}; if (start_with_corrupted_banlist) { - const std::string sfx{fuzzed_data_provider.ConsumeBool() ? ".dat" : ".json"}; - assert(WriteBinaryFile(banlist_file.string() + sfx, + assert(WriteBinaryFile(banlist_file.string() + ".json", fuzzed_data_provider.ConsumeRandomLengthString())); } else { force_read_and_write_to_err = fuzzed_data_provider.ConsumeBool(); @@ -114,6 +113,5 @@ FUZZ_TARGET_INIT(banman, initialize_banman) (void)(banmap == banmap_read); } } - fs::remove(banlist_file.string() + ".dat"); fs::remove(banlist_file.string() + ".json"); } From d2dffd5be4c8f6a1942dd971d09707c3620a1689 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Fri, 30 Jul 2021 11:22:52 +0200 Subject: [PATCH 083/112] doc: add info to i2p.md about IBD time and multiple networks --- doc/i2p.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/i2p.md b/doc/i2p.md index 27ef4d9d9f..3a507a25ab 100644 --- a/doc/i2p.md +++ b/doc/i2p.md @@ -47,13 +47,21 @@ information in the debug log about your I2P configuration and connections. 
Run `bitcoin-cli help logging` for more information. It is possible to restrict outgoing connections in the usual way with -`onlynet=i2p`. I2P support was added to Bitcoin Core in version 22.0 (mid 2021) +`onlynet=i2p`. I2P support was added to Bitcoin Core in version 22.0 (mid-2021) and there may be fewer I2P peers than Tor or IP ones. Therefore, using `onlynet=i2p` alone (without other `onlynet=`) may make a node more susceptible to [Sybil attacks](https://en.bitcoin.it/wiki/Weaknesses#Sybil_attack). Use `bitcoin-cli -addrinfo` to see the number of I2P addresses known to your node. -## I2P related information in Bitcoin Core +Another consideration with `onlynet=i2p` is that the initial blocks download +phase when syncing up a new node can be very slow. This phase can be sped up by +using other networks, for instance `onlynet=onion`, at the same time. + +In general, a node can be run with both onion and I2P hidden services (or +any/all of IPv4/IPv6/onion/I2P), which can provide a potential fallback if one +of the networks has issues. + +## I2P-related information in Bitcoin Core There are several ways to see your I2P address in Bitcoin Core: - in the debug log (grep for `AddLocal`, the I2P address ends in `.b32.i2p`) From fa4e6afdae7b82df638b60edf37ac36d57a8cb4f Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Wed, 28 Jul 2021 19:52:14 +0200 Subject: [PATCH 084/112] Remove unused CSubNet serialize code --- src/netaddress.h | 18 ------------------ src/test/fuzz/deserialize.cpp | 11 ----------- 2 files changed, 29 deletions(-) diff --git a/src/netaddress.h b/src/netaddress.h index 5e1d9d2a6f..c81f9fd5d8 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -536,24 +536,6 @@ class CSubNet friend bool operator==(const CSubNet& a, const CSubNet& b); friend bool operator!=(const CSubNet& a, const CSubNet& b) { return !(a == b); } friend bool operator<(const CSubNet& a, const CSubNet& b); - - SERIALIZE_METHODS(CSubNet, obj) - { - READWRITE(obj.network); - if (obj.network.IsIPv4()) { - // Before commit 102867c587f5f7954232fb8ed8e85cda78bb4d32, CSubNet used the last 4 bytes of netmask - // to store the relevant bytes for an IPv4 mask. For compatibility reasons, keep doing so in - // serialized form. - unsigned char dummy[12] = {0}; - READWRITE(dummy); - READWRITE(MakeSpan(obj.netmask).first(4)); - } else { - READWRITE(obj.netmask); - } - READWRITE(obj.valid); - // Mark invalid if the result doesn't pass sanity checking. 
- SER_READ(obj, if (obj.valid) obj.valid = obj.SanityCheck()); - } }; /** A combination of a network address (CNetAddr) and a (TCP) port */ diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp index 721e4360d0..63f75bc9a0 100644 --- a/src/test/fuzz/deserialize.cpp +++ b/src/test/fuzz/deserialize.cpp @@ -142,17 +142,6 @@ FUZZ_TARGET_DESERIALIZE(script_deserialize, { CScript script; DeserializeFromFuzzingInput(buffer, script); }) -FUZZ_TARGET_DESERIALIZE(sub_net_deserialize, { - CSubNet sub_net_1; - DeserializeFromFuzzingInput(buffer, sub_net_1, INIT_PROTO_VERSION); - AssertEqualAfterSerializeDeserialize(sub_net_1, INIT_PROTO_VERSION); - CSubNet sub_net_2; - DeserializeFromFuzzingInput(buffer, sub_net_2, INIT_PROTO_VERSION | ADDRV2_FORMAT); - AssertEqualAfterSerializeDeserialize(sub_net_2, INIT_PROTO_VERSION | ADDRV2_FORMAT); - CSubNet sub_net_3; - DeserializeFromFuzzingInput(buffer, sub_net_3); - AssertEqualAfterSerializeDeserialize(sub_net_3, INIT_PROTO_VERSION | ADDRV2_FORMAT); -}) FUZZ_TARGET_DESERIALIZE(tx_in_deserialize, { CTxIn tx_in; DeserializeFromFuzzingInput(buffer, tx_in); From fa1eddb1a3d1319ddc3643b6f34fe2014de32764 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Wed, 28 Jul 2021 19:54:51 +0200 Subject: [PATCH 085/112] Fix whitespace in touched files Leaving the incorrect indentation would be frustrating because: * Some editor may fix up the whitespace when editing a file, so before commiting the whitespace changes need to be undone. * It makes it harder to use clang-format-diff on a change. Can be trivially reviewed with --word-diff-regex=. --ignore-all-space --- src/netaddress.h | 811 +++++++++++++++++----------------- src/test/fuzz/deserialize.cpp | 268 +++++------ 2 files changed, 538 insertions(+), 541 deletions(-) diff --git a/src/netaddress.h b/src/netaddress.h index c81f9fd5d8..eb35ed3fac 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -42,8 +42,7 @@ static constexpr int ADDRV2_FORMAT = 0x20000000; * over all enum values and also `GetExtNetwork()` "extends" this enum by * introducing standalone constants starting from `NET_MAX`. */ -enum Network -{ +enum Network { /// Addresses from these networks are not publicly routable on the global Internet. NET_UNROUTABLE = 0, @@ -73,16 +72,14 @@ enum Network /// Prefix of an IPv6 address when it contains an embedded IPv4 address. /// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155). static const std::array IPV4_IN_IPV6_PREFIX{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF -}; + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF}; /// Prefix of an IPv6 address when it contains an embedded TORv2 address. /// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155). /// Such dummy IPv6 addresses are guaranteed to not be publicly routable as they /// fall under RFC4193's fc00::/7 subnet allocated to unique-local addresses. static const std::array TORV2_IN_IPV6_PREFIX{ - 0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43 -}; + 0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43}; /// Prefix of an IPv6 address when it contains an embedded "internal" address. /// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155). @@ -120,354 +117,354 @@ static constexpr uint16_t I2P_SAM31_PORT{0}; */ class CNetAddr { - protected: - /** - * Raw representation of the network address. - * In network byte order (big endian) for IPv4 and IPv6. - */ - prevector m_addr{ADDR_IPV6_SIZE, 0x0}; - - /** - * Network to which this address belongs. 
- */ - Network m_net{NET_IPV6}; - - /** - * Scope id if scoped/link-local IPV6 address. - * See https://tools.ietf.org/html/rfc4007 - */ - uint32_t m_scope_id{0}; - - public: - CNetAddr(); - explicit CNetAddr(const struct in_addr& ipv4Addr); - void SetIP(const CNetAddr& ip); - - /** - * Set from a legacy IPv6 address. - * Legacy IPv6 address may be a normal IPv6 address, or another address - * (e.g. IPv4) disguised as IPv6. This encoding is used in the legacy - * `addr` encoding. - */ - void SetLegacyIPv6(Span ipv6); - - bool SetInternal(const std::string& name); - - /** - * Parse a Tor or I2P address and set this object to it. - * @param[in] addr Address to parse, for example - * pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion or - * ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p. - * @returns Whether the operation was successful. - * @see CNetAddr::IsTor(), CNetAddr::IsI2P() - */ - bool SetSpecial(const std::string& addr); - - bool IsBindAny() const; // INADDR_ANY equivalent - bool IsIPv4() const; // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0) - bool IsIPv6() const; // IPv6 address (not mapped IPv4, not Tor) - bool IsRFC1918() const; // IPv4 private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) - bool IsRFC2544() const; // IPv4 inter-network communications (198.18.0.0/15) - bool IsRFC6598() const; // IPv4 ISP-level NAT (100.64.0.0/10) - bool IsRFC5737() const; // IPv4 documentation addresses (192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24) - bool IsRFC3849() const; // IPv6 documentation address (2001:0DB8::/32) - bool IsRFC3927() const; // IPv4 autoconfig (169.254.0.0/16) - bool IsRFC3964() const; // IPv6 6to4 tunnelling (2002::/16) - bool IsRFC4193() const; // IPv6 unique local (FC00::/7) - bool IsRFC4380() const; // IPv6 Teredo tunnelling (2001::/32) - bool IsRFC4843() const; // IPv6 ORCHID (deprecated) (2001:10::/28) - bool IsRFC7343() const; // IPv6 ORCHIDv2 (2001:20::/28) - bool IsRFC4862() const; // IPv6 autoconfig (FE80::/64) - bool IsRFC6052() const; // IPv6 well-known prefix for IPv4-embedded address (64:FF9B::/96) - bool IsRFC6145() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) (actually defined in RFC2765) - bool IsHeNet() const; // IPv6 Hurricane Electric - https://he.net (2001:0470::/36) - bool IsTor() const; - bool IsI2P() const; - bool IsCJDNS() const; - bool IsLocal() const; - bool IsRoutable() const; - bool IsInternal() const; - bool IsValid() const; - - /** - * Check if the current object can be serialized in pre-ADDRv2/BIP155 format. - */ - bool IsAddrV1Compatible() const; - - enum Network GetNetwork() const; - std::string ToString() const; - std::string ToStringIP() const; - uint64_t GetHash() const; - bool GetInAddr(struct in_addr* pipv4Addr) const; - Network GetNetClass() const; - - //! For IPv4, mapped IPv4, SIIT translated IPv4, Teredo, 6to4 tunneled addresses, return the relevant IPv4 address as a uint32. - uint32_t GetLinkedIPv4() const; - //! Whether this address has a linked IPv4 address (see GetLinkedIPv4()). - bool HasLinkedIPv4() const; - - // The AS on the BGP path to the node we use to diversify - // peers in AddrMan bucketing based on the AS infrastructure. - // The ip->AS mapping depends on how asmap is constructed. 
- uint32_t GetMappedAS(const std::vector &asmap) const; - - std::vector GetGroup(const std::vector &asmap) const; - std::vector GetAddrBytes() const; - int GetReachabilityFrom(const CNetAddr *paddrPartner = nullptr) const; - - explicit CNetAddr(const struct in6_addr& pipv6Addr, const uint32_t scope = 0); - bool GetIn6Addr(struct in6_addr* pipv6Addr) const; - - friend bool operator==(const CNetAddr& a, const CNetAddr& b); - friend bool operator!=(const CNetAddr& a, const CNetAddr& b) { return !(a == b); } - friend bool operator<(const CNetAddr& a, const CNetAddr& b); - - /** - * Whether this address should be relayed to other peers even if we can't reach it ourselves. - */ - bool IsRelayable() const - { - return IsIPv4() || IsIPv6() || IsTor() || IsI2P(); - } +protected: + /** + * Raw representation of the network address. + * In network byte order (big endian) for IPv4 and IPv6. + */ + prevector m_addr{ADDR_IPV6_SIZE, 0x0}; + + /** + * Network to which this address belongs. + */ + Network m_net{NET_IPV6}; + + /** + * Scope id if scoped/link-local IPV6 address. + * See https://tools.ietf.org/html/rfc4007 + */ + uint32_t m_scope_id{0}; - /** - * Serialize to a stream. - */ - template - void Serialize(Stream& s) const - { - if (s.GetVersion() & ADDRV2_FORMAT) { - SerializeV2Stream(s); - } else { - SerializeV1Stream(s); - } - } +public: + CNetAddr(); + explicit CNetAddr(const struct in_addr& ipv4Addr); + void SetIP(const CNetAddr& ip); + + /** + * Set from a legacy IPv6 address. + * Legacy IPv6 address may be a normal IPv6 address, or another address + * (e.g. IPv4) disguised as IPv6. This encoding is used in the legacy + * `addr` encoding. + */ + void SetLegacyIPv6(Span ipv6); + + bool SetInternal(const std::string& name); + + /** + * Parse a Tor or I2P address and set this object to it. + * @param[in] addr Address to parse, for example + * pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion or + * ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p. + * @returns Whether the operation was successful. 
+ * @see CNetAddr::IsTor(), CNetAddr::IsI2P() + */ + bool SetSpecial(const std::string& addr); + + bool IsBindAny() const; // INADDR_ANY equivalent + bool IsIPv4() const; // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0) + bool IsIPv6() const; // IPv6 address (not mapped IPv4, not Tor) + bool IsRFC1918() const; // IPv4 private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) + bool IsRFC2544() const; // IPv4 inter-network communications (198.18.0.0/15) + bool IsRFC6598() const; // IPv4 ISP-level NAT (100.64.0.0/10) + bool IsRFC5737() const; // IPv4 documentation addresses (192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24) + bool IsRFC3849() const; // IPv6 documentation address (2001:0DB8::/32) + bool IsRFC3927() const; // IPv4 autoconfig (169.254.0.0/16) + bool IsRFC3964() const; // IPv6 6to4 tunnelling (2002::/16) + bool IsRFC4193() const; // IPv6 unique local (FC00::/7) + bool IsRFC4380() const; // IPv6 Teredo tunnelling (2001::/32) + bool IsRFC4843() const; // IPv6 ORCHID (deprecated) (2001:10::/28) + bool IsRFC7343() const; // IPv6 ORCHIDv2 (2001:20::/28) + bool IsRFC4862() const; // IPv6 autoconfig (FE80::/64) + bool IsRFC6052() const; // IPv6 well-known prefix for IPv4-embedded address (64:FF9B::/96) + bool IsRFC6145() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) (actually defined in RFC2765) + bool IsHeNet() const; // IPv6 Hurricane Electric - https://he.net (2001:0470::/36) + bool IsTor() const; + bool IsI2P() const; + bool IsCJDNS() const; + bool IsLocal() const; + bool IsRoutable() const; + bool IsInternal() const; + bool IsValid() const; + + /** + * Check if the current object can be serialized in pre-ADDRv2/BIP155 format. + */ + bool IsAddrV1Compatible() const; + + enum Network GetNetwork() const; + std::string ToString() const; + std::string ToStringIP() const; + uint64_t GetHash() const; + bool GetInAddr(struct in_addr* pipv4Addr) const; + Network GetNetClass() const; + + //! For IPv4, mapped IPv4, SIIT translated IPv4, Teredo, 6to4 tunneled addresses, return the relevant IPv4 address as a uint32. + uint32_t GetLinkedIPv4() const; + //! Whether this address has a linked IPv4 address (see GetLinkedIPv4()). + bool HasLinkedIPv4() const; + + // The AS on the BGP path to the node we use to diversify + // peers in AddrMan bucketing based on the AS infrastructure. + // The ip->AS mapping depends on how asmap is constructed. + uint32_t GetMappedAS(const std::vector& asmap) const; + + std::vector GetGroup(const std::vector& asmap) const; + std::vector GetAddrBytes() const; + int GetReachabilityFrom(const CNetAddr* paddrPartner = nullptr) const; + + explicit CNetAddr(const struct in6_addr& pipv6Addr, const uint32_t scope = 0); + bool GetIn6Addr(struct in6_addr* pipv6Addr) const; + + friend bool operator==(const CNetAddr& a, const CNetAddr& b); + friend bool operator!=(const CNetAddr& a, const CNetAddr& b) { return !(a == b); } + friend bool operator<(const CNetAddr& a, const CNetAddr& b); + + /** + * Whether this address should be relayed to other peers even if we can't reach it ourselves. + */ + bool IsRelayable() const + { + return IsIPv4() || IsIPv6() || IsTor() || IsI2P(); + } - /** - * Unserialize from a stream. - */ - template - void Unserialize(Stream& s) - { - if (s.GetVersion() & ADDRV2_FORMAT) { - UnserializeV2Stream(s); - } else { - UnserializeV1Stream(s); - } + /** + * Serialize to a stream. 
+ */ + template + void Serialize(Stream& s) const + { + if (s.GetVersion() & ADDRV2_FORMAT) { + SerializeV2Stream(s); + } else { + SerializeV1Stream(s); } + } - friend class CNetAddrHash; - friend class CSubNet; - - private: - /** - * Parse a Tor address and set this object to it. - * @param[in] addr Address to parse, must be a valid C string, for example - * pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion. - * @returns Whether the operation was successful. - * @see CNetAddr::IsTor() - */ - bool SetTor(const std::string& addr); - - /** - * Parse an I2P address and set this object to it. - * @param[in] addr Address to parse, must be a valid C string, for example - * ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p. - * @returns Whether the operation was successful. - * @see CNetAddr::IsI2P() - */ - bool SetI2P(const std::string& addr); - - /** - * BIP155 network ids recognized by this software. - */ - enum BIP155Network : uint8_t { - IPV4 = 1, - IPV6 = 2, - TORV2 = 3, - TORV3 = 4, - I2P = 5, - CJDNS = 6, - }; - - /** - * Size of CNetAddr when serialized as ADDRv1 (pre-BIP155) (in bytes). - */ - static constexpr size_t V1_SERIALIZATION_SIZE = ADDR_IPV6_SIZE; - - /** - * Maximum size of an address as defined in BIP155 (in bytes). - * This is only the size of the address, not the entire CNetAddr object - * when serialized. - */ - static constexpr size_t MAX_ADDRV2_SIZE = 512; - - /** - * Get the BIP155 network id of this address. - * Must not be called for IsInternal() objects. - * @returns BIP155 network id, except TORV2 which is no longer supported. - */ - BIP155Network GetBIP155Network() const; - - /** - * Set `m_net` from the provided BIP155 network id and size after validation. - * @retval true the network was recognized, is valid and `m_net` was set - * @retval false not recognised (from future?) and should be silently ignored - * @throws std::ios_base::failure if the network is one of the BIP155 founding - * networks (id 1..6) with wrong address size. - */ - bool SetNetFromBIP155Network(uint8_t possible_bip155_net, size_t address_size); - - /** - * Serialize in pre-ADDRv2/BIP155 format to an array. - */ - void SerializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE]) const - { - size_t prefix_size; - - switch (m_net) { - case NET_IPV6: - assert(m_addr.size() == sizeof(arr)); - memcpy(arr, m_addr.data(), m_addr.size()); - return; - case NET_IPV4: - prefix_size = sizeof(IPV4_IN_IPV6_PREFIX); - assert(prefix_size + m_addr.size() == sizeof(arr)); - memcpy(arr, IPV4_IN_IPV6_PREFIX.data(), prefix_size); - memcpy(arr + prefix_size, m_addr.data(), m_addr.size()); - return; - case NET_INTERNAL: - prefix_size = sizeof(INTERNAL_IN_IPV6_PREFIX); - assert(prefix_size + m_addr.size() == sizeof(arr)); - memcpy(arr, INTERNAL_IN_IPV6_PREFIX.data(), prefix_size); - memcpy(arr + prefix_size, m_addr.data(), m_addr.size()); - return; - case NET_ONION: - case NET_I2P: - case NET_CJDNS: - break; - case NET_UNROUTABLE: - case NET_MAX: - assert(false); - } // no default case, so the compiler can warn about missing cases - - // Serialize ONION, I2P and CJDNS as all-zeros. - memset(arr, 0x0, V1_SERIALIZATION_SIZE); + /** + * Unserialize from a stream. + */ + template + void Unserialize(Stream& s) + { + if (s.GetVersion() & ADDRV2_FORMAT) { + UnserializeV2Stream(s); + } else { + UnserializeV1Stream(s); } + } - /** - * Serialize in pre-ADDRv2/BIP155 format to a stream. 
- */ - template - void SerializeV1Stream(Stream& s) const - { - uint8_t serialized[V1_SERIALIZATION_SIZE]; + friend class CNetAddrHash; + friend class CSubNet; - SerializeV1Array(serialized); +private: + /** + * Parse a Tor address and set this object to it. + * @param[in] addr Address to parse, must be a valid C string, for example + * pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion. + * @returns Whether the operation was successful. + * @see CNetAddr::IsTor() + */ + bool SetTor(const std::string& addr); + + /** + * Parse an I2P address and set this object to it. + * @param[in] addr Address to parse, must be a valid C string, for example + * ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p. + * @returns Whether the operation was successful. + * @see CNetAddr::IsI2P() + */ + bool SetI2P(const std::string& addr); + + /** + * BIP155 network ids recognized by this software. + */ + enum BIP155Network : uint8_t { + IPV4 = 1, + IPV6 = 2, + TORV2 = 3, + TORV3 = 4, + I2P = 5, + CJDNS = 6, + }; + + /** + * Size of CNetAddr when serialized as ADDRv1 (pre-BIP155) (in bytes). + */ + static constexpr size_t V1_SERIALIZATION_SIZE = ADDR_IPV6_SIZE; + + /** + * Maximum size of an address as defined in BIP155 (in bytes). + * This is only the size of the address, not the entire CNetAddr object + * when serialized. + */ + static constexpr size_t MAX_ADDRV2_SIZE = 512; + + /** + * Get the BIP155 network id of this address. + * Must not be called for IsInternal() objects. + * @returns BIP155 network id, except TORV2 which is no longer supported. + */ + BIP155Network GetBIP155Network() const; + + /** + * Set `m_net` from the provided BIP155 network id and size after validation. + * @retval true the network was recognized, is valid and `m_net` was set + * @retval false not recognised (from future?) and should be silently ignored + * @throws std::ios_base::failure if the network is one of the BIP155 founding + * networks (id 1..6) with wrong address size. + */ + bool SetNetFromBIP155Network(uint8_t possible_bip155_net, size_t address_size); + + /** + * Serialize in pre-ADDRv2/BIP155 format to an array. + */ + void SerializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE]) const + { + size_t prefix_size; + + switch (m_net) { + case NET_IPV6: + assert(m_addr.size() == sizeof(arr)); + memcpy(arr, m_addr.data(), m_addr.size()); + return; + case NET_IPV4: + prefix_size = sizeof(IPV4_IN_IPV6_PREFIX); + assert(prefix_size + m_addr.size() == sizeof(arr)); + memcpy(arr, IPV4_IN_IPV6_PREFIX.data(), prefix_size); + memcpy(arr + prefix_size, m_addr.data(), m_addr.size()); + return; + case NET_INTERNAL: + prefix_size = sizeof(INTERNAL_IN_IPV6_PREFIX); + assert(prefix_size + m_addr.size() == sizeof(arr)); + memcpy(arr, INTERNAL_IN_IPV6_PREFIX.data(), prefix_size); + memcpy(arr + prefix_size, m_addr.data(), m_addr.size()); + return; + case NET_ONION: + case NET_I2P: + case NET_CJDNS: + break; + case NET_UNROUTABLE: + case NET_MAX: + assert(false); + } // no default case, so the compiler can warn about missing cases + + // Serialize ONION, I2P and CJDNS as all-zeros. + memset(arr, 0x0, V1_SERIALIZATION_SIZE); + } - s << serialized; - } + /** + * Serialize in pre-ADDRv2/BIP155 format to a stream. + */ + template + void SerializeV1Stream(Stream& s) const + { + uint8_t serialized[V1_SERIALIZATION_SIZE]; - /** - * Serialize as ADDRv2 / BIP155. - */ - template - void SerializeV2Stream(Stream& s) const - { - if (IsInternal()) { - // Serialize NET_INTERNAL as embedded in IPv6. 
We need to - // serialize such addresses from addrman. - s << static_cast(BIP155Network::IPV6); - s << COMPACTSIZE(ADDR_IPV6_SIZE); - SerializeV1Stream(s); - return; - } + SerializeV1Array(serialized); - s << static_cast(GetBIP155Network()); - s << m_addr; - } + s << serialized; + } - /** - * Unserialize from a pre-ADDRv2/BIP155 format from an array. - */ - void UnserializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE]) - { - // Use SetLegacyIPv6() so that m_net is set correctly. For example - // ::FFFF:0102:0304 should be set as m_net=NET_IPV4 (1.2.3.4). - SetLegacyIPv6(arr); + /** + * Serialize as ADDRv2 / BIP155. + */ + template + void SerializeV2Stream(Stream& s) const + { + if (IsInternal()) { + // Serialize NET_INTERNAL as embedded in IPv6. We need to + // serialize such addresses from addrman. + s << static_cast(BIP155Network::IPV6); + s << COMPACTSIZE(ADDR_IPV6_SIZE); + SerializeV1Stream(s); + return; } - /** - * Unserialize from a pre-ADDRv2/BIP155 format from a stream. - */ - template - void UnserializeV1Stream(Stream& s) - { - uint8_t serialized[V1_SERIALIZATION_SIZE]; + s << static_cast(GetBIP155Network()); + s << m_addr; + } + + /** + * Unserialize from a pre-ADDRv2/BIP155 format from an array. + */ + void UnserializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE]) + { + // Use SetLegacyIPv6() so that m_net is set correctly. For example + // ::FFFF:0102:0304 should be set as m_net=NET_IPV4 (1.2.3.4). + SetLegacyIPv6(arr); + } + + /** + * Unserialize from a pre-ADDRv2/BIP155 format from a stream. + */ + template + void UnserializeV1Stream(Stream& s) + { + uint8_t serialized[V1_SERIALIZATION_SIZE]; + + s >> serialized; + + UnserializeV1Array(serialized); + } + + /** + * Unserialize from a ADDRv2 / BIP155 format. + */ + template + void UnserializeV2Stream(Stream& s) + { + uint8_t bip155_net; + s >> bip155_net; - s >> serialized; + size_t address_size; + s >> COMPACTSIZE(address_size); - UnserializeV1Array(serialized); + if (address_size > MAX_ADDRV2_SIZE) { + throw std::ios_base::failure(strprintf( + "Address too long: %u > %u", address_size, MAX_ADDRV2_SIZE)); } - /** - * Unserialize from a ADDRv2 / BIP155 format. - */ - template - void UnserializeV2Stream(Stream& s) - { - uint8_t bip155_net; - s >> bip155_net; - - size_t address_size; - s >> COMPACTSIZE(address_size); - - if (address_size > MAX_ADDRV2_SIZE) { - throw std::ios_base::failure(strprintf( - "Address too long: %u > %u", address_size, MAX_ADDRV2_SIZE)); + m_scope_id = 0; + + if (SetNetFromBIP155Network(bip155_net, address_size)) { + m_addr.resize(address_size); + s >> MakeSpan(m_addr); + + if (m_net != NET_IPV6) { + return; + } + + // Do some special checks on IPv6 addresses. + + // Recognize NET_INTERNAL embedded in IPv6, such addresses are not + // gossiped but could be coming from addrman, when unserializing from + // disk. + if (HasPrefix(m_addr, INTERNAL_IN_IPV6_PREFIX)) { + m_net = NET_INTERNAL; + memmove(m_addr.data(), m_addr.data() + INTERNAL_IN_IPV6_PREFIX.size(), + ADDR_INTERNAL_SIZE); + m_addr.resize(ADDR_INTERNAL_SIZE); + return; } - m_scope_id = 0; - - if (SetNetFromBIP155Network(bip155_net, address_size)) { - m_addr.resize(address_size); - s >> MakeSpan(m_addr); - - if (m_net != NET_IPV6) { - return; - } - - // Do some special checks on IPv6 addresses. - - // Recognize NET_INTERNAL embedded in IPv6, such addresses are not - // gossiped but could be coming from addrman, when unserializing from - // disk. 
- if (HasPrefix(m_addr, INTERNAL_IN_IPV6_PREFIX)) { - m_net = NET_INTERNAL; - memmove(m_addr.data(), m_addr.data() + INTERNAL_IN_IPV6_PREFIX.size(), - ADDR_INTERNAL_SIZE); - m_addr.resize(ADDR_INTERNAL_SIZE); - return; - } - - if (!HasPrefix(m_addr, IPV4_IN_IPV6_PREFIX) && - !HasPrefix(m_addr, TORV2_IN_IPV6_PREFIX)) { - return; - } - - // IPv4 and TORv2 are not supposed to be embedded in IPv6 (like in V1 - // encoding). Unserialize as !IsValid(), thus ignoring them. - } else { - // If we receive an unknown BIP155 network id (from the future?) then - // ignore the address - unserialize as !IsValid(). - s.ignore(address_size); + if (!HasPrefix(m_addr, IPV4_IN_IPV6_PREFIX) && + !HasPrefix(m_addr, TORV2_IN_IPV6_PREFIX)) { + return; } - // Mimic a default-constructed CNetAddr object which is !IsValid() and thus - // will not be gossiped, but continue reading next addresses from the stream. - m_net = NET_IPV6; - m_addr.assign(ADDR_IPV6_SIZE, 0x0); + // IPv4 and TORv2 are not supposed to be embedded in IPv6 (like in V1 + // encoding). Unserialize as !IsValid(), thus ignoring them. + } else { + // If we receive an unknown BIP155 network id (from the future?) then + // ignore the address - unserialize as !IsValid(). + s.ignore(address_size); } + + // Mimic a default-constructed CNetAddr object which is !IsValid() and thus + // will not be gossiped, but continue reading next addresses from the stream. + m_net = NET_IPV6; + m_addr.assign(ADDR_IPV6_SIZE, 0x0); + } }; class CNetAddrHash @@ -488,86 +485,86 @@ class CNetAddrHash class CSubNet { - protected: - /// Network (base) address - CNetAddr network; - /// Netmask, in network byte order - uint8_t netmask[16]; - /// Is this value valid? (only used to signal parse errors) - bool valid; - - bool SanityCheck() const; - - public: - /** - * Construct an invalid subnet (empty, `Match()` always returns false). - */ - CSubNet(); - - /** - * Construct from a given network start and number of bits (CIDR mask). - * @param[in] addr Network start. Must be IPv4 or IPv6, otherwise an invalid subnet is - * created. - * @param[in] mask CIDR mask, must be in [0, 32] for IPv4 addresses and in [0, 128] for - * IPv6 addresses. Otherwise an invalid subnet is created. - */ - CSubNet(const CNetAddr& addr, uint8_t mask); - - /** - * Construct from a given network start and mask. - * @param[in] addr Network start. Must be IPv4 or IPv6, otherwise an invalid subnet is - * created. - * @param[in] mask Network mask, must be of the same type as `addr` and not contain 0-bits - * followed by 1-bits. Otherwise an invalid subnet is created. - */ - CSubNet(const CNetAddr& addr, const CNetAddr& mask); - - /** - * Construct a single-host subnet. - * @param[in] addr The sole address to be contained in the subnet, can also be non-IPv[46]. - */ - explicit CSubNet(const CNetAddr& addr); - - bool Match(const CNetAddr &addr) const; - - std::string ToString() const; - bool IsValid() const; - - friend bool operator==(const CSubNet& a, const CSubNet& b); - friend bool operator!=(const CSubNet& a, const CSubNet& b) { return !(a == b); } - friend bool operator<(const CSubNet& a, const CSubNet& b); +protected: + /// Network (base) address + CNetAddr network; + /// Netmask, in network byte order + uint8_t netmask[16]; + /// Is this value valid? (only used to signal parse errors) + bool valid; + + bool SanityCheck() const; + +public: + /** + * Construct an invalid subnet (empty, `Match()` always returns false). 
+ */ + CSubNet(); + + /** + * Construct from a given network start and number of bits (CIDR mask). + * @param[in] addr Network start. Must be IPv4 or IPv6, otherwise an invalid subnet is + * created. + * @param[in] mask CIDR mask, must be in [0, 32] for IPv4 addresses and in [0, 128] for + * IPv6 addresses. Otherwise an invalid subnet is created. + */ + CSubNet(const CNetAddr& addr, uint8_t mask); + + /** + * Construct from a given network start and mask. + * @param[in] addr Network start. Must be IPv4 or IPv6, otherwise an invalid subnet is + * created. + * @param[in] mask Network mask, must be of the same type as `addr` and not contain 0-bits + * followed by 1-bits. Otherwise an invalid subnet is created. + */ + CSubNet(const CNetAddr& addr, const CNetAddr& mask); + + /** + * Construct a single-host subnet. + * @param[in] addr The sole address to be contained in the subnet, can also be non-IPv[46]. + */ + explicit CSubNet(const CNetAddr& addr); + + bool Match(const CNetAddr& addr) const; + + std::string ToString() const; + bool IsValid() const; + + friend bool operator==(const CSubNet& a, const CSubNet& b); + friend bool operator!=(const CSubNet& a, const CSubNet& b) { return !(a == b); } + friend bool operator<(const CSubNet& a, const CSubNet& b); }; /** A combination of a network address (CNetAddr) and a (TCP) port */ class CService : public CNetAddr { - protected: - uint16_t port; // host order - - public: - CService(); - CService(const CNetAddr& ip, uint16_t port); - CService(const struct in_addr& ipv4Addr, uint16_t port); - explicit CService(const struct sockaddr_in& addr); - uint16_t GetPort() const; - bool GetSockAddr(struct sockaddr* paddr, socklen_t *addrlen) const; - bool SetSockAddr(const struct sockaddr* paddr); - friend bool operator==(const CService& a, const CService& b); - friend bool operator!=(const CService& a, const CService& b) { return !(a == b); } - friend bool operator<(const CService& a, const CService& b); - std::vector GetKey() const; - std::string ToString() const; - std::string ToStringPort() const; - std::string ToStringIPPort() const; - - CService(const struct in6_addr& ipv6Addr, uint16_t port); - explicit CService(const struct sockaddr_in6& addr); - - SERIALIZE_METHODS(CService, obj) - { - READWRITEAS(CNetAddr, obj); - READWRITE(Using>(obj.port)); - } +protected: + uint16_t port; // host order + +public: + CService(); + CService(const CNetAddr& ip, uint16_t port); + CService(const struct in_addr& ipv4Addr, uint16_t port); + explicit CService(const struct sockaddr_in& addr); + uint16_t GetPort() const; + bool GetSockAddr(struct sockaddr* paddr, socklen_t* addrlen) const; + bool SetSockAddr(const struct sockaddr* paddr); + friend bool operator==(const CService& a, const CService& b); + friend bool operator!=(const CService& a, const CService& b) { return !(a == b); } + friend bool operator<(const CService& a, const CService& b); + std::vector GetKey() const; + std::string ToString() const; + std::string ToStringPort() const; + std::string ToStringIPPort() const; + + CService(const struct in6_addr& ipv6Addr, uint16_t port); + explicit CService(const struct sockaddr_in6& addr); + + SERIALIZE_METHODS(CService, obj) + { + READWRITEAS(CNetAddr, obj); + READWRITE(Using>(obj.port)); + } }; bool SanityCheckASMap(const std::vector& asmap); diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp index 63f75bc9a0..d5b56cb7cd 100644 --- a/src/test/fuzz/deserialize.cpp +++ b/src/test/fuzz/deserialize.cpp @@ -100,217 +100,217 @@ void 
AssertEqualAfterSerializeDeserialize(const T& obj, const int version = INIT } // namespace FUZZ_TARGET_DESERIALIZE(block_filter_deserialize, { - BlockFilter block_filter; - DeserializeFromFuzzingInput(buffer, block_filter); + BlockFilter block_filter; + DeserializeFromFuzzingInput(buffer, block_filter); }) FUZZ_TARGET_DESERIALIZE(addr_info_deserialize, { - CAddrInfo addr_info; - DeserializeFromFuzzingInput(buffer, addr_info); + CAddrInfo addr_info; + DeserializeFromFuzzingInput(buffer, addr_info); }) FUZZ_TARGET_DESERIALIZE(block_file_info_deserialize, { - CBlockFileInfo block_file_info; - DeserializeFromFuzzingInput(buffer, block_file_info); + CBlockFileInfo block_file_info; + DeserializeFromFuzzingInput(buffer, block_file_info); }) FUZZ_TARGET_DESERIALIZE(block_header_and_short_txids_deserialize, { - CBlockHeaderAndShortTxIDs block_header_and_short_txids; - DeserializeFromFuzzingInput(buffer, block_header_and_short_txids); + CBlockHeaderAndShortTxIDs block_header_and_short_txids; + DeserializeFromFuzzingInput(buffer, block_header_and_short_txids); }) FUZZ_TARGET_DESERIALIZE(fee_rate_deserialize, { - CFeeRate fee_rate; - DeserializeFromFuzzingInput(buffer, fee_rate); - AssertEqualAfterSerializeDeserialize(fee_rate); + CFeeRate fee_rate; + DeserializeFromFuzzingInput(buffer, fee_rate); + AssertEqualAfterSerializeDeserialize(fee_rate); }) FUZZ_TARGET_DESERIALIZE(merkle_block_deserialize, { - CMerkleBlock merkle_block; - DeserializeFromFuzzingInput(buffer, merkle_block); + CMerkleBlock merkle_block; + DeserializeFromFuzzingInput(buffer, merkle_block); }) FUZZ_TARGET_DESERIALIZE(out_point_deserialize, { - COutPoint out_point; - DeserializeFromFuzzingInput(buffer, out_point); - AssertEqualAfterSerializeDeserialize(out_point); + COutPoint out_point; + DeserializeFromFuzzingInput(buffer, out_point); + AssertEqualAfterSerializeDeserialize(out_point); }) FUZZ_TARGET_DESERIALIZE(partial_merkle_tree_deserialize, { - CPartialMerkleTree partial_merkle_tree; - DeserializeFromFuzzingInput(buffer, partial_merkle_tree); + CPartialMerkleTree partial_merkle_tree; + DeserializeFromFuzzingInput(buffer, partial_merkle_tree); }) FUZZ_TARGET_DESERIALIZE(pub_key_deserialize, { - CPubKey pub_key; - DeserializeFromFuzzingInput(buffer, pub_key); - AssertEqualAfterSerializeDeserialize(pub_key); + CPubKey pub_key; + DeserializeFromFuzzingInput(buffer, pub_key); + AssertEqualAfterSerializeDeserialize(pub_key); }) FUZZ_TARGET_DESERIALIZE(script_deserialize, { - CScript script; - DeserializeFromFuzzingInput(buffer, script); + CScript script; + DeserializeFromFuzzingInput(buffer, script); }) FUZZ_TARGET_DESERIALIZE(tx_in_deserialize, { - CTxIn tx_in; - DeserializeFromFuzzingInput(buffer, tx_in); - AssertEqualAfterSerializeDeserialize(tx_in); + CTxIn tx_in; + DeserializeFromFuzzingInput(buffer, tx_in); + AssertEqualAfterSerializeDeserialize(tx_in); }) FUZZ_TARGET_DESERIALIZE(flat_file_pos_deserialize, { - FlatFilePos flat_file_pos; - DeserializeFromFuzzingInput(buffer, flat_file_pos); - AssertEqualAfterSerializeDeserialize(flat_file_pos); + FlatFilePos flat_file_pos; + DeserializeFromFuzzingInput(buffer, flat_file_pos); + AssertEqualAfterSerializeDeserialize(flat_file_pos); }) FUZZ_TARGET_DESERIALIZE(key_origin_info_deserialize, { - KeyOriginInfo key_origin_info; - DeserializeFromFuzzingInput(buffer, key_origin_info); - AssertEqualAfterSerializeDeserialize(key_origin_info); + KeyOriginInfo key_origin_info; + DeserializeFromFuzzingInput(buffer, key_origin_info); + AssertEqualAfterSerializeDeserialize(key_origin_info); }) 
FUZZ_TARGET_DESERIALIZE(partially_signed_transaction_deserialize, { - PartiallySignedTransaction partially_signed_transaction; - DeserializeFromFuzzingInput(buffer, partially_signed_transaction); + PartiallySignedTransaction partially_signed_transaction; + DeserializeFromFuzzingInput(buffer, partially_signed_transaction); }) FUZZ_TARGET_DESERIALIZE(prefilled_transaction_deserialize, { - PrefilledTransaction prefilled_transaction; - DeserializeFromFuzzingInput(buffer, prefilled_transaction); + PrefilledTransaction prefilled_transaction; + DeserializeFromFuzzingInput(buffer, prefilled_transaction); }) FUZZ_TARGET_DESERIALIZE(psbt_input_deserialize, { - PSBTInput psbt_input; - DeserializeFromFuzzingInput(buffer, psbt_input); + PSBTInput psbt_input; + DeserializeFromFuzzingInput(buffer, psbt_input); }) FUZZ_TARGET_DESERIALIZE(psbt_output_deserialize, { - PSBTOutput psbt_output; - DeserializeFromFuzzingInput(buffer, psbt_output); + PSBTOutput psbt_output; + DeserializeFromFuzzingInput(buffer, psbt_output); }) FUZZ_TARGET_DESERIALIZE(block_deserialize, { - CBlock block; - DeserializeFromFuzzingInput(buffer, block); + CBlock block; + DeserializeFromFuzzingInput(buffer, block); }) FUZZ_TARGET_DESERIALIZE(blocklocator_deserialize, { - CBlockLocator bl; - DeserializeFromFuzzingInput(buffer, bl); + CBlockLocator bl; + DeserializeFromFuzzingInput(buffer, bl); }) FUZZ_TARGET_DESERIALIZE(blockmerkleroot, { - CBlock block; - DeserializeFromFuzzingInput(buffer, block); - bool mutated; - BlockMerkleRoot(block, &mutated); + CBlock block; + DeserializeFromFuzzingInput(buffer, block); + bool mutated; + BlockMerkleRoot(block, &mutated); }) FUZZ_TARGET_DESERIALIZE(addrman_deserialize, { - CAddrMan am; - DeserializeFromFuzzingInput(buffer, am); + CAddrMan am; + DeserializeFromFuzzingInput(buffer, am); }) FUZZ_TARGET_DESERIALIZE(blockheader_deserialize, { - CBlockHeader bh; - DeserializeFromFuzzingInput(buffer, bh); + CBlockHeader bh; + DeserializeFromFuzzingInput(buffer, bh); }) FUZZ_TARGET_DESERIALIZE(banentry_deserialize, { - CBanEntry be; - DeserializeFromFuzzingInput(buffer, be); + CBanEntry be; + DeserializeFromFuzzingInput(buffer, be); }) FUZZ_TARGET_DESERIALIZE(txundo_deserialize, { - CTxUndo tu; - DeserializeFromFuzzingInput(buffer, tu); + CTxUndo tu; + DeserializeFromFuzzingInput(buffer, tu); }) FUZZ_TARGET_DESERIALIZE(blockundo_deserialize, { - CBlockUndo bu; - DeserializeFromFuzzingInput(buffer, bu); + CBlockUndo bu; + DeserializeFromFuzzingInput(buffer, bu); }) FUZZ_TARGET_DESERIALIZE(coins_deserialize, { - Coin coin; - DeserializeFromFuzzingInput(buffer, coin); + Coin coin; + DeserializeFromFuzzingInput(buffer, coin); }) FUZZ_TARGET_DESERIALIZE(netaddr_deserialize, { - CNetAddr na; - DeserializeFromFuzzingInput(buffer, na); - if (na.IsAddrV1Compatible()) { - AssertEqualAfterSerializeDeserialize(na); - } - AssertEqualAfterSerializeDeserialize(na, INIT_PROTO_VERSION | ADDRV2_FORMAT); + CNetAddr na; + DeserializeFromFuzzingInput(buffer, na); + if (na.IsAddrV1Compatible()) { + AssertEqualAfterSerializeDeserialize(na); + } + AssertEqualAfterSerializeDeserialize(na, INIT_PROTO_VERSION | ADDRV2_FORMAT); }) FUZZ_TARGET_DESERIALIZE(service_deserialize, { - CService s; - DeserializeFromFuzzingInput(buffer, s); - if (s.IsAddrV1Compatible()) { - AssertEqualAfterSerializeDeserialize(s); - } - AssertEqualAfterSerializeDeserialize(s, INIT_PROTO_VERSION | ADDRV2_FORMAT); - CService s1; - DeserializeFromFuzzingInput(buffer, s1, INIT_PROTO_VERSION); - AssertEqualAfterSerializeDeserialize(s1, INIT_PROTO_VERSION); - 
assert(s1.IsAddrV1Compatible()); - CService s2; - DeserializeFromFuzzingInput(buffer, s2, INIT_PROTO_VERSION | ADDRV2_FORMAT); - AssertEqualAfterSerializeDeserialize(s2, INIT_PROTO_VERSION | ADDRV2_FORMAT); + CService s; + DeserializeFromFuzzingInput(buffer, s); + if (s.IsAddrV1Compatible()) { + AssertEqualAfterSerializeDeserialize(s); + } + AssertEqualAfterSerializeDeserialize(s, INIT_PROTO_VERSION | ADDRV2_FORMAT); + CService s1; + DeserializeFromFuzzingInput(buffer, s1, INIT_PROTO_VERSION); + AssertEqualAfterSerializeDeserialize(s1, INIT_PROTO_VERSION); + assert(s1.IsAddrV1Compatible()); + CService s2; + DeserializeFromFuzzingInput(buffer, s2, INIT_PROTO_VERSION | ADDRV2_FORMAT); + AssertEqualAfterSerializeDeserialize(s2, INIT_PROTO_VERSION | ADDRV2_FORMAT); }) FUZZ_TARGET_DESERIALIZE(messageheader_deserialize, { - CMessageHeader mh; - DeserializeFromFuzzingInput(buffer, mh); - (void)mh.IsCommandValid(); + CMessageHeader mh; + DeserializeFromFuzzingInput(buffer, mh); + (void)mh.IsCommandValid(); }) FUZZ_TARGET_DESERIALIZE(address_deserialize_v1_notime, { - CAddress a; - DeserializeFromFuzzingInput(buffer, a, INIT_PROTO_VERSION); - // A CAddress without nTime (as is expected under INIT_PROTO_VERSION) will roundtrip - // in all 5 formats (with/without nTime, v1/v2, network/disk) - AssertEqualAfterSerializeDeserialize(a, INIT_PROTO_VERSION); - AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION); - AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK); - AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT); - AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK); + CAddress a; + DeserializeFromFuzzingInput(buffer, a, INIT_PROTO_VERSION); + // A CAddress without nTime (as is expected under INIT_PROTO_VERSION) will roundtrip + // in all 5 formats (with/without nTime, v1/v2, network/disk) + AssertEqualAfterSerializeDeserialize(a, INIT_PROTO_VERSION); + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION); + AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK); + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT); + AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK); }) FUZZ_TARGET_DESERIALIZE(address_deserialize_v1_withtime, { - CAddress a; - DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION); - // A CAddress in V1 mode will roundtrip in all 4 formats that have nTime. - AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION); - AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK); - AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT); - AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK); + CAddress a; + DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION); + // A CAddress in V1 mode will roundtrip in all 4 formats that have nTime. + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION); + AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK); + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT); + AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK); }) FUZZ_TARGET_DESERIALIZE(address_deserialize_v2, { - CAddress a; - DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION | ADDRV2_FORMAT); - // A CAddress in V2 mode will roundtrip in both V2 formats, and also in the V1 formats - // with time if it's V1 compatible. 
- if (a.IsAddrV1Compatible()) { - AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION); - AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK); - } - AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT); - AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK); + CAddress a; + DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION | ADDRV2_FORMAT); + // A CAddress in V2 mode will roundtrip in both V2 formats, and also in the V1 formats + // with time if it's V1 compatible. + if (a.IsAddrV1Compatible()) { + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION); + AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK); + } + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT); + AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK); }) FUZZ_TARGET_DESERIALIZE(inv_deserialize, { - CInv i; - DeserializeFromFuzzingInput(buffer, i); + CInv i; + DeserializeFromFuzzingInput(buffer, i); }) FUZZ_TARGET_DESERIALIZE(bloomfilter_deserialize, { - CBloomFilter bf; - DeserializeFromFuzzingInput(buffer, bf); + CBloomFilter bf; + DeserializeFromFuzzingInput(buffer, bf); }) FUZZ_TARGET_DESERIALIZE(diskblockindex_deserialize, { - CDiskBlockIndex dbi; - DeserializeFromFuzzingInput(buffer, dbi); + CDiskBlockIndex dbi; + DeserializeFromFuzzingInput(buffer, dbi); }) FUZZ_TARGET_DESERIALIZE(txoutcompressor_deserialize, { - CTxOut to; - auto toc = Using(to); - DeserializeFromFuzzingInput(buffer, toc); + CTxOut to; + auto toc = Using(to); + DeserializeFromFuzzingInput(buffer, toc); }) FUZZ_TARGET_DESERIALIZE(blocktransactions_deserialize, { - BlockTransactions bt; - DeserializeFromFuzzingInput(buffer, bt); + BlockTransactions bt; + DeserializeFromFuzzingInput(buffer, bt); }) FUZZ_TARGET_DESERIALIZE(blocktransactionsrequest_deserialize, { - BlockTransactionsRequest btr; - DeserializeFromFuzzingInput(buffer, btr); + BlockTransactionsRequest btr; + DeserializeFromFuzzingInput(buffer, btr); }) FUZZ_TARGET_DESERIALIZE(snapshotmetadata_deserialize, { - SnapshotMetadata snapshot_metadata; - DeserializeFromFuzzingInput(buffer, snapshot_metadata); + SnapshotMetadata snapshot_metadata; + DeserializeFromFuzzingInput(buffer, snapshot_metadata); }) FUZZ_TARGET_DESERIALIZE(uint160_deserialize, { - uint160 u160; - DeserializeFromFuzzingInput(buffer, u160); - AssertEqualAfterSerializeDeserialize(u160); + uint160 u160; + DeserializeFromFuzzingInput(buffer, u160); + AssertEqualAfterSerializeDeserialize(u160); }) FUZZ_TARGET_DESERIALIZE(uint256_deserialize, { - uint256 u256; - DeserializeFromFuzzingInput(buffer, u256); - AssertEqualAfterSerializeDeserialize(u256); -}) - // Classes intentionally not covered in this file since their deserialization code is - // fuzzed elsewhere: - // * Deserialization of CTxOut is fuzzed in test/fuzz/tx_out.cpp - // * Deserialization of CMutableTransaction is fuzzed in src/test/fuzz/transaction.cpp + uint256 u256; + DeserializeFromFuzzingInput(buffer, u256); + AssertEqualAfterSerializeDeserialize(u256); +}) +// Classes intentionally not covered in this file since their deserialization code is +// fuzzed elsewhere: +// * Deserialization of CTxOut is fuzzed in test/fuzz/tx_out.cpp +// * Deserialization of CMutableTransaction is fuzzed in src/test/fuzz/transaction.cpp From 9c0871977839c28636eff975748182888299cd2b Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Fri, 28 May 2021 13:45:15 -0700 Subject: [PATCH 086/112] [test] Introduce test logic to query DNS seeds This commit introduces a DNS seed to the regtest chain params in order
to add coverage to the DNS querying logic. The first test checks that we do not query DNS seeds if we are able to successfully connect to 2 outbound connections. Since we participate in ADDR relay with those connections, including sending a GETADDR message during the VERSION handshake, querying the DNS seeds is unnecessary. Co-authored-by: Martin Zumsande --- src/chainparams.cpp | 3 ++- test/functional/feature_config_args.py | 5 ++-- test/functional/p2p_dns_seeds.py | 34 ++++++++++++++++++++++++++ test/functional/test_runner.py | 1 + 4 files changed, 40 insertions(+), 3 deletions(-) create mode 100755 test/functional/p2p_dns_seeds.py diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 1b71c4db43..4f1ff1d512 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -435,7 +435,8 @@ class CRegTestParams : public CChainParams { assert(genesis.hashMerkleRoot == uint256S("0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b")); vFixedSeeds.clear(); //!< Regtest mode doesn't have any fixed seeds. - vSeeds.clear(); //!< Regtest mode doesn't have any DNS seeds. + vSeeds.clear(); + vSeeds.emplace_back("dummySeed.invalid."); fDefaultConsistencyChecks = true; fRequireStandard = true; diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index de9d0d2e80..07b74e65de 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -158,8 +158,9 @@ def test_seed_peers(self): self.stop_node(0) # No peers.dat exists and -dnsseed=1 - # We expect the node will use DNS Seeds, but Regtest mode has 0 DNS seeds - # So after 60 seconds, the node should fallback to fixed seeds (this is a slow test) + # We expect the node will use DNS Seeds, but Regtest mode does not have + # any valid DNS seeds. So after 60 seconds, the node should fallback to + # fixed seeds assert not os.path.exists(os.path.join(default_data_dir, "peers.dat")) start = int(time.time()) with self.nodes[0].assert_debug_log(expected_msgs=[ diff --git a/test/functional/p2p_dns_seeds.py b/test/functional/p2p_dns_seeds.py new file mode 100755 index 0000000000..254f9af445 --- /dev/null +++ b/test/functional/p2p_dns_seeds.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test ThreadDNSAddressSeed logic for querying DNS seeds.""" + +from test_framework.p2p import P2PInterface +from test_framework.test_framework import BitcoinTestFramework + + +class P2PDNSSeeds(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 1 + self.extra_args = [["-dnsseed=1"]] + + def run_test(self): + self.existing_outbound_connections_test() + + def existing_outbound_connections_test(self): + # Make sure addrman is populated to enter the conditional where we + # delay and potentially skip DNS seeding. + self.nodes[0].addpeeraddress("192.0.0.8", 8333) + + self.log.info("Check that we *do not* query DNS seeds if we have 2 outbound connections") + + self.restart_node(0) + with self.nodes[0].assert_debug_log(expected_msgs=["P2P peers available.
Skipped DNS seeding."], timeout=12): + for i in range(2): + self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="outbound-full-relay") + + +if __name__ == '__main__': + P2PDNSSeeds().main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 00527e78f1..1a59cfc755 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -121,6 +121,7 @@ 'wallet_listreceivedby.py --legacy-wallet', 'wallet_listreceivedby.py --descriptors', 'wallet_abandonconflict.py --legacy-wallet', + 'p2p_dns_seeds.py', 'wallet_abandonconflict.py --descriptors', 'feature_csv_activation.py', 'rpc_rawtransaction.py --legacy-wallet', From 75c05af361552eeecd100cee8cc40d4cd5a3aa27 Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Fri, 28 May 2021 13:49:29 -0700 Subject: [PATCH 087/112] [test] Test logic to query DNS seeds with block-relay-only connections When a node is able to properly shut down, it will persist its block-relay-only connections to the addrman. On startup, it will attempt to reconnect to these anchors. Since block-relay-only connections do not participate in ADDR relay, successful connections are insufficient to skip querying the DNS seeds. This test fails prior to the changes in #22013. Co-authored-by: Martin Zumsande --- test/functional/p2p_dns_seeds.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/functional/p2p_dns_seeds.py b/test/functional/p2p_dns_seeds.py index 254f9af445..b58607a5c9 100755 --- a/test/functional/p2p_dns_seeds.py +++ b/test/functional/p2p_dns_seeds.py @@ -16,6 +16,7 @@ def set_test_params(self): def run_test(self): self.existing_outbound_connections_test() + self.existing_block_relay_connections_test() def existing_outbound_connections_test(self): # Make sure addrman is populated to enter the conditional where we @@ -29,6 +30,23 @@ def existing_outbound_connections_test(self): for i in range(2): self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="outbound-full-relay") + def existing_block_relay_connections_test(self): + # Make sure addrman is populated to enter the conditional where we + # delay and potentially skip DNS seeding. No-op when run after + # existing_outbound_connections_test. + self.nodes[0].addpeeraddress("192.0.0.8", 8333) + + self.log.info("Check that we *do* query DNS seeds if we only have 2 block-relay-only connections") + + self.restart_node(0) + with self.nodes[0].assert_debug_log(expected_msgs=["Loading addresses from DNS seed"], timeout=12): + # This mimics the "anchors" logic where nodes are likely to + # reconnect to block-relay-only connections on startup. + # Since we do not participate in addr relay with these connections, + # we still want to query the DNS seeds.
+ for i in range(2): + self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="block-relay-only") + if __name__ == '__main__': P2PDNSSeeds().main() From 35851450a928ffacca240fadbf1747a42d5ba256 Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 24 May 2021 15:56:32 -0700 Subject: [PATCH 088/112] [test] Test the interactions between -connect and -dnsseed --- test/functional/p2p_dns_seeds.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/test/functional/p2p_dns_seeds.py b/test/functional/p2p_dns_seeds.py index b58607a5c9..213741f3a8 100755 --- a/test/functional/p2p_dns_seeds.py +++ b/test/functional/p2p_dns_seeds.py @@ -15,9 +15,22 @@ def set_test_params(self): self.extra_args = [["-dnsseed=1"]] def run_test(self): + self.init_arg_tests() self.existing_outbound_connections_test() self.existing_block_relay_connections_test() + def init_arg_tests(self): + fakeaddr = "fakenodeaddr.fakedomain.invalid." + + self.log.info("Check that setting -connect disables -dnsseed by default") + self.nodes[0].stop_node() + with self.nodes[0].assert_debug_log(expected_msgs=["DNS seeding disabled"]): + self.start_node(0, [f"-connect={fakeaddr}"]) + + self.log.info("Check that running -connect and -dnsseed means DNS logic runs.") + with self.nodes[0].assert_debug_log(expected_msgs=["Loading addresses from DNS seed"], timeout=12): + self.restart_node(0, [f"-connect={fakeaddr}", "-dnsseed=1"]) + def existing_outbound_connections_test(self): # Make sure addrman is populated to enter the conditional where we # delay and potentially skip DNS seeding. From 26d0ffe4f2573e0297c9b0e095c2a0868929b08b Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 24 May 2021 17:08:54 -0700 Subject: [PATCH 089/112] [test] Test -forcednsseed causes querying DNS seeds --- test/functional/p2p_dns_seeds.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/functional/p2p_dns_seeds.py b/test/functional/p2p_dns_seeds.py index 213741f3a8..4714de64dc 100755 --- a/test/functional/p2p_dns_seeds.py +++ b/test/functional/p2p_dns_seeds.py @@ -18,6 +18,7 @@ def run_test(self): self.init_arg_tests() self.existing_outbound_connections_test() self.existing_block_relay_connections_test() + self.force_dns_test() def init_arg_tests(self): fakeaddr = "fakenodeaddr.fakedomain.invalid." @@ -60,6 +61,17 @@ def existing_block_relay_connections_test(self): for i in range(2): self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="block-relay-only") + def force_dns_test(self): + self.log.info("Check that we query DNS seeds if -forcednsseed param is set") + + with self.nodes[0].assert_debug_log(expected_msgs=["Loading addresses from DNS seed"], timeout=12): + # -dnsseed defaults to 1 in bitcoind, but 0 in the test framework, + # so pass it explicitly here + self.restart_node(0, ["-forcednsseed", "-dnsseed=1"]) + + # Restore default for subsequent tests + self.restart_node(0) + if __name__ == '__main__': P2PDNSSeeds().main() From 6f6b7df6bdcf863af160c0426e3a22028ca8259a Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Fri, 28 May 2021 13:30:35 -0700 Subject: [PATCH 090/112] [init] Disallow starting up with conflicting parameters for -dnsseed and -forcednsseed -dnsseed determines whether we run ThreadDNSAddressSeed and potentially query the DNS seeds for addresses. -forcednsseed tells the node to force querying the DNS seeds even if we have sufficient addresses or current connections. This commit disallows starting up with explicitly conflicting parameters.
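A rough Python sketch of the rule this commit enforces follows; it is illustration only. The real check is the C++ added to AppInitParameterInteraction in the diff below, and the helper name check_dns_args is invented here for the example.

def check_dns_args(args: dict) -> None:
    # Defaults mirror DEFAULT_FORCEDNSSEED (false) and DEFAULT_DNSSEED (true).
    forcednsseed = args.get("forcednsseed", False)
    dnsseed = args.get("dnsseed", True)
    if forcednsseed and not dnsseed:
        # Explicitly conflicting parameters abort startup.
        raise SystemExit("Error: Cannot set -forcednsseed to true when setting -dnsseed to false.")

check_dns_args({"forcednsseed": True})  # fine: -dnsseed defaults to true
try:
    check_dns_args({"forcednsseed": True, "dnsseed": False})
except SystemExit as err:
    print(err)  # prints the init error the patch below adds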
--- src/init.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/init.cpp b/src/init.cpp index 593128747e..11a87dd441 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -853,6 +853,11 @@ bool AppInitParameterInteraction(const ArgsManager& args) return InitError(_("Prune mode is incompatible with -coinstatsindex.")); } + // If -forcednsseed is set to true, ensure -dnsseed has not been set to false + if (args.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED) && !args.GetBoolArg("-dnsseed", DEFAULT_DNSSEED)){ + return InitError(_("Cannot set -forcednsseed to true when setting -dnsseed to false.")); + } + // -bind and -whitebind can't be set when not listening size_t nUserBind = args.GetArgs("-bind").size() + args.GetArgs("-whitebind").size(); if (nUserBind != 0 && !args.GetBoolArg("-listen", DEFAULT_LISTEN)) { From 6395c8ed5689ea72e9a1618f14551775246f6361 Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Fri, 28 May 2021 13:38:46 -0700 Subject: [PATCH 091/112] [test] Test the interactions between -forcednsseed and -dnsseed Test that passing conflicting parameters for the two causes a startup error. This logic also impacts -connect, which soft sets -dnsseed, so add a test for that too. --- test/functional/p2p_dns_seeds.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/functional/p2p_dns_seeds.py b/test/functional/p2p_dns_seeds.py index 4714de64dc..d9977dccee 100755 --- a/test/functional/p2p_dns_seeds.py +++ b/test/functional/p2p_dns_seeds.py @@ -32,6 +32,24 @@ def init_arg_tests(self): with self.nodes[0].assert_debug_log(expected_msgs=["Loading addresses from DNS seed"], timeout=12): self.restart_node(0, [f"-connect={fakeaddr}", "-dnsseed=1"]) + self.log.info("Check that running -forcednsseed and -dnsseed=0 throws an error.") + self.nodes[0].stop_node() + self.nodes[0].assert_start_raises_init_error( + expected_msg="Error: Cannot set -forcednsseed to true when setting -dnsseed to false.", + extra_args=["-forcednsseed=1", "-dnsseed=0"], + ) + + self.log.info("Check that running -forcednsseed and -connect throws an error.") + # -connect soft sets -dnsseed to false, so throws the same error + self.nodes[0].stop_node() + self.nodes[0].assert_start_raises_init_error( + expected_msg="Error: Cannot set -forcednsseed to true when setting -dnsseed to false.", + extra_args=["-forcednsseed=1", f"-connect={fakeaddr}"], + ) + + # Restore default bitcoind settings + self.restart_node(0) + def existing_outbound_connections_test(self): # Make sure addrman is populated to enter the conditional where we # delay and potentially skip DNS seeding. From 4c89e24f64c1dc1a56a3bcb6b5e2b4fb95e8b29f Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 24 May 2021 17:56:22 -0700 Subject: [PATCH 092/112] [test] Test the delay before querying DNS seeds When starting up with a populated addrman, ThreadDNSAddressSeed adds a delay during which time the node may be able to connect to some peers. This commit tests the delay changes based on the number of addresses in the addrman. --- test/functional/p2p_dns_seeds.py | 34 ++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/test/functional/p2p_dns_seeds.py b/test/functional/p2p_dns_seeds.py index d9977dccee..e58ad8e0fc 100755 --- a/test/functional/p2p_dns_seeds.py +++ b/test/functional/p2p_dns_seeds.py @@ -4,6 +4,8 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test ThreadDNSAddressSeed logic for querying DNS seeds.""" +import itertools + from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework @@ -19,6 +21,7 @@ def run_test(self): self.existing_outbound_connections_test() self.existing_block_relay_connections_test() self.force_dns_test() + self.wait_time_tests() def init_arg_tests(self): fakeaddr = "fakenodeaddr.fakedomain.invalid." @@ -90,6 +93,37 @@ def force_dns_test(self): # Restore default for subsequent tests self.restart_node(0) + def wait_time_tests(self): + self.log.info("Check the delay before querying DNS seeds") + + # Populate addrman with < 1000 addresses + for i in range(5): + a = f"192.0.0.{i}" + self.nodes[0].addpeeraddress(a, 8333) + + # The delay should be 11 seconds + with self.nodes[0].assert_debug_log(expected_msgs=["Waiting 11 seconds before querying DNS seeds.\n"]): + self.restart_node(0) + + # Populate addrman with > 1000 addresses + for i in itertools.count(): + first_octet = i % 2 + 1 + second_octet = i % 256 + third_octet = i % 100 + a = f"{first_octet}.{second_octet}.{third_octet}.1" + self.nodes[0].addpeeraddress(a, 8333) + if (i > 1000 and i % 100 == 0): + # The addrman size is non-deterministic because new addresses + # are sorted into buckets, potentially displacing existing + # addresses. Periodically check if we have met the desired + # threshold. + if len(self.nodes[0].getnodeaddresses(0)) > 1000: + break + + # The delay should be 5 mins + with self.nodes[0].assert_debug_log(expected_msgs=["Waiting 300 seconds before querying DNS seeds.\n"]): + self.restart_node(0) + if __name__ == '__main__': P2PDNSSeeds().main() From 82b6f89819e55af26f5264678e0f93052934bcb3 Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 24 May 2021 12:57:50 -0700 Subject: [PATCH 093/112] [style] Small style improvements to DNS parameters --- src/init.cpp | 2 +- src/net.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 11a87dd441..1cf7dce8f7 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -431,7 +431,7 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-dnsseed", strprintf("Query for peer addresses via DNS lookup, if low on addresses (default: %u unless -connect used)", DEFAULT_DNSSEED), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); argsman.AddArg("-externalip=", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-fixedseeds", strprintf("Allow fixed seeds if DNS seeds don't provide peers (default: %u)", DEFAULT_FIXEDSEEDS), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); - argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-maxconnections=", strprintf("Maintain at most connections to peers (default: %u). 
This limit does not apply to connections manually added via -addnode or the addnode RPC, which have a separate limit of %u.", DEFAULT_MAX_PEER_CONNECTIONS, MAX_ADDNODE_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); diff --git a/src/net.h b/src/net.h index b43916c55e..bacce326fe 100644 --- a/src/net.h +++ b/src/net.h @@ -79,9 +79,9 @@ static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60; /** Number of file descriptors required for message capture **/ static const int NUM_FDS_MESSAGE_CAPTURE = 1; -static const bool DEFAULT_FORCEDNSSEED = false; -static const bool DEFAULT_DNSSEED = true; -static const bool DEFAULT_FIXEDSEEDS = true; +static constexpr bool DEFAULT_FORCEDNSSEED{false}; +static constexpr bool DEFAULT_DNSSEED{true}; +static constexpr bool DEFAULT_FIXEDSEEDS{true}; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; From 703b1e612a4bd4521e20ae21eb8fb7c19f4ef942 Mon Sep 17 00:00:00 2001 From: Larry Ruane Date: Wed, 28 Jul 2021 16:18:53 -0600 Subject: [PATCH 094/112] Close minor startup race between main and scheduler threads Don't schedule class PeerManagerImpl's background tasks from its constructor, but instead do that from a separate method, StartScheduledTasks(), that can be called later at the end of startup, after other things, such as the active chain, are initialized. --- src/init.cpp | 4 +++- src/net_processing.cpp | 13 +++++++++---- src/net_processing.h | 5 ++++- src/test/denialofservice_tests.cpp | 8 ++++---- src/test/util/setup_common.cpp | 2 +- 5 files changed, 21 insertions(+), 11 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 75394d96b1..a0f318b8bc 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1180,7 +1180,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) assert(!node.peerman); node.peerman = PeerManager::make(chainparams, *node.connman, *node.addrman, node.banman.get(), - *node.scheduler, chainman, *node.mempool, ignores_incoming_txs); + chainman, *node.mempool, ignores_incoming_txs); RegisterValidationInterface(node.peerman.get()); // sanitize comments per BIP-0014, format user agent and check total size @@ -1789,6 +1789,8 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) banman->DumpBanlist(); }, DUMP_BANS_INTERVAL); + if (node.peerman) node.peerman->StartScheduledTasks(*node.scheduler); + #if HAVE_SYSTEM StartupNotify(args); #endif diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 2538904ade..6cc99d39f7 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -271,7 +271,7 @@ class PeerManagerImpl final : public PeerManager { public: PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman, - BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, + BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, bool ignore_incoming_txs); /** Overridden from CValidationInterface.
*/ @@ -288,6 +288,7 @@ class PeerManagerImpl final : public PeerManager bool SendMessages(CNode* pto) override EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing); /** Implement PeerManager */ + void StartScheduledTasks(CScheduler& scheduler) override; void CheckForStaleTipAndEvictPeers() override; bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override; bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; } @@ -1396,14 +1397,14 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex) } std::unique_ptr PeerManager::make(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman, - BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, + BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, bool ignore_incoming_txs) { - return std::make_unique(chainparams, connman, addrman, banman, scheduler, chainman, pool, ignore_incoming_txs); + return std::make_unique(chainparams, connman, addrman, banman, chainman, pool, ignore_incoming_txs); } PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman, - BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, + BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, bool ignore_incoming_txs) : m_chainparams(chainparams), m_connman(connman), @@ -1412,6 +1413,10 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn m_chainman(chainman), m_mempool(pool), m_ignore_incoming_txs(ignore_incoming_txs) +{ +} + +void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) { // Stale tip checking and peer eviction are on two different timers, but we // don't want them to get out of sync due to drift in the scheduler, so we diff --git a/src/net_processing.h b/src/net_processing.h index c537efb5db..66492899ee 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -37,10 +37,13 @@ class PeerManager : public CValidationInterface, public NetEventsInterface { public: static std::unique_ptr make(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman, - BanMan* banman, CScheduler& scheduler, ChainstateManager& chainman, + BanMan* banman, ChainstateManager& chainman, CTxMemPool& pool, bool ignore_incoming_txs); virtual ~PeerManager() { } + /** Begin running background tasks, should only be called once */ + virtual void StartScheduledTasks(CScheduler& scheduler) = 0; + /** Get statistics from node state */ virtual bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const = 0; diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index 5668ead1fb..0bfe6eecd9 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -53,7 +53,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) const CChainParams& chainparams = Params(); auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman); auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, nullptr, - *m_node.scheduler, *m_node.chainman, *m_node.mempool, false); + *m_node.chainman, *m_node.mempool, false); // Mock an outbound peer CAddress addr1(ip(0xa0b0c001), NODE_NONE); @@ -121,7 +121,7 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management) const CChainParams& chainparams = Params(); auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman); auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, nullptr, - *m_node.scheduler, *m_node.chainman, *m_node.mempool, false); + *m_node.chainman, *m_node.mempool, 
false); constexpr int max_outbound_full_relay = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS; CConnman::Options options; @@ -194,7 +194,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) auto banman = std::make_unique(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman); auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, banman.get(), - *m_node.scheduler, *m_node.chainman, *m_node.mempool, false); + *m_node.chainman, *m_node.mempool, false); CNetAddr tor_netaddr; BOOST_REQUIRE( @@ -288,7 +288,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) auto banman = std::make_unique(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); auto connman = std::make_unique(0x1337, 0x1337, *m_node.addrman); auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, banman.get(), - *m_node.scheduler, *m_node.chainman, *m_node.mempool, false); + *m_node.chainman, *m_node.mempool, false); banman->ClearBanned(); int64_t nStartTime = GetTime(); diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 5334c4623e..2d044af184 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -197,7 +197,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); m_node.connman = std::make_unique(0x1337, 0x1337, *m_node.addrman); // Deterministic randomness for tests. m_node.peerman = PeerManager::make(chainparams, *m_node.connman, *m_node.addrman, - m_node.banman.get(), *m_node.scheduler, *m_node.chainman, + m_node.banman.get(), *m_node.chainman, *m_node.mempool, false); { CConnman::Options options; From ca6c154ef116e8d2e0484cdb1af13b34a0c86c17 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Sat, 31 Jul 2021 21:23:16 +0200 Subject: [PATCH 095/112] test: refactor: remove `hex_str_to_bytes` helper Use the built-in class method bytes.fromhex() instead, which is available since Python 3.0. 
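For reference, a small standalone Python snippet (not part of the patch) showing that the built-in is a drop-in replacement for the removed helper, which simply wrapped binascii.unhexlify:

from binascii import unhexlify

hex_str = "deadbeef"
# bytes.fromhex() produces the same bytes the old hex_str_to_bytes() helper did.
assert bytes.fromhex(hex_str) == unhexlify(hex_str.encode("ascii")) == b"\xde\xad\xbe\xef"
# It also round-trips with bytes.hex(), the serialization direction already used in the tests.
assert bytes.fromhex(hex_str).hex() == hex_str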
--- test/functional/feature_dbcrash.py | 3 +-- test/functional/feature_segwit.py | 19 +++++++++---------- test/functional/interface_rest.py | 3 +-- test/functional/p2p_invalid_messages.py | 7 +++---- test/functional/p2p_segwit.py | 5 ++--- test/functional/rpc_addresses_deprecation.py | 3 +-- test/functional/rpc_decodescript.py | 13 ++++++------- test/functional/rpc_signrawtransaction.py | 3 +-- test/functional/test_framework/address.py | 10 +++++----- test/functional/test_framework/blocktools.py | 7 +++---- test/functional/test_framework/messages.py | 4 ++-- test/functional/test_framework/script_util.py | 7 +++---- test/functional/test_framework/util.py | 7 +------ test/functional/test_framework/wallet.py | 3 +-- test/functional/test_framework/wallet_util.py | 3 +-- 15 files changed, 40 insertions(+), 57 deletions(-) diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index c532300ce2..6d8e5430f8 100755 --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -41,7 +41,6 @@ from test_framework.util import ( assert_equal, create_confirmed_utxos, - hex_str_to_bytes, ) @@ -204,7 +203,7 @@ def generate_small_transactions(self, node, count, utxo_list): continue for _ in range(3): - tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) + tx.vout.append(CTxOut(output_amount, bytes.fromhex(utxo['scriptPubKey']))) # Sign and send the transaction to get into the mempool tx_signed_hex = node.signrawtransactionwithwallet(tx.serialize().hex())['hex'] diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index 9cf46d9d11..f36575627e 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -46,7 +46,6 @@ assert_equal, assert_is_hex_string, assert_raises_rpc_error, - hex_str_to_bytes, try_rpc, ) @@ -140,7 +139,7 @@ def run_test(self): for i in range(3): newaddress = self.nodes[i].getnewaddress() self.pubkey.append(self.nodes[i].getaddressinfo(newaddress)["pubkey"]) - multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG]) + multiscript = CScript([OP_1, bytes.fromhex(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG]) p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address'] bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address'] assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript)) @@ -352,7 +351,7 @@ def run_test(self): # Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address. 
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address'] - script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG]) + script = CScript([OP_2, bytes.fromhex(pubkeys[3]), bytes.fromhex(pubkeys[4]), OP_2, OP_CHECKMULTISIG]) solvable_after_importaddress.append(script_to_p2sh_script(script)) for i in compressed_spendable_address: @@ -426,7 +425,7 @@ def run_test(self): op1 = CScript([OP_1]) op0 = CScript([OP_0]) # 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V - unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D") + unsolvable_address_key = bytes.fromhex("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D") unsolvablep2pkh = key_to_p2pkh_script(unsolvable_address_key) unsolvablep2wshp2pkh = script_to_p2wsh_script(unsolvablep2pkh) p2shop0 = script_to_p2sh_script(op0) @@ -448,11 +447,11 @@ def run_test(self): for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address: v = self.nodes[0].getaddressinfo(i) if (v['isscript']): - bare = hex_str_to_bytes(v['hex']) + bare = bytes.fromhex(v['hex']) importlist.append(bare.hex()) importlist.append(script_to_p2wsh_script(bare).hex()) else: - pubkey = hex_str_to_bytes(v['pubkey']) + pubkey = bytes.fromhex(v['pubkey']) p2pk = CScript([pubkey, OP_CHECKSIG]) p2pkh = key_to_p2pkh_script(pubkey) importlist.append(p2pk.hex()) @@ -612,18 +611,18 @@ def mine_and_test_listunspent(self, script_list, ismine): return txid def p2sh_address_to_script(self, v): - bare = CScript(hex_str_to_bytes(v['hex'])) - p2sh = CScript(hex_str_to_bytes(v['scriptPubKey'])) + bare = CScript(bytes.fromhex(v['hex'])) + p2sh = CScript(bytes.fromhex(v['scriptPubKey'])) p2wsh = script_to_p2wsh_script(bare) p2sh_p2wsh = script_to_p2sh_script(p2wsh) return([bare, p2sh, p2wsh, p2sh_p2wsh]) def p2pkh_address_to_script(self, v): - pubkey = hex_str_to_bytes(v['pubkey']) + pubkey = bytes.fromhex(v['pubkey']) p2wpkh = key_to_p2wpkh_script(pubkey) p2sh_p2wpkh = script_to_p2sh_script(p2wpkh) p2pk = CScript([pubkey, OP_CHECKSIG]) - p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey'])) + p2pkh = CScript(bytes.fromhex(v['scriptPubKey'])) p2sh_p2pk = script_to_p2sh_script(p2pk) p2sh_p2pkh = script_to_p2sh_script(p2pkh) p2wsh_p2pk = script_to_p2wsh_script(p2pk) diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index e73ec90819..0cd6a7b0c6 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -19,7 +19,6 @@ assert_equal, assert_greater_than, assert_greater_than_or_equal, - hex_str_to_bytes, ) from test_framework.messages import BLOCK_HEADER_SIZE @@ -147,7 +146,7 @@ def run_test(self): bin_request = b'\x01\x02' for txid, n in [spending, spent]: - bin_request += hex_str_to_bytes(txid) + bin_request += bytes.fromhex(txid) bin_request += pack("i", n) bin_response = self.test_rest_request("/getutxos", http_method='POST', req_type=ReqType.BIN, body=bin_request, ret_type=RetType.BYTES) diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index 9c34506320..f3b80abb59 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -28,7 +28,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - 
hex_str_to_bytes, ) VALID_DATA_LIMIT = MAX_PROTOCOL_MESSAGE_LENGTH - 5 # Account for the 5-byte length prefix @@ -187,7 +186,7 @@ def test_addrv2_no_addresses(self): [ 'received: addrv2 (1 bytes)', ], - hex_str_to_bytes('00')) + bytes.fromhex('00')) def test_addrv2_too_long_address(self): self.test_addrv2('too long address', @@ -196,7 +195,7 @@ def test_addrv2_too_long_address(self): 'ProcessMessages(addrv2, 525 bytes): Exception', 'Address too long: 513 > 512', ], - hex_str_to_bytes( + bytes.fromhex( '01' + # number of entries '61bc6649' + # time, Fri Jan 9 02:54:25 UTC 2009 '00' + # service flags, COMPACTSIZE(NODE_NONE) @@ -213,7 +212,7 @@ def test_addrv2_unrecognized_network(self): 'IP 9.9.9.9 mapped', 'Added 1 addresses', ], - hex_str_to_bytes( + bytes.fromhex( '02' + # number of entries # this should be ignored without impeding acceptance of subsequent ones now_hex + # time diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 929efdd91c..c53feaf0c6 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -81,7 +81,6 @@ from test_framework.util import ( assert_equal, softfork_active, - hex_str_to_bytes, assert_raises_rpc_error, ) @@ -415,7 +414,7 @@ def test_block_relay(self): block = self.test_node.request_block(block_hash, 2) wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG) assert_equal(block.serialize(), wit_block.serialize()) - assert_equal(block.serialize(), hex_str_to_bytes(rpc_block)) + assert_equal(block.serialize(), bytes.fromhex(rpc_block)) else: # After activation, witness blocks and non-witness blocks should # be different. Verify rpc getblock() returns witness blocks, while @@ -430,7 +429,7 @@ def test_block_relay(self): rpc_block = self.nodes[0].getblock(block.hash, False) non_wit_block = self.test_node.request_block(block.sha256, 2) wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG) - assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block)) + assert_equal(wit_block.serialize(), bytes.fromhex(rpc_block)) assert_equal(wit_block.serialize(False), non_wit_block.serialize()) assert_equal(wit_block.serialize(), block.serialize()) diff --git a/test/functional/rpc_addresses_deprecation.py b/test/functional/rpc_addresses_deprecation.py index ac430f5b39..251cc85ae9 100755 --- a/test/functional/rpc_addresses_deprecation.py +++ b/test/functional/rpc_addresses_deprecation.py @@ -10,7 +10,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - hex_str_to_bytes ) @@ -36,7 +35,7 @@ def test_addresses_deprecation(self): # This transaction is derived from test/util/data/txcreatemultisig1.json tx = tx_from_hex(signed) - tx.vout[0].scriptPubKey = hex_str_to_bytes("522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae") + tx.vout[0].scriptPubKey = bytes.fromhex("522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae") tx_signed = node.signrawtransactionwithwallet(tx.serialize().hex())['hex'] txid = node.sendrawtransaction(hexstring=tx_signed, maxfeerate=0) diff --git a/test/functional/rpc_decodescript.py b/test/functional/rpc_decodescript.py index f6643c7167..5b1514af6f 100755 --- 
a/test/functional/rpc_decodescript.py +++ b/test/functional/rpc_decodescript.py @@ -11,7 +11,6 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - hex_str_to_bytes, ) @@ -86,7 +85,7 @@ def decodescript_script_pub_key(self): rpc_result = self.nodes[0].decodescript(multisig_script) assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm']) # multisig in P2WSH - multisig_script_hash = sha256(hex_str_to_bytes(multisig_script)).hex() + multisig_script_hash = sha256(bytes.fromhex(multisig_script)).hex() assert_equal('0 ' + multisig_script_hash, rpc_result['segwit']['asm']) # 4) P2SH scriptPubKey @@ -124,7 +123,7 @@ def decodescript_script_pub_key(self): rpc_result = self.nodes[0].decodescript(cltv_script) assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm']) # CLTV script in P2WSH - cltv_script_hash = sha256(hex_str_to_bytes(cltv_script)).hex() + cltv_script_hash = sha256(bytes.fromhex(cltv_script)).hex() assert_equal('0 ' + cltv_script_hash, rpc_result['segwit']['asm']) # 7) P2PK scriptPubKey @@ -209,23 +208,23 @@ def decoderawtransaction_asm_sighashtype(self): signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]' # 1) P2PK scriptSig - txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature) + txSave.vin[0].scriptSig = bytes.fromhex(push_signature) rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex()) assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # make sure that the sighash decodes come out correctly for a more complex / lesser used case. - txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2) + txSave.vin[0].scriptSig = bytes.fromhex(push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex()) assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 2) multisig scriptSig - txSave.vin[0].scriptSig = hex_str_to_bytes('00' + push_signature + push_signature_2) + txSave.vin[0].scriptSig = bytes.fromhex('00' + push_signature + push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex()) assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 3) test a scriptSig that contains more than push operations. # in fact, it contains an OP_RETURN with data specially crafted to cause improper decode if the code does not catch it. 
- txSave.vin[0].scriptSig = hex_str_to_bytes('6a143011020701010101010101020601010101010101') + txSave.vin[0].scriptSig = bytes.fromhex('6a143011020701010101010101020601010101010101') rpc_result = self.nodes[0].decoderawtransaction(txSave.serialize().hex()) assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm']) diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py index 571029155e..312a4abbc3 100755 --- a/test/functional/rpc_signrawtransaction.py +++ b/test/functional/rpc_signrawtransaction.py @@ -20,7 +20,6 @@ assert_raises_rpc_error, find_vout_for_address, generate_to_height, - hex_str_to_bytes, ) from test_framework.messages import ( CTxInWitness, @@ -233,7 +232,7 @@ def verify_txn_with_witness_script(self, tx_type): embedded_pubkey = eckey.get_pubkey().get_bytes().hex() witness_script = { 'P2PKH': key_to_p2pkh_script(embedded_pubkey).hex(), - 'P2PK': CScript([hex_str_to_bytes(embedded_pubkey), OP_CHECKSIG]).hex() + 'P2PK': CScript([bytes.fromhex(embedded_pubkey), OP_CHECKSIG]).hex() }.get(tx_type, "Invalid tx_type") redeem_script = script_to_p2wsh_script(witness_script).hex() addr = script_to_p2sh(redeem_script) diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py index 360962b8da..fe733e9368 100644 --- a/test/functional/test_framework/address.py +++ b/test/functional/test_framework/address.py @@ -12,7 +12,7 @@ from .script import hash256, hash160, sha256, CScript, OP_0 from .segwit_addr import encode_segwit_address -from .util import assert_equal, hex_str_to_bytes +from .util import assert_equal ADDRESS_BCRT1_UNSPENDABLE = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj' ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR = 'addr(bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj)#juyq9d97' @@ -33,7 +33,7 @@ def byte_to_base58(b, version): result = '' str = b.hex() str = chr(version).encode('latin-1').hex() + str - checksum = hash256(hex_str_to_bytes(str)).hex() + checksum = hash256(bytes.fromhex(str)).hex() str += checksum[:8] value = int('0x' + str, 0) while value > 0: @@ -100,7 +100,7 @@ def key_to_p2sh_p2wpkh(key, main=False): def program_to_witness(version, program, main=False): if (type(program) is str): - program = hex_str_to_bytes(program) + program = bytes.fromhex(program) assert 0 <= version <= 16 assert 2 <= len(program) <= 40 assert version > 0 or len(program) in [20, 32] @@ -121,14 +121,14 @@ def script_to_p2sh_p2wsh(script, main=False): def check_key(key): if (type(key) is str): - key = hex_str_to_bytes(key) # Assuming this is hex string + key = bytes.fromhex(key) # Assuming this is hex string if (type(key) is bytes and (len(key) == 33 or len(key) == 65)): return key assert False def check_script(script): if (type(script) is str): - script = hex_str_to_bytes(script) # Assuming this is hex string + script = bytes.fromhex(script) # Assuming this is hex string if (type(script) is bytes or type(script) is CScript): return script assert False diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 81e931e976..5e6c9f37e8 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -24,7 +24,6 @@ CTxInWitness, CTxOut, hash256, - hex_str_to_bytes, ser_uint256, tx_from_hex, uint256_from_str, @@ -214,7 +213,7 @@ def witness_script(use_p2wsh, pubkey): pkscript = key_to_p2wpkh_script(pubkey) else: # 1-of-1 multisig - 
witness_script = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG]) + witness_script = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG]) pkscript = script_to_p2wsh_script(witness_script) return pkscript.hex() @@ -223,7 +222,7 @@ def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount): Optionally wrap the segwit output using P2SH.""" if use_p2wsh: - program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG]) + program = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG]) addr = script_to_p2sh_p2wsh(program) if encode_p2sh else script_to_p2wsh(program) else: addr = key_to_p2sh_p2wpkh(pubkey) if encode_p2sh else key_to_p2wpkh(pubkey) @@ -246,7 +245,7 @@ def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=Tru else: if (insert_redeem_script): tx = tx_from_hex(tx_to_witness) - tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)]) + tx.vin[0].scriptSig += CScript([bytes.fromhex(insert_redeem_script)]) tx_to_witness = tx.serialize().hex() return node.sendrawtransaction(tx_to_witness) diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 065e8961ae..0523627cef 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -30,7 +30,7 @@ import time from test_framework.siphash import siphash256 -from test_framework.util import hex_str_to_bytes, assert_equal +from test_framework.util import assert_equal MAX_LOCATOR_SZ = 101 MAX_BLOCK_BASE_SIZE = 1000000 @@ -197,7 +197,7 @@ def from_hex(obj, hex_string): Note that there is no complementary helper like e.g. `to_hex` for the inverse operation. To serialize a message object to a hex string, simply use obj.serialize().hex()""" - obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) + obj.deserialize(BytesIO(bytes.fromhex(hex_string))) return obj diff --git a/test/functional/test_framework/script_util.py b/test/functional/test_framework/script_util.py index 457be6b0e6..5d1d7ea45c 100755 --- a/test/functional/test_framework/script_util.py +++ b/test/functional/test_framework/script_util.py @@ -4,7 +4,6 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
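As the from_hex() docstring above notes, obj.serialize().hex() is the inverse operation. A minimal round-trip sketch with dummy values, assuming it is run inside the functional-test framework:

    from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut, from_hex

    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(0, 0), b""))  # dummy prevout, for illustration only
    tx.vout.append(CTxOut(0, b"\x51"))          # zero-value OP_TRUE output, for illustration only
    tx_hex = tx.serialize().hex()
    assert from_hex(CTransaction(), tx_hex).serialize().hex() == tx_hex
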
"""Useful Script constants and utils.""" from test_framework.script import CScript, hash160, sha256, OP_0, OP_DUP, OP_HASH160, OP_CHECKSIG, OP_EQUAL, OP_EQUALVERIFY -from test_framework.util import hex_str_to_bytes # To prevent a "tx-size-small" policy rule error, a transaction has to have a # non-witness size of at least 82 bytes (MIN_STANDARD_TX_NONWITNESS_SIZE in @@ -49,7 +48,7 @@ def key_to_p2sh_p2wpkh_script(key, main = False): def program_to_witness_script(version, program, main = False): if isinstance(program, str): - program = hex_str_to_bytes(program) + program = bytes.fromhex(program) assert 0 <= version <= 16 assert 2 <= len(program) <= 40 assert version > 0 or len(program) in [20, 32] @@ -70,14 +69,14 @@ def script_to_p2sh_p2wsh_script(script, main = False): def check_key(key): if isinstance(key, str): - key = hex_str_to_bytes(key) # Assuming this is hex string + key = bytes.fromhex(key) # Assuming this is hex string if isinstance(key, bytes) and (len(key) == 33 or len(key) == 65): return key assert False def check_script(script): if isinstance(script, str): - script = hex_str_to_bytes(script) # Assuming this is hex string + script = bytes.fromhex(script) # Assuming this is hex string if isinstance(script, bytes) or isinstance(script, CScript): return script assert False diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index a9a6adcfc8..54f2fdee21 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -5,7 +5,6 @@ """Helpful routines for regression testing.""" from base64 import b64encode -from binascii import unhexlify from decimal import Decimal, ROUND_DOWN from subprocess import CalledProcessError import hashlib @@ -214,10 +213,6 @@ def count_bytes(hex_string): return len(bytearray.fromhex(hex_string)) -def hex_str_to_bytes(hex_str): - return unhexlify(hex_str.encode('ascii')) - - def str_to_b64str(string): return b64encode(string.encode('utf-8')).decode('ascii') @@ -517,7 +512,7 @@ def gen_return_txouts(): from .messages import CTxOut txout = CTxOut() txout.nValue = 0 - txout.scriptPubKey = hex_str_to_bytes(script_pubkey) + txout.scriptPubKey = bytes.fromhex(script_pubkey) for _ in range(128): txouts.append(txout) return txouts diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index 47ec6b0be2..609553c6d0 100644 --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -27,7 +27,6 @@ ) from test_framework.util import ( assert_equal, - hex_str_to_bytes, satoshi_round, ) @@ -73,7 +72,7 @@ def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE): self._scriptPubKey = bytes(CScript([pub_key.get_bytes(), OP_CHECKSIG])) elif mode == MiniWalletMode.ADDRESS_OP_TRUE: self._address = ADDRESS_BCRT1_P2WSH_OP_TRUE - self._scriptPubKey = hex_str_to_bytes(self._test_node.validateaddress(self._address)['scriptPubKey']) + self._scriptPubKey = bytes.fromhex(self._test_node.validateaddress(self._address)['scriptPubKey']) def scan_blocks(self, *, start=1, num): """Scan the blocks for self._address outputs and add them to self._utxos""" diff --git a/test/functional/test_framework/wallet_util.py b/test/functional/test_framework/wallet_util.py index acbc040741..1ee55aa3b7 100755 --- a/test/functional/test_framework/wallet_util.py +++ b/test/functional/test_framework/wallet_util.py @@ -27,7 +27,6 @@ script_to_p2sh_script, script_to_p2wsh_script, ) -from test_framework.util import hex_str_to_bytes Key = 
namedtuple('Key', ['privkey', 'pubkey', @@ -93,7 +92,7 @@ def get_multisig(node): addr = node.getaddressinfo(node.getnewaddress()) addrs.append(addr['address']) pubkeys.append(addr['pubkey']) - script_code = CScript([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG]) + script_code = CScript([OP_2] + [bytes.fromhex(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG]) witness_script = script_to_p2wsh_script(script_code) return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs], pubkeys=pubkeys, From 6408b24517f3418e2a408071b4c2ce26571f3167 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Mon, 8 Feb 2021 13:34:40 +0100 Subject: [PATCH 096/112] fuzz: move init code to the CAddrManDeterministic constructor Move the addrman init code from the test case to a newly added `CAddrManDeterministic` constructor. This way it can be reused by other tests. --- src/test/fuzz/addrman.cpp | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp index 344d1dde8e..94369fa5a0 100644 --- a/src/test/fuzz/addrman.cpp +++ b/src/test/fuzz/addrman.cpp @@ -25,10 +25,15 @@ void initialize_addrman() class CAddrManDeterministic : public CAddrMan { public: - void MakeDeterministic(const uint256& random_seed) + explicit CAddrManDeterministic(FuzzedDataProvider& fuzzed_data_provider) { - WITH_LOCK(cs, insecure_rand = FastRandomContext{random_seed}); - Clear(); + WITH_LOCK(cs, insecure_rand = FastRandomContext{ConsumeUInt256(fuzzed_data_provider)}); + if (fuzzed_data_provider.ConsumeBool()) { + m_asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider); + if (!SanityCheckASMap(m_asmap)) { + m_asmap.clear(); + } + } } }; @@ -36,14 +41,7 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); SetMockTime(ConsumeTime(fuzzed_data_provider)); - CAddrManDeterministic addr_man; - addr_man.MakeDeterministic(ConsumeUInt256(fuzzed_data_provider)); - if (fuzzed_data_provider.ConsumeBool()) { - addr_man.m_asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider); - if (!SanityCheckASMap(addr_man.m_asmap)) { - addr_man.m_asmap.clear(); - } - } + CAddrManDeterministic addr_man{fuzzed_data_provider}; if (fuzzed_data_provider.ConsumeBool()) { const std::vector serialized_data{ConsumeRandomLengthByteVector(fuzzed_data_provider)}; CDataStream ds(serialized_data, SER_DISK, INIT_PROTO_VERSION); From 059171009b0138555f311cedc2553015ff618323 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Sun, 1 Aug 2021 18:36:43 +1000 Subject: [PATCH 097/112] consensus/params: simplify ValidDeployment check to avoid gcc warning --- src/consensus/params.h | 4 ++-- src/deploymentstatus.cpp | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/src/consensus/params.h b/src/consensus/params.h index 9205cfee87..77bf7fd0d8 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -23,7 +23,7 @@ enum BuriedDeployment : int16_t { DEPLOYMENT_CSV, DEPLOYMENT_SEGWIT, }; -constexpr bool ValidDeployment(BuriedDeployment dep) { return DEPLOYMENT_HEIGHTINCB <= dep && dep <= DEPLOYMENT_SEGWIT; } +constexpr bool ValidDeployment(BuriedDeployment dep) { return dep <= DEPLOYMENT_SEGWIT; } enum DeploymentPos : uint16_t { DEPLOYMENT_TESTDUMMY, @@ -31,7 +31,7 @@ enum DeploymentPos : uint16_t { // NOTE: Also add new deployments to VersionBitsDeploymentInfo in deploymentinfo.cpp MAX_VERSION_BITS_DEPLOYMENTS }; -constexpr bool 
ValidDeployment(DeploymentPos dep) { return DEPLOYMENT_TESTDUMMY <= dep && dep <= DEPLOYMENT_TAPROOT; } +constexpr bool ValidDeployment(DeploymentPos dep) { return dep < MAX_VERSION_BITS_DEPLOYMENTS; } /** * Struct for each individual consensus rule change using BIP9. diff --git a/src/deploymentstatus.cpp b/src/deploymentstatus.cpp index 9007800421..bba86639a3 100644 --- a/src/deploymentstatus.cpp +++ b/src/deploymentstatus.cpp @@ -7,6 +7,8 @@ #include #include +#include + VersionBitsCache g_versionbitscache; /* Basic sanity checking for BuriedDeployment/DeploymentPos enums and @@ -15,3 +17,18 @@ VersionBitsCache g_versionbitscache; static_assert(ValidDeployment(Consensus::DEPLOYMENT_TESTDUMMY), "sanity check of DeploymentPos failed (TESTDUMMY not valid)"); static_assert(!ValidDeployment(Consensus::MAX_VERSION_BITS_DEPLOYMENTS), "sanity check of DeploymentPos failed (MAX value considered valid)"); static_assert(!ValidDeployment(static_cast(Consensus::DEPLOYMENT_TESTDUMMY)), "sanity check of BuriedDeployment failed (overlaps with DeploymentPos)"); + +/* ValidDeployment only checks upper bounds for ensuring validity. + * This checks that the lowest possible value or the type is also a + * (specific) valid deployment so that lower bounds don't need to be checked. + */ + +template +static constexpr bool is_minimum() +{ + using U = typename std::underlying_type::type; + return x == std::numeric_limits::min(); +} + +static_assert(is_minimum(), "heightincb is not minimum value for BuriedDeployment"); +static_assert(is_minimum(), "testdummy is not minimum value for DeploymentPos"); From fa2c868886a52352631d32c398d98a2740639e86 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Mon, 2 Aug 2021 16:37:49 +0200 Subject: [PATCH 098/112] doc: Add release notes for 22570 (ignore banlist.dat) --- doc/release-notes.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/doc/release-notes.md b/doc/release-notes.md index cf9edd9b08..f46e9e52c7 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -66,6 +66,16 @@ New RPCs Build System ------------ +Files +----- + +* On startup, the list of banned hosts and networks (via `setban` RPC) in + `banlist.dat` is ignored and only `banlist.json` is considered. Bitcoin Core + version 22.x is the only version that can read `banlist.dat` and also write + it to `banlist.json`. If `banlist.json` already exists, version 22.x will not + try to translate the `banlist.dat` into json. After an upgrade, `listbanned` + can be used to double check the parsed entries. 
(#22570) + New settings ------------ From 4a1b2a7ba7f804e656a8cd29d5aa80fcbd40904f Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Sun, 1 Aug 2021 19:24:49 +0200 Subject: [PATCH 099/112] [GetTransaction] remove unneeded `cs_main` lock acquire --- src/node/transaction.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index 1861755aff..2a7bcc057f 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -125,8 +125,6 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock) { - LOCK(cs_main); - if (mempool && !block_index) { CTransactionRef ptx = mempool->get(hash); if (ptx) return ptx; From 318176aff1ded36d1fbc5977f288ac3bac1d8712 Mon Sep 17 00:00:00 2001 From: Martin Zumsande Date: Wed, 28 Jul 2021 23:48:34 +0200 Subject: [PATCH 100/112] doc: Update high-level addrman description --- src/addrman.h | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/addrman.h b/src/addrman.h index 1fc64ac07f..7e87457a9f 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -104,19 +104,23 @@ class CAddrInfo : public CAddress * * Make sure no (localized) attacker can fill the entire table with his nodes/addresses. * * To that end: - * * Addresses are organized into buckets. - * * Addresses that have not yet been tried go into 1024 "new" buckets. - * * Based on the address range (/16 for IPv4) of the source of information, 64 buckets are selected at random. + * * Addresses are organized into buckets that can each store up to 64 entries. + * * Addresses to which our node has not successfully connected go into 1024 "new" buckets. + * * Based on the address range (/16 for IPv4) of the source of information, or if an asmap is provided, + * the AS it belongs to (for IPv4/IPv6), 64 buckets are selected at random. * * The actual bucket is chosen from one of these, based on the range in which the address itself is located. + * * The position in the bucket is chosen based on the full address. * * One single address can occur in up to 8 different buckets to increase selection chances for addresses that * are seen frequently. The chance for increasing this multiplicity decreases exponentially. - * * When adding a new address to a full bucket, a randomly chosen entry (with a bias favoring less recently seen - * ones) is removed from it first. + * * When adding a new address to an occupied position of a bucket, it will not replace the existing entry + * unless that address is also stored in another bucket or it doesn't meet one of several quality criteria + * (see IsTerrible for exact criteria). * * Addresses of nodes that are known to be accessible go into 256 "tried" buckets. * * Each address range selects at random 8 of these buckets. * * The actual bucket is chosen from one of these, based on the full address. - * * When adding a new good address to a full bucket, a randomly chosen entry (with a bias favoring less recently - * tried ones) is evicted from it, back to the "new" buckets. + * * When adding a new good address to an occupied position of a bucket, a FEELER connection to the + * old address is attempted. The old entry is only replaced and moved back to the "new" buckets if this + * attempt was unsuccessful. 
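The bucketing rules above can be pictured with a deliberately simplified model: the source's group narrows the choice to 64 candidate buckets, the address's own group picks the final bucket among the 1024 "new" buckets, and the full address fixes the position inside it. The sketch below is illustrative only; CAddrMan derives these indices from a keyed cryptographic hash (see the next bullet), not std::hash, and the exact construction differs.

    #include <cstdint>
    #include <functional>
    #include <string>

    struct NewSlot { uint32_t bucket; uint32_t position; };

    // Simplified model of "new" table placement; NOT the real CAddrMan scheme.
    NewSlot NewTableSlotSketch(const std::string& key, const std::string& addr,
                               const std::string& addr_group, const std::string& src_group)
    {
        std::hash<std::string> h;
        // The source's group selects one of 64 candidate buckets...
        const uint32_t candidate = h(key + src_group + addr_group) % 64;
        // ...which maps into the 1024 "new" buckets...
        const uint32_t bucket = h(key + src_group + std::to_string(candidate)) % 1024;
        // ...and the full address picks the position (0-63) within that bucket.
        const uint32_t position = h(key + addr) % 64;
        return {bucket, position};
    }
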
* * Bucket selection is based on cryptographic hashing, using a randomly-generated 256-bit key, which should not * be observable by adversaries. * * Several indexes are kept for high performance. Defining DEBUG_ADDRMAN will introduce frequent (and expensive) From 036d7eadf5dd0e06e0734a5d78dbe28f4bfaa07f Mon Sep 17 00:00:00 2001 From: Amiti Uttarwar Date: Mon, 2 Aug 2021 22:22:18 +0200 Subject: [PATCH 101/112] doc: Correct description of CAddrMan::Create() Co-authored-by: Martin Zumsande --- src/addrman.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/addrman.h b/src/addrman.h index 7e87457a9f..4f1d5e6bdf 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -696,8 +696,7 @@ class CAddrMan //! Find an entry. CAddrInfo* Find(const CNetAddr& addr, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs); - //! find an entry, creating it if necessary. - //! nTime and nServices of the found node are updated, if necessary. + //! Create a new entry and add it to the internal data structures mapInfo, mapAddr and vRandom. CAddrInfo* Create(const CAddress &addr, const CNetAddr &addrSource, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs); //! Swap two elements in vRandom. From 91b05974fc1ea38062f12d36152201af81bda1a2 Mon Sep 17 00:00:00 2001 From: naiza Date: Mon, 26 Jul 2021 03:06:26 +0530 Subject: [PATCH 102/112] Improve mempool_accept_wtxid.py Improve mempool_accept_wtxid.py Improve mempool_accept_wtxid.py Improve mempool_accept_wtxid.py Improve mempool_accept_wtxid.py --- test/functional/mempool_accept_wtxid.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/test/functional/mempool_accept_wtxid.py b/test/functional/mempool_accept_wtxid.py index 63ecc8ee2a..ffafe7428f 100755 --- a/test/functional/mempool_accept_wtxid.py +++ b/test/functional/mempool_accept_wtxid.py @@ -4,9 +4,10 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test mempool acceptance in case of an already known transaction -with identical non-witness data different witness. +with identical non-witness data but different witness. 
""" +from copy import deepcopy from test_framework.messages import ( COIN, COutPoint, @@ -79,10 +80,7 @@ def run_test(self): child_one_txid = child_one.rehash() # Create another identical transaction with witness solving second branch - child_two = CTransaction() - child_two.vin.append(CTxIn(COutPoint(int(parent_txid, 16), 0), b"")) - child_two.vout.append(CTxOut(int(9.99996 * COIN), child_script_pubkey)) - child_two.wit.vtxinwit.append(CTxInWitness()) + child_two = deepcopy(child_one) child_two.wit.vtxinwit[0].scriptWitness.stack = [b'', witness_script] child_two_wtxid = child_two.getwtxid() child_two_txid = child_two.rehash() @@ -104,8 +102,7 @@ def run_test(self): "allowed": False, "reject-reason": "txn-already-in-mempool" }]) - testres_child_two = node.testmempoolaccept([child_two.serialize().hex()])[0] - assert_equal(testres_child_two, { + assert_equal(node.testmempoolaccept([child_two.serialize().hex()])[0], { "txid": child_two_txid, "wtxid": child_two_wtxid, "allowed": False, From 5e33f762d44557a1e3f0ff3c280d8a3ab98e3867 Mon Sep 17 00:00:00 2001 From: Jon Atack Date: Tue, 3 Aug 2021 13:04:19 +0200 Subject: [PATCH 103/112] p2p, rpc: address relay fixups --- src/net_processing.cpp | 4 ++-- src/rpc/net.cpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 005fe1bf0c..3a85e99a5f 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -652,7 +652,7 @@ class PeerManagerImpl final : public PeerManager * @return True if address relay is enabled with peer * False if address relay is disallowed */ - bool SetupAddressRelay(CNode& node, Peer& peer); + bool SetupAddressRelay(const CNode& node, Peer& peer); }; } // namespace @@ -4461,7 +4461,7 @@ class CompareInvMempoolOrder }; } -bool PeerManagerImpl::SetupAddressRelay(CNode& node, Peer& peer) +bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) { // We don't participate in addr relay with outbound block-relay-only // connections to prevent providing adversaries with the additional diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 3962a13924..abc9ec3ce3 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -118,7 +118,6 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR, "addr", "(host:port) The IP address and port of the peer"}, {RPCResult::Type::STR, "addrbind", "(ip:port) Bind address of the connection to the peer"}, {RPCResult::Type::STR, "addrlocal", "(ip:port) Local address as reported by the peer"}, - {RPCResult::Type::BOOL, "addr_relay_enabled", "Whether we participate in address relay with this peer"}, {RPCResult::Type::STR, "network", "Network (" + Join(GetNetworkNames(/* append_unroutable */ true), ", ") + ")"}, {RPCResult::Type::NUM, "mapped_as", "The AS in the BGP route to the peer used for diversifying\n" "peer selection (only available if the asmap config flag is set)"}, @@ -151,6 +150,7 @@ static RPCHelpMan getpeerinfo() { {RPCResult::Type::NUM, "n", "The heights of blocks we're currently asking from this peer"}, }}, + {RPCResult::Type::BOOL, "addr_relay_enabled", "Whether we participate in address relay with this peer"}, {RPCResult::Type::ARR, "permissions", "Any special permissions that have been granted to this peer", { {RPCResult::Type::STR, "permission_type", Join(NET_PERMISSIONS_DOC, ",\n") + ".\n"}, @@ -202,7 +202,6 @@ static RPCHelpMan getpeerinfo() if (!(stats.addrLocal.empty())) { obj.pushKV("addrlocal", stats.addrLocal); } - obj.pushKV("addr_relay_enabled", statestats.m_addr_relay_enabled); 
obj.pushKV("network", GetNetworkName(stats.m_network)); if (stats.m_mapped_as != 0) { obj.pushKV("mapped_as", uint64_t(stats.m_mapped_as)); @@ -244,6 +243,7 @@ static RPCHelpMan getpeerinfo() heights.push_back(height); } obj.pushKV("inflight", heights); + obj.pushKV("addr_relay_enabled", statestats.m_addr_relay_enabled); obj.pushKV("addr_processed", statestats.m_addr_processed); obj.pushKV("addr_rate_limited", statestats.m_addr_rate_limited); } From d54d94959869b0c363939163b99ba0475751dcb6 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Tue, 3 Aug 2021 22:21:34 +0300 Subject: [PATCH 104/112] qt: Fix regression in "Encrypt Wallet" menu item Adding a new item to the m_wallet_selector must follow the establishment of signal-slot connections. --- src/qt/bitcoingui.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index f8aeb01659..863225099a 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -682,8 +682,6 @@ void BitcoinGUI::addWallet(WalletModel* walletModel) m_wallet_selector_label_action->setVisible(true); m_wallet_selector_action->setVisible(true); } - const QString display_name = walletModel->getDisplayName(); - m_wallet_selector->addItem(display_name, QVariant::fromValue(walletModel)); connect(wallet_view, &WalletView::outOfSyncWarningClicked, this, &BitcoinGUI::showModalOverlay); connect(wallet_view, &WalletView::transactionClicked, this, &BitcoinGUI::gotoHistoryPage); @@ -696,6 +694,8 @@ void BitcoinGUI::addWallet(WalletModel* walletModel) connect(wallet_view, &WalletView::hdEnabledStatusChanged, this, &BitcoinGUI::updateWalletStatus); connect(this, &BitcoinGUI::setPrivacy, wallet_view, &WalletView::setPrivacy); wallet_view->setPrivacy(isPrivacyModeActivated()); + const QString display_name = walletModel->getDisplayName(); + m_wallet_selector->addItem(display_name, QVariant::fromValue(walletModel)); } void BitcoinGUI::removeWallet(WalletModel* walletModel) From 32fa49a18497a9b8c72e36a72ae96e7b23930223 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 4 Aug 2021 13:38:36 +0800 Subject: [PATCH 105/112] make ParseOutputType return a std::optional --- src/outputtype.cpp | 17 +++++++---------- src/outputtype.h | 3 ++- src/rpc/misc.cpp | 8 +++++--- src/test/fuzz/kitchen_sink.cpp | 8 ++++---- src/test/fuzz/string.cpp | 3 +-- src/wallet/rpcwallet.cpp | 27 +++++++++++++++------------ src/wallet/wallet.cpp | 10 ++++++---- 7 files changed, 40 insertions(+), 36 deletions(-) diff --git a/src/outputtype.cpp b/src/outputtype.cpp index 8ede7b9974..b5f1df9792 100644 --- a/src/outputtype.cpp +++ b/src/outputtype.cpp @@ -13,6 +13,7 @@ #include #include +#include #include static const std::string OUTPUT_TYPE_STRING_LEGACY = "legacy"; @@ -20,22 +21,18 @@ static const std::string OUTPUT_TYPE_STRING_P2SH_SEGWIT = "p2sh-segwit"; static const std::string OUTPUT_TYPE_STRING_BECH32 = "bech32"; static const std::string OUTPUT_TYPE_STRING_BECH32M = "bech32m"; -bool ParseOutputType(const std::string& type, OutputType& output_type) +std::optional ParseOutputType(const std::string& type) { if (type == OUTPUT_TYPE_STRING_LEGACY) { - output_type = OutputType::LEGACY; - return true; + return OutputType::LEGACY; } else if (type == OUTPUT_TYPE_STRING_P2SH_SEGWIT) { - output_type = OutputType::P2SH_SEGWIT; - return true; + return OutputType::P2SH_SEGWIT; } else if (type == OUTPUT_TYPE_STRING_BECH32) { - output_type = OutputType::BECH32; - return true; + return OutputType::BECH32; } else 
if (type == OUTPUT_TYPE_STRING_BECH32M) { - output_type = OutputType::BECH32M; - return true; + return OutputType::BECH32M; } - return false; + return std::nullopt; } const std::string& FormatOutputType(OutputType type) diff --git a/src/outputtype.h b/src/outputtype.h index 2b83235cd0..0de7689125 100644 --- a/src/outputtype.h +++ b/src/outputtype.h @@ -11,6 +11,7 @@ #include
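With the std::optional return, error handling moves naturally to the call sites; the remaining hunks of this patch update them, roughly along the lines sketched below (illustrative helper, not code from the patch):

    #include <outputtype.h>

    #include <optional>
    #include <stdexcept>
    #include <string>

    OutputType ParseOutputTypeOrThrow(const std::string& type)
    {
        const std::optional<OutputType> parsed{ParseOutputType(type)};
        if (!parsed) throw std::runtime_error("Unknown output type: " + type);
        return *parsed;  // no default-constructed out-parameter is left behind on failure
    }
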