From bd461195f4b3a3c53366c8f197e7701836cab053 Mon Sep 17 00:00:00 2001 From: ismaelsadeeq Date: Mon, 23 Dec 2024 17:13:39 -0500 Subject: [PATCH 001/390] bench: support benching all verbosity of `BlockToJson` --- src/bench/rpc_blockchain.cpp | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp index df951a14e495..52916242fa54 100644 --- a/src/bench/rpc_blockchain.cpp +++ b/src/bench/rpc_blockchain.cpp @@ -45,17 +45,34 @@ struct TestBlockAndIndex { } // namespace -static void BlockToJsonVerbose(benchmark::Bench& bench) +static void BlockToJson(benchmark::Bench& bench, TxVerbosity verbosity) { TestBlockAndIndex data; const uint256 pow_limit{data.testing_setup->m_node.chainman->GetParams().GetConsensus().powLimit}; bench.run([&] { - auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, data.blockindex, data.blockindex, TxVerbosity::SHOW_DETAILS_AND_PREVOUT, pow_limit); + auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, data.blockindex, data.blockindex, verbosity, pow_limit); ankerl::nanobench::doNotOptimizeAway(univalue); }); } -BENCHMARK(BlockToJsonVerbose, benchmark::PriorityLevel::HIGH); +static void BlockToJsonVerbosity1(benchmark::Bench& bench) +{ + BlockToJson(bench, TxVerbosity::SHOW_TXID); +} + +static void BlockToJsonVerbosity2(benchmark::Bench& bench) +{ + BlockToJson(bench, TxVerbosity::SHOW_DETAILS); +} + +static void BlockToJsonVerbosity3(benchmark::Bench& bench) +{ + BlockToJson(bench, TxVerbosity::SHOW_DETAILS_AND_PREVOUT); +} + +BENCHMARK(BlockToJsonVerbosity1, benchmark::PriorityLevel::HIGH); +BENCHMARK(BlockToJsonVerbosity2, benchmark::PriorityLevel::HIGH); +BENCHMARK(BlockToJsonVerbosity3, benchmark::PriorityLevel::HIGH); static void BlockToJsonVerboseWrite(benchmark::Bench& bench) { From 6a506d5c37d1cb179b4e818dfbeaff5834420a33 Mon Sep 17 00:00:00 2001 From: ismaelsadeeq Date: Mon, 28 Oct 2024 13:02:34 +0100 Subject: [PATCH 002/390] UniValue: add reserve member function - Only reserve keys when the typ is of `VOBJ`. --- src/univalue/include/univalue.h | 2 ++ src/univalue/lib/univalue.cpp | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/src/univalue/include/univalue.h b/src/univalue/include/univalue.h index da1215755542..0e5404eb1dfa 100644 --- a/src/univalue/include/univalue.h +++ b/src/univalue/include/univalue.h @@ -70,6 +70,8 @@ class UniValue { size_t size() const { return values.size(); } + void reserve(size_t new_cap); + void getObjMap(std::map& kv) const; bool checkObject(const std::map& memberTypes) const; const UniValue& operator[](const std::string& key) const; diff --git a/src/univalue/lib/univalue.cpp b/src/univalue/lib/univalue.cpp index 656d2e820300..4d37c81fe870 100644 --- a/src/univalue/lib/univalue.cpp +++ b/src/univalue/lib/univalue.cpp @@ -240,3 +240,10 @@ const UniValue& UniValue::find_value(std::string_view key) const return NullUniValue; } +void UniValue::reserve(size_t new_cap) +{ + values.reserve(new_cap); + if (typ == VOBJ) { + keys.reserve(new_cap); + } +} From 5d82d92aff7c11ce17ee809c060e37f73a8a687a Mon Sep 17 00:00:00 2001 From: ismaelsadeeq Date: Mon, 28 Oct 2024 13:03:11 +0100 Subject: [PATCH 003/390] rpc: reserve space for `UniValue` variables in `blockToJSON` - Reserving space avoid reallocation, this provide noticeable performance increase in verbosity 1. 
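To illustrate the intended call pattern (not part of the diff below), here is a minimal sketch of pre-sizing a UniValue array before a push_back loop, mirroring the blockToJSON change; the helper name and its input are hypothetical:

```cpp
// Sketch only: pre-size the array so the push_back loop below never
// reallocates. MakeTxidArray and txids are illustrative names.
#include <univalue.h>

#include <string>
#include <vector>

UniValue MakeTxidArray(const std::vector<std::string>& txids)
{
    UniValue arr{UniValue::VARR};
    arr.reserve(txids.size()); // one allocation up front
    for (const std::string& txid : txids) {
        arr.push_back(txid);
    }
    return arr;
}
```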
--- src/core_write.cpp | 3 +++ src/rpc/blockchain.cpp | 1 + 2 files changed, 4 insertions(+) diff --git a/src/core_write.cpp b/src/core_write.cpp index 253dfde1006b..14836f51489d 100644 --- a/src/core_write.cpp +++ b/src/core_write.cpp @@ -181,6 +181,7 @@ void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry entry.pushKV("locktime", (int64_t)tx.nLockTime); UniValue vin{UniValue::VARR}; + vin.reserve(tx.vin.size()); // If available, use Undo data to calculate the fee. Note that txundo == nullptr // for coinbase transactions and for transactions where undo data is unavailable. @@ -203,6 +204,7 @@ void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry } if (!tx.vin[i].scriptWitness.IsNull()) { UniValue txinwitness(UniValue::VARR); + txinwitness.reserve(tx.vin[i].scriptWitness.stack.size()); for (const auto& item : tx.vin[i].scriptWitness.stack) { txinwitness.push_back(HexStr(item)); } @@ -232,6 +234,7 @@ void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry entry.pushKV("vin", std::move(vin)); UniValue vout(UniValue::VARR); + vout.reserve(tx.vout.size()); for (unsigned int i = 0; i < tx.vout.size(); i++) { const CTxOut& txout = tx.vout[i]; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index ac1ce6285f70..0ac36b005ecc 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -184,6 +184,7 @@ UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIn result.pushKV("size", (int)::GetSerializeSize(TX_WITH_WITNESS(block))); result.pushKV("weight", (int)::GetBlockWeight(block)); UniValue txs(UniValue::VARR); + txs.reserve(block.vtx.size()); switch (verbosity) { case TxVerbosity::SHOW_TXID: From 0786b7509acd7e160345eea5fc25acd3c795d01c Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Thu, 21 Nov 2024 13:35:19 +0100 Subject: [PATCH 004/390] rpc: add optional blockhash to waitfornewblock --- doc/release-30635.md | 4 ++++ src/rpc/blockchain.cpp | 19 ++++++++++++++++--- test/functional/rpc_blockchain.py | 3 ++- 3 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 doc/release-30635.md diff --git a/doc/release-30635.md b/doc/release-30635.md new file mode 100644 index 000000000000..179a4f49a0a9 --- /dev/null +++ b/doc/release-30635.md @@ -0,0 +1,4 @@ +Updated RPCs +------------ + +- The waitfornewblock RPC takes an optional `current_tip` argument. (#30635) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 6e5c656f3d83..ef1a7ed39398 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -266,6 +266,7 @@ static RPCHelpMan waitfornewblock() "\nMake sure to use no RPC timeout (bitcoin-cli -rpcclienttimeout=0)", { {"timeout", RPCArg::Type::NUM, RPCArg::Default{0}, "Time in milliseconds to wait for a response. 0 indicates no timeout."}, + {"current_tip", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "Method waits for the chain tip to differ from this."}, }, RPCResult{ RPCResult::Type::OBJ, "", "", @@ -287,10 +288,22 @@ static RPCHelpMan waitfornewblock() NodeContext& node = EnsureAnyNodeContext(request.context); Mining& miner = EnsureMining(node); - // Abort if RPC came out of warmup too early + // If the caller provided a current_tip value, pass it to waitTipChanged(). + // + // If the caller did not provide a current tip hash, call getTip() to get + // one and wait for the tip to be different from this value. 
This mode is + // less reliable because if the tip changed between waitfornewblock calls, + // it will need to change a second time before this call returns. BlockRef current_block{CHECK_NONFATAL(miner.getTip()).value()}; - std::optional block = timeout ? miner.waitTipChanged(current_block.hash, std::chrono::milliseconds(timeout)) : - miner.waitTipChanged(current_block.hash); + + uint256 tip_hash{request.params[1].isNull() + ? current_block.hash + : ParseHashV(request.params[1], "current_tip")}; + + // If the user provided an invalid current_tip then this call immediately + // returns the current tip. + std::optional block = timeout ? miner.waitTipChanged(tip_hash, std::chrono::milliseconds(timeout)) : + miner.waitTipChanged(tip_hash); // Return current block upon shutdown if (block) current_block = *block; diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 2976f9188ae6..b0ac519de0d9 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -579,7 +579,8 @@ def _test_waitforblock(self): node.reconsiderblock(rollback_hash) # The chain has probably already been restored by the time reconsiderblock returns, # but poll anyway. - self.wait_until(lambda: node.waitfornewblock(timeout=100)['hash'] == current_hash) + self.wait_until(lambda: node.waitfornewblock(current_tip=rollback_header['previousblockhash'])['hash'] == current_hash) + assert_raises_rpc_error(-1, "Negative timeout", node.waitfornewblock, -1) def _test_waitforblockheight(self): From c6e2c31c55123cc97b4400bcbf3c37a39b067a22 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Wed, 19 Feb 2025 13:39:41 +0100 Subject: [PATCH 005/390] rpc: unhide waitfor{block,newblock,blockheight} They are now reliable. An earlier commit dropped their IsRPCRunning() guards so they also work in the GUI. --- doc/release-30635.md | 3 ++- src/rpc/blockchain.cpp | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/release-30635.md b/doc/release-30635.md index 179a4f49a0a9..0ec68e93cc5d 100644 --- a/doc/release-30635.md +++ b/doc/release-30635.md @@ -1,4 +1,5 @@ Updated RPCs ------------ -- The waitfornewblock RPC takes an optional `current_tip` argument. (#30635) +- The waitfornewblock now takes an optional `current_tip` argument. It is also no longer hidden. (#30635) +- The waitforblock and waitforblockheight RPCs are no longer hidden. (#30635) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index ef1a7ed39398..a51c4843b79c 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -3437,9 +3437,9 @@ void RegisterBlockchainRPCCommands(CRPCTable& t) {"blockchain", &getchainstates}, {"hidden", &invalidateblock}, {"hidden", &reconsiderblock}, - {"hidden", &waitfornewblock}, - {"hidden", &waitforblock}, - {"hidden", &waitforblockheight}, + {"blockchain", &waitfornewblock}, + {"blockchain", &waitforblock}, + {"blockchain", &waitforblockheight}, {"hidden", &syncwithvalidationinterfacequeue}, }; for (const auto& c : commands) { From 832c57a53410870471a52647ce107454de3bc69c Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Wed, 7 May 2025 22:13:27 +0000 Subject: [PATCH 006/390] thread-safety: modernize thread safety macros Clang added new "capability"-based thread-safety attributes years ago, but the old ones remain supported for backwards-compatibility. However, while adding annotations for our reverse_lock, I noticed that there is a difference between the unlock_function and release_capability attributes. 
unlock_function actually maps to release_generic_capability, which does not work properly when implementing a scoped unlocker. To be consistent, the other capability-based attributes are updated here as well. To avoid having to update our macro usage throughout the codebase, I reused our existing ones. Additionally, SHARED_UNLOCK_FUNCTION is added here, as a subsequent PR will introduce annotations for shared_mutex and shared_lock. --- src/threadsafety.h | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/threadsafety.h b/src/threadsafety.h index 2e9a39bfc95e..d897dd2464f4 100644 --- a/src/threadsafety.h +++ b/src/threadsafety.h @@ -15,23 +15,24 @@ // See https://clang.llvm.org/docs/ThreadSafetyAnalysis.html // for documentation. The clang compiler can do advanced static analysis // of locking when given the -Wthread-safety option. -#define LOCKABLE __attribute__((lockable)) +#define LOCKABLE __attribute__((capability(""))) #define SCOPED_LOCKABLE __attribute__((scoped_lockable)) #define GUARDED_BY(x) __attribute__((guarded_by(x))) #define PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x))) #define ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) #define ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__))) -#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__))) -#define SHARED_LOCK_FUNCTION(...) __attribute__((shared_lock_function(__VA_ARGS__))) -#define EXCLUSIVE_TRYLOCK_FUNCTION(...) __attribute__((exclusive_trylock_function(__VA_ARGS__))) -#define SHARED_TRYLOCK_FUNCTION(...) __attribute__((shared_trylock_function(__VA_ARGS__))) -#define UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__))) +#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((acquire_capability(__VA_ARGS__))) +#define SHARED_LOCK_FUNCTION(...) __attribute__((acquire_shared_capability(__VA_ARGS__))) +#define EXCLUSIVE_TRYLOCK_FUNCTION(...) __attribute__((try_acquire_capability(__VA_ARGS__))) +#define SHARED_TRYLOCK_FUNCTION(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) +#define UNLOCK_FUNCTION(...) __attribute__((release_capability(__VA_ARGS__))) +#define SHARED_UNLOCK_FUNCTION(...) __attribute__((release_shared_capability(__VA_ARGS__))) #define LOCK_RETURNED(x) __attribute__((lock_returned(x))) #define LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__))) -#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__))) -#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__))) +#define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((requires_capability(__VA_ARGS__))) +#define SHARED_LOCKS_REQUIRED(...) __attribute__((requires_shared_capability(__VA_ARGS__))) #define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis)) -#define ASSERT_EXCLUSIVE_LOCK(...) __attribute__((assert_exclusive_lock(__VA_ARGS__))) +#define ASSERT_EXCLUSIVE_LOCK(...) __attribute__((assert_capability(__VA_ARGS__))) #else #define LOCKABLE #define SCOPED_LOCKABLE @@ -44,6 +45,7 @@ #define EXCLUSIVE_TRYLOCK_FUNCTION(...) #define SHARED_TRYLOCK_FUNCTION(...) #define UNLOCK_FUNCTION(...) +#define SHARED_UNLOCK_FUNCTION(...) #define LOCK_RETURNED(x) #define LOCKS_EXCLUDED(...) #define EXCLUSIVE_LOCKS_REQUIRED(...) 
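For reference, a minimal usage sketch of these macros under clang's `-Wthread-safety` analysis; the names are illustrative only, and a locally annotated mutex wrapper is assumed rather than the project's own `Mutex` type:

```cpp
// Sketch only: a LOCKABLE wrapper exposes acquire/release capabilities, a
// guarded member records which capability protects it, and callers of
// Get() must already hold the lock or clang emits a -Wthread-safety warning.
#include <threadsafety.h>

#include <mutex>

class LOCKABLE SimpleMutex
{
    std::mutex m_inner;

public:
    void lock() EXCLUSIVE_LOCK_FUNCTION() { m_inner.lock(); }
    void unlock() UNLOCK_FUNCTION() { m_inner.unlock(); }
};

class Counter
{
    SimpleMutex m_mutex;
    int m_value GUARDED_BY(m_mutex) = 0;

public:
    int Get() const EXCLUSIVE_LOCKS_REQUIRED(m_mutex) { return m_value; }

    void Increment()
    {
        m_mutex.lock();
        ++m_value;
        m_mutex.unlock();
    }
};
```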
From aeea5f0ec112f9ec29da37806925e961c4998365 Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Thu, 8 May 2025 13:30:12 +0000 Subject: [PATCH 007/390] thread-safety: add missing lock annotation No warning is currently emitted because our reverse_lock does not enforce our thread-safety annotations. Once it is fixed, the unlock would cause a warning. --- src/node/interfaces.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index 8aec2758f8b3..3f9d14c32815 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -431,7 +431,7 @@ class NodeImpl : public Node }; // NOLINTNEXTLINE(misc-no-recursion) -bool FillBlock(const CBlockIndex* index, const FoundBlock& block, UniqueLock& lock, const CChain& active, const BlockManager& blockman) +bool FillBlock(const CBlockIndex* index, const FoundBlock& block, UniqueLock& lock, const CChain& active, const BlockManager& blockman) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { if (!index) return false; if (block.m_hash) *block.m_hash = index->GetBlockHash(); From 2c43b6adebbfabb3c8dd82fe821ce0a5d6173b3b Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Thu, 15 May 2025 17:45:44 -0400 Subject: [PATCH 008/390] init: cap -maxmempool to 500 MB on 32-bit systems 32-bit architecture is limited to 4GiB, so it doesn't make sense to set a too high value. 500 MB is chosen as an arbitrary maximum value that seems reasonable. --- src/node/mempool_args.cpp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/node/mempool_args.cpp b/src/node/mempool_args.cpp index 6dbba7838155..58ec730b965b 100644 --- a/src/node/mempool_args.cpp +++ b/src/node/mempool_args.cpp @@ -25,6 +25,9 @@ using common::AmountErrMsg; using kernel::MemPoolLimits; using kernel::MemPoolOptions; +//! Maximum mempool size on 32-bit systems. +static constexpr int MAX_32BIT_MEMPOOL_MB{500}; + namespace { void ApplyArgsManOptions(const ArgsManager& argsman, MemPoolLimits& mempool_limits) { @@ -42,7 +45,13 @@ util::Result ApplyArgsManOptions(const ArgsManager& argsman, const CChainP { mempool_opts.check_ratio = argsman.GetIntArg("-checkmempool", mempool_opts.check_ratio); - if (auto mb = argsman.GetIntArg("-maxmempool")) mempool_opts.max_size_bytes = *mb * 1'000'000; + if (auto mb = argsman.GetIntArg("-maxmempool")) { + constexpr bool is_32bit{sizeof(void*) == 4}; + if (is_32bit && *mb > MAX_32BIT_MEMPOOL_MB) { + return util::Error{Untranslated(strprintf("-maxmempool is set to %i but can't be over %i MB on 32-bit systems", *mb, MAX_32BIT_MEMPOOL_MB))}; + } + mempool_opts.max_size_bytes = *mb * 1'000'000; + } if (auto hours = argsman.GetIntArg("-mempoolexpiry")) mempool_opts.expiry = std::chrono::hours{*hours}; From 9f8e7b0b3b787b873045a4a8194e77d0b0a2b3b6 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Fri, 16 May 2025 09:12:25 -0400 Subject: [PATCH 009/390] node: cap -dbcache to 1GiB on 32-bit architectures 32-bit architecture is limited to 4GiB, so it doesn't make sense to set a too high value. Since this setting is performance critical, pick an arbitrary value higher than for -maxmempool but still reasonable. --- src/node/caches.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/node/caches.cpp b/src/node/caches.cpp index 8b432637c734..d5d69fc20443 100644 --- a/src/node/caches.cpp +++ b/src/node/caches.cpp @@ -19,6 +19,8 @@ static constexpr size_t MAX_TX_INDEX_CACHE{1024_MiB}; //! Max memory allocated to all block filter index caches combined in bytes. 
static constexpr size_t MAX_FILTER_INDEX_CACHE{1024_MiB}; +//! Maximum dbcache size on 32-bit systems. +static constexpr size_t MAX_32BIT_DBCACHE{1024_MiB}; namespace node { CacheSizes CalculateCacheSizes(const ArgsManager& args, size_t n_indexes) @@ -28,7 +30,8 @@ CacheSizes CalculateCacheSizes(const ArgsManager& args, size_t n_indexes) if (std::optional db_cache = args.GetIntArg("-dbcache")) { if (*db_cache < 0) db_cache = 0; uint64_t db_cache_bytes = SaturatingLeftShift(*db_cache, 20); - total_cache = std::max(MIN_DB_CACHE, std::min(db_cache_bytes, std::numeric_limits::max())); + constexpr auto max_db_cache{sizeof(void*) == 4 ? MAX_32BIT_DBCACHE : std::numeric_limits::max()}; + total_cache = std::max(MIN_DB_CACHE, std::min(db_cache_bytes, max_db_cache)); } IndexCacheSizes index_sizes; From 5332082d009914a8eeb087cc5057c0cc11f3207e Mon Sep 17 00:00:00 2001 From: will Date: Mon, 19 May 2025 10:28:51 +0100 Subject: [PATCH 010/390] doc: add alpine build instructions --- doc/build-unix.md | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/doc/build-unix.md b/doc/build-unix.md index d8857d43a79b..8ffa4edfa6a3 100644 --- a/doc/build-unix.md +++ b/doc/build-unix.md @@ -139,6 +139,46 @@ The GUI will be able to encode addresses in QR codes unless this feature is expl Otherwise, if you don't need QR encoding support, use the `-DWITH_QRENCODE=OFF` option to disable this feature in order to compile the GUI. +### Alpine + +#### Dependency Build Instructions + +Build requirements: + + apk add build-base cmake linux-headers pkgconf python3 + +Now, you can either build from self-compiled [depends](#dependencies) or install the required dependencies: + + apk add libevent-dev boost-dev + +SQLite is required for the descriptor wallet: + + apk add sqlite-dev + +To build Bitcoin Core without wallet, see [*Disable-wallet mode*](#disable-wallet-mode) + +ZMQ dependencies (provides ZMQ API): + + apk add zeromq-dev + +User-Space, Statically Defined Tracing (USDT) is not directly compatible on +Alpine as it uses libc functionality not available on musl. + +GUI dependencies: + +Bitcoin Core includes a GUI built with the cross-platform Qt Framework. To compile the GUI, we need to install +the necessary parts of Qt, the libqrencode and pass `-DBUILD_GUI=ON`. Skip if you don't intend to use the GUI. + + apk add qt6-qtbase-dev qt6-qttools-dev + +For Qt 6.5 and later, the `xcb-util-cursor` package must be installed at runtime. + +The GUI will be able to encode addresses in QR codes unless this feature is explicitly disabled. To install libqrencode, run: + + apk add libqrencode-dev + +Otherwise, if you don't need QR encoding support, use the `-DWITH_QRENCODE=OFF` option to disable this feature in order to compile the GUI. + ## Dependencies See [dependencies.md](dependencies.md) for a complete overview, and From 5fe4c66462e6149c2ed3ce24224a7a7b328a2cfa Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 29 Jan 2024 17:32:02 -0500 Subject: [PATCH 011/390] XOnlyPubKey: Add GetCPubKeys We need to retrieve the even and odd compressed pubkeys for xonly pubkeys, so add a function to do that. Also reuse it in GetKeyIDs. 
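As an illustration of the intended call pattern (not part of this diff), a sketch of resolving an x-only key against records indexed by `CKeyID`; the helper function and the map are hypothetical:

```cpp
// Sketch only: an x-only key corresponds to two candidate compressed keys
// (0x02... and 0x03...), so any CKeyID-indexed lookup must try both.
#include <pubkey.h>

#include <map>

bool HaveRecordForXOnly(const XOnlyPubKey& xonly,
                        const std::map<CKeyID, int>& records_by_keyid)
{
    for (const CPubKey& pubkey : xonly.GetCPubKeys()) {
        if (records_by_keyid.count(pubkey.GetID()) > 0) return true;
    }
    return false;
}
```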
--- src/pubkey.cpp | 20 +++++++++++++------- src/pubkey.h | 4 ++++ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/pubkey.cpp b/src/pubkey.cpp index a4ca9a170a9b..6041c89e7f10 100644 --- a/src/pubkey.cpp +++ b/src/pubkey.cpp @@ -197,20 +197,26 @@ constexpr XOnlyPubKey XOnlyPubKey::NUMS_H{ []() consteval { return XOnlyPubKey{"50929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0"_hex_u8}; }(), }; -std::vector XOnlyPubKey::GetKeyIDs() const +std::vector XOnlyPubKey::GetCPubKeys() const { - std::vector out; - // For now, use the old full pubkey-based key derivation logic. As it is indexed by - // Hash160(full pubkey), we need to return both a version prefixed with 0x02, and one - // with 0x03. + std::vector out; unsigned char b[33] = {0x02}; std::copy(m_keydata.begin(), m_keydata.end(), b + 1); CPubKey fullpubkey; fullpubkey.Set(b, b + 33); - out.push_back(fullpubkey.GetID()); + out.push_back(fullpubkey); b[0] = 0x03; fullpubkey.Set(b, b + 33); - out.push_back(fullpubkey.GetID()); + out.push_back(fullpubkey); + return out; +} + +std::vector XOnlyPubKey::GetKeyIDs() const +{ + std::vector out; + for (const CPubKey& pk : GetCPubKeys()) { + out.push_back(pk.GetID()); + } return out; } diff --git a/src/pubkey.h b/src/pubkey.h index cbc827dc6068..442dc2d64319 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -283,9 +283,13 @@ class XOnlyPubKey std::optional> CreateTapTweak(const uint256* merkle_root) const; /** Returns a list of CKeyIDs for the CPubKeys that could have been used to create this XOnlyPubKey. + * As the CKeyID is the Hash160(full pubkey), the produced CKeyIDs are for the versions of this + * XOnlyPubKey with 0x02 and 0x03 prefixes. * This is needed for key lookups since keys are indexed by CKeyID. */ std::vector GetKeyIDs() const; + /** Returns this XOnlyPubKey with 0x02 and 0x03 prefixes */ + std::vector GetCPubKeys() const; CPubKey GetEvenCorrespondingCPubKey() const; From 69f588a99a7a79d1d72300bc0f2c8475f95f6c6a Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Tue, 13 May 2025 14:33:45 -0700 Subject: [PATCH 012/390] wallet: Set upgraded descriptor cache flag for newly created wallets Although WalletBatch::LoadWallet performs the descriptor cache upgrade, because new wallets do not have the descriptor flag set yet, the upgrade does not run and set the flag. Since new wallets will always being using the upgraded cache, there's no reason to wait to set the flag, so set it when the wallet flags are being initialized for new wallets. --- src/wallet/wallet.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 4a4aa837eaad..75d34e7215e7 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -2904,7 +2904,9 @@ std::shared_ptr CWallet::Create(WalletContext& context, const std::stri // ensure this wallet.dat can only be opened by clients supporting HD with chain split and expects no default key walletInstance->SetMinVersion(FEATURE_LATEST); - walletInstance->InitWalletFlags(wallet_creation_flags); + // Init with passed flags. + // Always set the cache upgrade flag as this feature is supported from the beginning. 
+ walletInstance->InitWalletFlags(wallet_creation_flags | WALLET_FLAG_LAST_HARDENED_XPUB_CACHED); // Only descriptor wallets can be created assert(walletInstance->IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)); From bc2a26b296238cbead6012c071bc7741c40fbd02 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 12 May 2025 14:00:28 -0700 Subject: [PATCH 013/390] wallet: Add GetWalletFlags --- src/wallet/wallet.cpp | 5 +++++ src/wallet/wallet.h | 2 ++ 2 files changed, 7 insertions(+) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 75d34e7215e7..ab04864f9ec1 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1739,6 +1739,11 @@ void CWallet::InitWalletFlags(uint64_t flags) if (!LoadWalletFlags(flags)) assert(false); } +uint64_t CWallet::GetWalletFlags() const +{ + return m_wallet_flags; +} + void CWallet::MaybeUpdateBirthTime(int64_t time) { int64_t birthtime = m_birth_time.load(); diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index fbc3bed2ab67..989db8e79df1 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -906,6 +906,8 @@ class CWallet final : public WalletStorage, public interfaces::Chain::Notificati void InitWalletFlags(uint64_t flags); /** Loads the flags into the wallet. (used by LoadWallet) */ bool LoadWalletFlags(uint64_t flags); + //! Retrieve all of the wallet's flags + uint64_t GetWalletFlags() const; /** Returns a bracketed wallet name for displaying in logs, will return [default wallet] if the wallet has no name */ std::string GetDisplayName() const override From 47237cd1938058b29fdec242c3a37611e255fda0 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Thu, 22 May 2025 17:29:21 -0700 Subject: [PATCH 014/390] wallet, rpc: Output wallet flags in getwalletinfo --- src/wallet/rpc/wallet.cpp | 25 ++++++++++++++++++++++--- src/wallet/wallet.h | 26 ++++++++++++++++++-------- test/functional/wallet_avoidreuse.py | 2 ++ test/functional/wallet_createwallet.py | 1 + 4 files changed, 43 insertions(+), 11 deletions(-) diff --git a/src/wallet/rpc/wallet.cpp b/src/wallet/rpc/wallet.cpp index c2db094fba5a..ac371ab004e5 100644 --- a/src/wallet/rpc/wallet.cpp +++ b/src/wallet/rpc/wallet.cpp @@ -62,6 +62,10 @@ static RPCHelpMan getwalletinfo() {RPCResult::Type::BOOL, "external_signer", "whether this wallet is configured to use an external signer such as a hardware wallet"}, {RPCResult::Type::BOOL, "blank", "Whether this wallet intentionally does not contain any keys, scripts, or descriptors"}, {RPCResult::Type::NUM_TIME, "birthtime", /*optional=*/true, "The start time for blocks scanning. 
It could be modified by (re)importing any descriptor with an earlier timestamp."}, + {RPCResult::Type::ARR, "flags", "The flags currently set on the wallet", + { + {RPCResult::Type::STR, "flag", "The name of the flag"}, + }}, RESULT_LAST_PROCESSED_BLOCK, }}, }, @@ -121,6 +125,21 @@ static RPCHelpMan getwalletinfo() obj.pushKV("birthtime", birthtime); } + // Push known flags + UniValue flags(UniValue::VARR); + uint64_t wallet_flags = pwallet->GetWalletFlags(); + for (uint64_t i = 0; i < 64; ++i) { + uint64_t flag = uint64_t{1} << i; + if (flag & wallet_flags) { + if (flag & KNOWN_WALLET_FLAGS) { + flags.push_back(WALLET_FLAG_TO_STRING.at(WalletFlags{flag})); + } else { + flags.push_back(strprintf("unknown_flag_%u", i)); + } + } + } + obj.pushKV("flags", flags); + AppendLastProcessedBlock(obj, *pwallet); return obj; }, @@ -263,7 +282,7 @@ static RPCHelpMan loadwallet() static RPCHelpMan setwalletflag() { std::string flags; - for (auto& it : WALLET_FLAG_MAP) + for (auto& it : STRING_TO_WALLET_FLAG) if (it.second & MUTABLE_WALLET_FLAGS) flags += (flags == "" ? "" : ", ") + it.first; @@ -294,11 +313,11 @@ static RPCHelpMan setwalletflag() std::string flag_str = request.params[0].get_str(); bool value = request.params[1].isNull() || request.params[1].get_bool(); - if (!WALLET_FLAG_MAP.count(flag_str)) { + if (!STRING_TO_WALLET_FLAG.count(flag_str)) { throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Unknown wallet flag: %s", flag_str)); } - auto flag = WALLET_FLAG_MAP.at(flag_str); + auto flag = STRING_TO_WALLET_FLAG.at(flag_str); if (!(flag & MUTABLE_WALLET_FLAGS)) { throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Wallet flag is immutable: %s", flag_str)); diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index 989db8e79df1..513866786872 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -160,14 +160,24 @@ static constexpr uint64_t KNOWN_WALLET_FLAGS = static constexpr uint64_t MUTABLE_WALLET_FLAGS = WALLET_FLAG_AVOID_REUSE; -static const std::map WALLET_FLAG_MAP{ - {"avoid_reuse", WALLET_FLAG_AVOID_REUSE}, - {"blank", WALLET_FLAG_BLANK_WALLET}, - {"key_origin_metadata", WALLET_FLAG_KEY_ORIGIN_METADATA}, - {"last_hardened_xpub_cached", WALLET_FLAG_LAST_HARDENED_XPUB_CACHED}, - {"disable_private_keys", WALLET_FLAG_DISABLE_PRIVATE_KEYS}, - {"descriptor_wallet", WALLET_FLAG_DESCRIPTORS}, - {"external_signer", WALLET_FLAG_EXTERNAL_SIGNER} +static const std::map WALLET_FLAG_TO_STRING{ + {WALLET_FLAG_AVOID_REUSE, "avoid_reuse"}, + {WALLET_FLAG_BLANK_WALLET, "blank"}, + {WALLET_FLAG_KEY_ORIGIN_METADATA, "key_origin_metadata"}, + {WALLET_FLAG_LAST_HARDENED_XPUB_CACHED, "last_hardened_xpub_cached"}, + {WALLET_FLAG_DISABLE_PRIVATE_KEYS, "disable_private_keys"}, + {WALLET_FLAG_DESCRIPTORS, "descriptor_wallet"}, + {WALLET_FLAG_EXTERNAL_SIGNER, "external_signer"} +}; + +static const std::map STRING_TO_WALLET_FLAG{ + {WALLET_FLAG_TO_STRING.at(WALLET_FLAG_AVOID_REUSE), WALLET_FLAG_AVOID_REUSE}, + {WALLET_FLAG_TO_STRING.at(WALLET_FLAG_BLANK_WALLET), WALLET_FLAG_BLANK_WALLET}, + {WALLET_FLAG_TO_STRING.at(WALLET_FLAG_KEY_ORIGIN_METADATA), WALLET_FLAG_KEY_ORIGIN_METADATA}, + {WALLET_FLAG_TO_STRING.at(WALLET_FLAG_LAST_HARDENED_XPUB_CACHED), WALLET_FLAG_LAST_HARDENED_XPUB_CACHED}, + {WALLET_FLAG_TO_STRING.at(WALLET_FLAG_DISABLE_PRIVATE_KEYS), WALLET_FLAG_DISABLE_PRIVATE_KEYS}, + {WALLET_FLAG_TO_STRING.at(WALLET_FLAG_DESCRIPTORS), WALLET_FLAG_DESCRIPTORS}, + {WALLET_FLAG_TO_STRING.at(WALLET_FLAG_EXTERNAL_SIGNER), WALLET_FLAG_EXTERNAL_SIGNER} }; /** A wrapper to reserve an address from a 
wallet diff --git a/test/functional/wallet_avoidreuse.py b/test/functional/wallet_avoidreuse.py index 2ae153a937e5..44fa16ff64c8 100755 --- a/test/functional/wallet_avoidreuse.py +++ b/test/functional/wallet_avoidreuse.py @@ -104,7 +104,9 @@ def test_persistence(self): # Flags should be node1.avoid_reuse=false, node2.avoid_reuse=true assert_equal(self.nodes[0].getwalletinfo()["avoid_reuse"], False) + assert_equal(sorted(self.nodes[0].getwalletinfo()["flags"]), sorted(["descriptor_wallet", "last_hardened_xpub_cached"])) assert_equal(self.nodes[1].getwalletinfo()["avoid_reuse"], True) + assert_equal(sorted(self.nodes[1].getwalletinfo()["flags"]), sorted(["descriptor_wallet", "last_hardened_xpub_cached", "avoid_reuse"])) self.restart_node(1) self.connect_nodes(0, 1) diff --git a/test/functional/wallet_createwallet.py b/test/functional/wallet_createwallet.py index 312d22fce484..a2e7aae33ea4 100755 --- a/test/functional/wallet_createwallet.py +++ b/test/functional/wallet_createwallet.py @@ -44,6 +44,7 @@ def run_test(self): assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getrawchangeaddress) import_res = w1.importdescriptors([{"desc": w0.getaddressinfo(address1)['desc'], "timestamp": "now"}]) assert_equal(import_res[0]["success"], True) + assert_equal(sorted(w1.getwalletinfo()["flags"]), sorted(["last_hardened_xpub_cached", "descriptor_wallet", "disable_private_keys"])) self.log.info('Test that private keys cannot be imported') privkey, pubkey = generate_keypair(wif=True) From d04f6a97ba9a55aa9455e1a805feeed4d630f59a Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 11 Dec 2024 13:05:21 -0500 Subject: [PATCH 015/390] refactor: remove sqlite dir path back-and-forth conversion --- src/wallet/sqlite.cpp | 6 +++--- src/wallet/sqlite.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp index 9b6215f9371f..2886d9b22dfd 100644 --- a/src/wallet/sqlite.cpp +++ b/src/wallet/sqlite.cpp @@ -112,12 +112,12 @@ Mutex SQLiteDatabase::g_sqlite_mutex; int SQLiteDatabase::g_sqlite_count = 0; SQLiteDatabase::SQLiteDatabase(const fs::path& dir_path, const fs::path& file_path, const DatabaseOptions& options, bool mock) - : WalletDatabase(), m_mock(mock), m_dir_path(fs::PathToString(dir_path)), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync) + : WalletDatabase(), m_mock(mock), m_dir_path(dir_path), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync) { { LOCK(g_sqlite_mutex); LogPrintf("Using SQLite Version %s\n", SQLiteDatabaseVersion()); - LogPrintf("Using wallet %s\n", m_dir_path); + LogPrintf("Using wallet %s\n", fs::PathToString(m_dir_path)); if (++g_sqlite_count == 1) { // Setup logging @@ -253,7 +253,7 @@ void SQLiteDatabase::Open() if (m_db == nullptr) { if (!m_mock) { - TryCreateDirectories(fs::PathFromString(m_dir_path)); + TryCreateDirectories(m_dir_path); } int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr); if (ret != SQLITE_OK) { diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h index 14ad38792c42..cb0c608dc609 100644 --- a/src/wallet/sqlite.h +++ b/src/wallet/sqlite.h @@ -104,7 +104,7 @@ class SQLiteDatabase : public WalletDatabase private: const bool m_mock{false}; - const std::string m_dir_path; + const fs::path m_dir_path; const std::string m_file_path; From 1de423e0a08bbc63eed36c8772e9ef8b48e80fb8 Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 11 Dec 2024 13:10:01 -0500 
Subject: [PATCH 016/390] wallet: introduce method to return all db created files --- src/wallet/db.h | 3 +++ src/wallet/migrate.h | 1 + src/wallet/sqlite.h | 8 ++++++++ src/wallet/test/util.h | 1 + 4 files changed, 13 insertions(+) diff --git a/src/wallet/db.h b/src/wallet/db.h index 7870dcc4b22e..96e1e5630e19 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -155,6 +155,9 @@ class WalletDatabase /** Return path to main database file for logs and error messages. */ virtual std::string Filename() = 0; + /** Return paths to all database created files */ + virtual std::vector Files() = 0; + virtual std::string Format() = 0; /** Make a DatabaseBatch connected to this database */ diff --git a/src/wallet/migrate.h b/src/wallet/migrate.h index 6f388809f0c8..e72df288521a 100644 --- a/src/wallet/migrate.h +++ b/src/wallet/migrate.h @@ -50,6 +50,7 @@ class BerkeleyRODatabase : public WalletDatabase /** Return path to main database file for logs and error messages. */ std::string Filename() override { return fs::PathToString(m_filepath); } + std::vector Files() override { return {m_filepath}; } std::string Format() override { return "bdb_ro"; } diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h index cb0c608dc609..895cfb12dd4b 100644 --- a/src/wallet/sqlite.h +++ b/src/wallet/sqlite.h @@ -147,6 +147,14 @@ class SQLiteDatabase : public WalletDatabase bool Backup(const std::string& dest) const override; std::string Filename() override { return m_file_path; } + /** Return paths to all database created files */ + std::vector Files() override + { + std::vector files; + files.emplace_back(m_dir_path / fs::PathFromString(m_file_path)); + files.emplace_back(m_dir_path / fs::PathFromString(m_file_path + "-journal")); + return files; + } std::string Format() override { return "sqlite"; } /** Make a SQLiteBatch connected to this database */ diff --git a/src/wallet/test/util.h b/src/wallet/test/util.h index 7a19806731bd..4c84a1713f25 100644 --- a/src/wallet/test/util.h +++ b/src/wallet/test/util.h @@ -109,6 +109,7 @@ class MockableDatabase : public WalletDatabase void Close() override {} std::string Filename() override { return "mockable"; } + std::vector Files() override { return {}; } std::string Format() override { return "mock"; } std::unique_ptr MakeBatch() override { return std::make_unique(m_records, m_pass); } }; From e86d71b749c08bde6002b9aa2baee824975a518a Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 28 May 2025 06:17:54 -0400 Subject: [PATCH 017/390] wallet: refactor, dedup wallet re-loading code --- src/wallet/wallet.cpp | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 6542abc23df2..fb5c74c9f7f9 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4314,21 +4314,20 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr std::set wallet_dirs; if (success) { // Migration successful, unload all wallets locally, then reload them. - // Reload the main wallet - wallet_dirs.insert(fs::PathFromString(local_wallet->GetDatabase().Filename()).parent_path()); - success = reload_wallet(local_wallet); + // Note: We use a pointer to the shared_ptr to avoid increasing its reference count, + // as 'reload_wallet' expects to be the sole owner (use_count == 1). 
+ for (std::shared_ptr* wallet_ptr : {&local_wallet, &res.watchonly_wallet, &res.solvables_wallet}) { + if (success && *wallet_ptr) { + std::shared_ptr& wallet = *wallet_ptr; + // Save db path and reload wallet + wallet_dirs.insert(fs::PathFromString(wallet->GetDatabase().Filename()).parent_path()); + success = reload_wallet(wallet); + } + } + + // Set main wallet res.wallet = local_wallet; res.wallet_name = wallet_name; - if (success && res.watchonly_wallet) { - // Reload watchonly - wallet_dirs.insert(fs::PathFromString(res.watchonly_wallet->GetDatabase().Filename()).parent_path()); - success = reload_wallet(res.watchonly_wallet); - } - if (success && res.solvables_wallet) { - // Reload solvables - wallet_dirs.insert(fs::PathFromString(res.solvables_wallet->GetDatabase().Filename()).parent_path()); - success = reload_wallet(res.solvables_wallet); - } } if (!success) { // Migration failed, cleanup From b78990734621b8fe46c68a6e7edaf1fbd2f7d351 Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 4 Dec 2024 12:48:18 -0500 Subject: [PATCH 018/390] wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet Currently, the migration process creates a brand-new descriptor wallet with no connection to the user's legacy wallet when the legacy wallet lacks key material and contains only watch-only scripts. This behavior is not aligned with user expectations. If the legacy wallet contains only watch-only scripts, the migration process should only generate a watch-only wallet instead. --- src/qt/walletcontroller.cpp | 2 +- src/wallet/wallet.cpp | 44 +++++++++++++++++++++++++---- test/functional/wallet_migration.py | 24 ++++++++++++++-- 3 files changed, 60 insertions(+), 10 deletions(-) diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp index dd093e984a37..869f9614c56b 100644 --- a/src/qt/walletcontroller.cpp +++ b/src/qt/walletcontroller.cpp @@ -468,7 +468,7 @@ void MigrateWalletActivity::migrate(const std::string& name) auto res{node().walletLoader().migrateWallet(name, passphrase)}; if (res) { - m_success_message = tr("The wallet '%1' was migrated successfully.").arg(GUIUtil::HtmlEscape(GUIUtil::WalletDisplayName(res->wallet->getWalletName()))); + m_success_message = tr("The wallet '%1' was migrated successfully.").arg(GUIUtil::HtmlEscape(GUIUtil::WalletDisplayName(name))); if (res->watchonly_wallet_name) { m_success_message += QChar(' ') + tr("Watchonly scripts have been migrated to a new wallet named '%1'.").arg(GUIUtil::HtmlEscape(GUIUtil::WalletDisplayName(res->watchonly_wallet_name.value()))); } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index fb5c74c9f7f9..a6a9a6d3c67d 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -3872,6 +3872,9 @@ util::Result CWallet::ApplyMigrationData(WalletBatch& local_wallet_batch, return util::Error{Untranslated(STR_INTERNAL_BUG("Error: Legacy wallet data missing"))}; } + // Note: when the legacy wallet has no spendable scripts, it must be empty at the end of the process. 
+ bool has_spendable_material = !data.desc_spkms.empty() || data.master_key.key.IsValid(); + // Get all invalid or non-watched scripts that will not be migrated std::set not_migrated_dests; for (const auto& script : legacy_spkm->GetNotMineScriptPubKeys()) { @@ -3903,9 +3906,9 @@ util::Result CWallet::ApplyMigrationData(WalletBatch& local_wallet_batch, m_external_spk_managers.clear(); m_internal_spk_managers.clear(); - // Setup new descriptors + // Setup new descriptors (only if we are migrating any key material) SetWalletFlagWithDB(local_wallet_batch, WALLET_FLAG_DESCRIPTORS); - if (!IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { + if (has_spendable_material && !IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { // Use the existing master key if we have it if (data.master_key.key.IsValid()) { SetupDescriptorScriptPubKeyMans(local_wallet_batch, data.master_key); @@ -4055,6 +4058,14 @@ util::Result CWallet::ApplyMigrationData(WalletBatch& local_wallet_batch, } } + // If there was no key material in the main wallet, there should be no records on it anymore. + // This wallet will be discarded at the end of the process. Only wallets that contain the + // migrated records will be presented to the user. + if (!has_spendable_material) { + if (!m_address_book.empty()) return util::Error{_("Error: Not all address book records were migrated")}; + if (!mapWallet.empty()) return util::Error{_("Error: Not all transaction records were migrated")}; + } + return {}; // all good } @@ -4292,6 +4303,14 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } } + // Indicates whether the current wallet is empty after migration. + // Notes: + // When non-empty: the local wallet becomes the main spendable wallet. + // When empty: The local wallet is excluded from the result, as the + // user does not expect an empty spendable wallet after + // migrating only watch-only scripts. + bool empty_local_wallet = false; + { LOCK(local_wallet->cs_wallet); // First change to using SQLite @@ -4300,6 +4319,8 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // Do the migration of keys and scripts for non-empty wallets, and cleanup if it fails if (HasLegacyRecords(*local_wallet)) { success = DoMigration(*local_wallet, context, error, res); + // No scripts mean empty wallet after migration + empty_local_wallet = local_wallet->GetAllScriptPubKeyMans().empty(); } else { // Make sure that descriptors flag is actually set local_wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS); @@ -4313,6 +4334,15 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // fails to reload. std::set wallet_dirs; if (success) { + Assume(!res.wallet); // We will set it here. + // Check if the local wallet is empty after migration + if (empty_local_wallet) { + // This wallet has no records. We can safely remove it. + std::vector paths_to_remove = local_wallet->GetDatabase().Files(); + local_wallet.reset(); + for (const auto& path_to_remove : paths_to_remove) fs::remove_all(path_to_remove); + } + // Migration successful, unload all wallets locally, then reload them. // Note: We use a pointer to the shared_ptr to avoid increasing its reference count, // as 'reload_wallet' expects to be the sole owner (use_count == 1). @@ -4322,12 +4352,14 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // Save db path and reload wallet wallet_dirs.insert(fs::PathFromString(wallet->GetDatabase().Filename()).parent_path()); success = reload_wallet(wallet); + + // When no wallet is set, set the main wallet. 
+ if (!res.wallet) { + res.wallet_name = wallet->GetName(); + res.wallet = std::move(wallet); + } } } - - // Set main wallet - res.wallet = local_wallet; - res.wallet_name = wallet_name; } if (!success) { // Migration failed, cleanup diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 700d801891c3..c9548de1232d 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -3,7 +3,7 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test Migrating a wallet from legacy to descriptor.""" - +import os.path import random import shutil import struct @@ -116,9 +116,14 @@ def migrate_and_get_rpc(self, wallet_name, **kwargs): # Migrate, checking that rescan does not occur with self.master_node.assert_debug_log(expected_msgs=[], unexpected_msgs=["Rescanning"]): migrate_info = self.master_node.migratewallet(wallet_name=wallet_name, **kwargs) + # Update wallet name in case the initial wallet was completely migrated to a watch-only wallet + # (in which case the wallet name would be suffixed by the 'watchonly' term) + wallet_name = migrate_info['wallet_name'] wallet = self.master_node.get_wallet_rpc(wallet_name) assert_equal(wallet.getwalletinfo()["descriptors"], True) self.assert_is_sqlite(wallet_name) + # Always verify the backup path exist after migration + assert os.path.exists(migrate_info['backup_path']) return migrate_info, wallet def test_basic(self): @@ -1014,8 +1019,15 @@ def test_migrate_simple_watch_only(self): wallet.importaddress(address=p2pk_script.hex()) # Migrate wallet in the latest node res, _ = self.migrate_and_get_rpc("bare_p2pk") - wo_wallet = self.master_node.get_wallet_rpc(res['watchonly_name']) + wo_wallet = self.master_node.get_wallet_rpc(res['wallet_name']) assert_equal(wo_wallet.listdescriptors()['descriptors'][0]['desc'], descsum_create(f'pk({pubkey.hex()})')) + assert_equal(wo_wallet.getwalletinfo()["private_keys_enabled"], False) + + # Ensure that migrating a wallet with watch-only scripts does not create a spendable wallet. + assert_equal('bare_p2pk_watchonly', res['wallet_name']) + assert "bare_p2pk" not in self.master_node.listwallets() + assert "bare_p2pk" not in [w["name"] for w in self.master_node.listwalletdir()["wallets"]] + wo_wallet.unloadwallet() def test_manual_keys_import(self): @@ -1045,7 +1057,9 @@ def test_manual_keys_import(self): res, _ = self.migrate_and_get_rpc("import_pubkeys") # Same as before, there should be descriptors in the watch-only wallet for the imported pubkey - wo_wallet = self.nodes[0].get_wallet_rpc(res['watchonly_name']) + wo_wallet = self.nodes[0].get_wallet_rpc(res['wallet_name']) + # Assert this is a watch-only wallet + assert_equal(wo_wallet.getwalletinfo()["private_keys_enabled"], False) # As we imported the pubkey only, there will be no key origin in the following descriptors pk_desc = descsum_create(f'pk({pubkey_hex})') pkh_desc = descsum_create(f'pkh({pubkey_hex})') @@ -1056,6 +1070,10 @@ def test_manual_keys_import(self): # Verify all expected descriptors were migrated migrated_desc = [item['desc'] for item in wo_wallet.listdescriptors()['descriptors']] assert_equal(expected_descs, migrated_desc) + # Ensure that migrating a wallet with watch-only scripts does not create a spendable wallet. 
+ assert_equal('import_pubkeys_watchonly', res['wallet_name']) + assert "import_pubkeys" not in self.master_node.listwallets() + assert "import_pubkeys" not in [w["name"] for w in self.master_node.listwalletdir()["wallets"]] wo_wallet.unloadwallet() def test_p2wsh(self): From b184f5c87c418ea49429e4ce0520c655d458306b Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Fri, 30 May 2025 01:34:33 +0200 Subject: [PATCH 019/390] test: update BIP340 test vectors and implementation (variable-length messages) See https://github.com/bitcoin/bips/pull/1446, commit 200f9b26fe0a2f235a2af8b30c4be9f12f6bc9cb --- test/functional/test_framework/bip340_test_vectors.csv | 4 ++++ test/functional/test_framework/key.py | 4 +--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/test/functional/test_framework/bip340_test_vectors.csv b/test/functional/test_framework/bip340_test_vectors.csv index e068322deb3b..aa317a3b3d53 100644 --- a/test/functional/test_framework/bip340_test_vectors.csv +++ b/test/functional/test_framework/bip340_test_vectors.csv @@ -14,3 +14,7 @@ index,secret key,public key,aux_rand,message,signature,verification result,comme 12,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F69E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,sig[0:32] is equal to field size 13,,DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E177769FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141,FALSE,sig[32:64] is equal to curve order 14,,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30,,243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89,6CFF5C3BA86C69EA4B7376F31A9BCB4F74C1976089B2D9963DA2E5543E17776969E89B4C5564D00349106B8497785DD7D1D713A8AE82B32FA79D5F7FC407D39B,FALSE,public key is not a valid X coordinate because it exceeds the field size +15,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,,71535DB165ECD9FBBC046E5FFAEA61186BB6AD436732FCCC25291A55895464CF6069CE26BF03466228F19A3A62DB8A649F2D560FAC652827D1AF0574E427AB63,TRUE,message of size 0 (added 2022-12) +16,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,11,08A20A0AFEF64124649232E0693C583AB1B9934AE63B4C3511F3AE1134C6A303EA3173BFEA6683BD101FA5AA5DBC1996FE7CACFC5A577D33EC14564CEC2BACBF,TRUE,message of size 1 (added 2022-12) +17,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,0102030405060708090A0B0C0D0E0F1011,5130F39A4059B43BC7CAC09A19ECE52B5D8699D1A71E3C52DA9AFDB6B50AC370C4A482B77BF960F8681540E25B6771ECE1E5A37FD80E5A51897C5566A97EA5A5,TRUE,message of size 17 (added 2022-12) 
+18,0340034003400340034003400340034003400340034003400340034003400340,778CAA53B4393AC467774D09497A87224BF9FAB6F6E68B23086497324D6FD117,0000000000000000000000000000000000000000000000000000000000000000,99999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999,403B12B0D8555A344175EA7EC746566303321E5DBFA8BE6F091635163ECA79A8585ED3E3170807E7C03B720FC54C7B23897FCBA0E9D0B4A06894CFD249F22367,TRUE,message of size 100 (added 2022-12) diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index 682c2de35feb..558dcbf23edd 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -242,10 +242,9 @@ def verify_schnorr(key, sig, msg): - key is a 32-byte xonly pubkey (computed using compute_xonly_pubkey). - sig is a 64-byte Schnorr signature - - msg is a 32-byte message + - msg is a variable-length message """ assert len(key) == 32 - assert len(msg) == 32 assert len(sig) == 64 P = secp256k1.GE.from_bytes_xonly(key) @@ -272,7 +271,6 @@ def sign_schnorr(key, msg, aux=None, flip_p=False, flip_r=False): aux = bytes(32) assert len(key) == 32 - assert len(msg) == 32 assert len(aux) == 32 sec = int.from_bytes(key, 'big') From 6135e0553e6e58fcf506700991fa178f2c50a266 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 12 May 2025 13:29:50 -0700 Subject: [PATCH 020/390] wallet, rpc: Move (Un)LockCoin WalletBatch creation out of RPC If the locked coin needs to be persisted to the wallet database, insteead of having the RPC figure out when to create a WalletBatch and having LockCoin's behavior depend on it, have LockCoin take whether to persist as a parameter so it makes the batch. Since unlocking a persisted locked coin requires a database write as well, we need to track whether the locked coin was persisted to the wallet database so that it can erase the locked coin when necessary. Keeping track of whether a locked coin was persisted is also useful information for future PRs. --- src/wallet/interfaces.cpp | 6 ++-- src/wallet/rpc/coins.cpp | 8 ++--- src/wallet/rpc/spend.cpp | 2 +- src/wallet/spend.cpp | 2 +- src/wallet/test/wallet_tests.cpp | 4 +-- src/wallet/wallet.cpp | 57 +++++++++++++++++--------------- src/wallet/wallet.h | 15 +++++---- src/wallet/walletdb.cpp | 2 +- 8 files changed, 49 insertions(+), 47 deletions(-) diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp index 0048c025a292..ad75e69eccc4 100644 --- a/src/wallet/interfaces.cpp +++ b/src/wallet/interfaces.cpp @@ -249,14 +249,12 @@ class WalletImpl : public Wallet bool lockCoin(const COutPoint& output, const bool write_to_db) override { LOCK(m_wallet->cs_wallet); - std::unique_ptr batch = write_to_db ? 
std::make_unique(m_wallet->GetDatabase()) : nullptr; - return m_wallet->LockCoin(output, batch.get()); + return m_wallet->LockCoin(output, write_to_db); } bool unlockCoin(const COutPoint& output) override { LOCK(m_wallet->cs_wallet); - std::unique_ptr batch = std::make_unique(m_wallet->GetDatabase()); - return m_wallet->UnlockCoin(output, batch.get()); + return m_wallet->UnlockCoin(output); } bool isLockedCoin(const COutPoint& output) override { diff --git a/src/wallet/rpc/coins.cpp b/src/wallet/rpc/coins.cpp index cce9b26babef..9351259a5a56 100644 --- a/src/wallet/rpc/coins.cpp +++ b/src/wallet/rpc/coins.cpp @@ -356,16 +356,12 @@ RPCHelpMan lockunspent() outputs.push_back(outpt); } - std::unique_ptr batch = nullptr; - // Unlock is always persistent - if (fUnlock || persistent) batch = std::make_unique(pwallet->GetDatabase()); - // Atomically set (un)locked status for the outputs. for (const COutPoint& outpt : outputs) { if (fUnlock) { - if (!pwallet->UnlockCoin(outpt, batch.get())) throw JSONRPCError(RPC_WALLET_ERROR, "Unlocking coin failed"); + if (!pwallet->UnlockCoin(outpt)) throw JSONRPCError(RPC_WALLET_ERROR, "Unlocking coin failed"); } else { - if (!pwallet->LockCoin(outpt, batch.get())) throw JSONRPCError(RPC_WALLET_ERROR, "Locking coin failed"); + if (!pwallet->LockCoin(outpt, persistent)) throw JSONRPCError(RPC_WALLET_ERROR, "Locking coin failed"); } } diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp index 4c4b62883691..4421822dd19c 100644 --- a/src/wallet/rpc/spend.cpp +++ b/src/wallet/rpc/spend.cpp @@ -1579,7 +1579,7 @@ RPCHelpMan sendall() const bool lock_unspents{options.exists("lock_unspents") ? options["lock_unspents"].get_bool() : false}; if (lock_unspents) { for (const CTxIn& txin : rawTx.vin) { - pwallet->LockCoin(txin.prevout); + pwallet->LockCoin(txin.prevout, /*persist=*/false); } } diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp index a330c31a87a7..13f7d3d61a7d 100644 --- a/src/wallet/spend.cpp +++ b/src/wallet/spend.cpp @@ -1467,7 +1467,7 @@ util::Result FundTransaction(CWallet& wallet, const CM if (lockUnspents) { for (const CTxIn& txin : res->tx->vin) { - wallet.LockCoin(txin.prevout); + wallet.LockCoin(txin.prevout, /*persist=*/false); } } diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 966c6d2c4ba1..fa1416966167 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -458,7 +458,7 @@ BOOST_FIXTURE_TEST_CASE(ListCoinsTest, ListCoinsTestingSetup) for (const auto& group : list) { for (const auto& coin : group.second) { LOCK(wallet->cs_wallet); - wallet->LockCoin(coin.outpoint); + wallet->LockCoin(coin.outpoint, /*persist=*/false); } } { @@ -486,7 +486,7 @@ void TestCoinsResult(ListCoinsTest& context, OutputType out_type, CAmount amount filter.skip_locked = false; CoinsResult available_coins = AvailableCoins(*context.wallet, nullptr, std::nullopt, filter); // Lock outputs so they are not spent in follow-up transactions - for (uint32_t i = 0; i < wtx.tx->vout.size(); i++) context.wallet->LockCoin({wtx.GetHash(), i}); + for (uint32_t i = 0; i < wtx.tx->vout.size(); i++) context.wallet->LockCoin({wtx.GetHash(), i}, /*persist=*/false); for (const auto& [type, size] : expected_coins_sizes) BOOST_CHECK_EQUAL(size, available_coins.coins[type].size()); } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 4a4aa837eaad..6a40cfe97eb1 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -785,16 +785,11 @@ bool CWallet::IsSpent(const COutPoint& outpoint) 
const return false; } -void CWallet::AddToSpends(const COutPoint& outpoint, const Txid& txid, WalletBatch* batch) +void CWallet::AddToSpends(const COutPoint& outpoint, const Txid& txid) { mapTxSpends.insert(std::make_pair(outpoint, txid)); - if (batch) { - UnlockCoin(outpoint, batch); - } else { - WalletBatch temp_batch(GetDatabase()); - UnlockCoin(outpoint, &temp_batch); - } + UnlockCoin(outpoint); std::pair range; range = mapTxSpends.equal_range(outpoint); @@ -802,13 +797,13 @@ void CWallet::AddToSpends(const COutPoint& outpoint, const Txid& txid, WalletBat } -void CWallet::AddToSpends(const CWalletTx& wtx, WalletBatch* batch) +void CWallet::AddToSpends(const CWalletTx& wtx) { if (wtx.IsCoinBase()) // Coinbases don't spend anything! return; for (const CTxIn& txin : wtx.tx->vin) - AddToSpends(txin.prevout, wtx.GetHash(), batch); + AddToSpends(txin.prevout, wtx.GetHash()); } bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase) @@ -1058,7 +1053,7 @@ CWalletTx* CWallet::AddToWallet(CTransactionRef tx, const TxState& state, const wtx.nOrderPos = IncOrderPosNext(&batch); wtx.m_it_wtxOrdered = wtxOrdered.insert(std::make_pair(wtx.nOrderPos, &wtx)); wtx.nTimeSmart = ComputeTimeSmart(wtx, rescanning_old_block); - AddToSpends(wtx, &batch); + AddToSpends(wtx); // Update birth time when tx time is older than it. MaybeUpdateBirthTime(wtx.GetTxTime()); @@ -2622,22 +2617,34 @@ util::Result CWallet::DisplayAddress(const CTxDestination& dest) return util::Error{_("There is no ScriptPubKeyManager for this address")}; } -bool CWallet::LockCoin(const COutPoint& output, WalletBatch* batch) +void CWallet::LoadLockedCoin(const COutPoint& coin, bool persistent) { AssertLockHeld(cs_wallet); - setLockedCoins.insert(output); - if (batch) { - return batch->WriteLockedUTXO(output); + m_locked_coins.emplace(coin, persistent); +} + +bool CWallet::LockCoin(const COutPoint& output, bool persist) +{ + AssertLockHeld(cs_wallet); + LoadLockedCoin(output, persist); + if (persist) { + WalletBatch batch(GetDatabase()); + return batch.WriteLockedUTXO(output); } return true; } -bool CWallet::UnlockCoin(const COutPoint& output, WalletBatch* batch) +bool CWallet::UnlockCoin(const COutPoint& output) { AssertLockHeld(cs_wallet); - bool was_locked = setLockedCoins.erase(output); - if (batch && was_locked) { - return batch->EraseLockedUTXO(output); + auto locked_coin_it = m_locked_coins.find(output); + if (locked_coin_it != m_locked_coins.end()) { + bool persisted = locked_coin_it->second; + m_locked_coins.erase(locked_coin_it); + if (persisted) { + WalletBatch batch(GetDatabase()); + return batch.EraseLockedUTXO(output); + } } return true; } @@ -2647,26 +2654,24 @@ bool CWallet::UnlockAllCoins() AssertLockHeld(cs_wallet); bool success = true; WalletBatch batch(GetDatabase()); - for (auto it = setLockedCoins.begin(); it != setLockedCoins.end(); ++it) { - success &= batch.EraseLockedUTXO(*it); + for (const auto& [coin, persistent] : m_locked_coins) { + if (persistent) success = success && batch.EraseLockedUTXO(coin); } - setLockedCoins.clear(); + m_locked_coins.clear(); return success; } bool CWallet::IsLockedCoin(const COutPoint& output) const { AssertLockHeld(cs_wallet); - return setLockedCoins.count(output) > 0; + return m_locked_coins.count(output) > 0; } void CWallet::ListLockedCoins(std::vector& vOutpts) const { AssertLockHeld(cs_wallet); - for (std::set::iterator it = setLockedCoins.begin(); - it != setLockedCoins.end(); it++) { - COutPoint outpt = (*it); - vOutpts.push_back(outpt); + for (const auto& [coin, _] 
: m_locked_coins) { + vOutpts.push_back(coin); } } diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index fbc3bed2ab67..7fe557f71d7b 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -333,8 +333,8 @@ class CWallet final : public WalletStorage, public interfaces::Chain::Notificati */ typedef std::unordered_multimap TxSpends; TxSpends mapTxSpends GUARDED_BY(cs_wallet); - void AddToSpends(const COutPoint& outpoint, const Txid& txid, WalletBatch* batch = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - void AddToSpends(const CWalletTx& wtx, WalletBatch* batch = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void AddToSpends(const COutPoint& outpoint, const Txid& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void AddToSpends(const CWalletTx& wtx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); /** * Add a transaction to the wallet, or update it. confirm.block_* should @@ -497,8 +497,10 @@ class CWallet final : public WalletStorage, public interfaces::Chain::Notificati /** Set of Coins owned by this wallet that we won't try to spend from. A * Coin may be locked if it has already been used to fund a transaction * that hasn't confirmed yet. We wouldn't consider the Coin spent already, - * but also shouldn't try to use it again. */ - std::set setLockedCoins GUARDED_BY(cs_wallet); + * but also shouldn't try to use it again. + * bool to track whether this locked coin is persisted to disk. + */ + std::map m_locked_coins GUARDED_BY(cs_wallet); /** Registered interfaces::Chain::Notifications handler. */ std::unique_ptr m_chain_notifications_handler; @@ -546,8 +548,9 @@ class CWallet final : public WalletStorage, public interfaces::Chain::Notificati util::Result DisplayAddress(const CTxDestination& dest) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); bool IsLockedCoin(const COutPoint& output) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - bool LockCoin(const COutPoint& output, WalletBatch* batch = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - bool UnlockCoin(const COutPoint& output, WalletBatch* batch = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + void LoadLockedCoin(const COutPoint& coin, bool persistent) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + bool LockCoin(const COutPoint& output, bool persist) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); + bool UnlockCoin(const COutPoint& output) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); bool UnlockAllCoins() EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); void ListLockedCoins(std::vector& vOutpts) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 3bb5db71f5e8..0f0e228fc803 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -1072,7 +1072,7 @@ static DBErrors LoadTxRecords(CWallet* pwallet, DatabaseBatch& batch, std::vecto uint32_t n; key >> hash; key >> n; - pwallet->LockCoin(COutPoint(hash, n)); + pwallet->LoadLockedCoin(COutPoint(hash, n), /*persistent=*/true); return DBErrors::LOAD_OK; }); result = std::max(result, locked_utxo_res.m_result); From 8ba2f9b7c8a6c6a91cc718d256354f7a73083b68 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Thu, 5 Jun 2025 11:32:23 +0200 Subject: [PATCH 021/390] refactor: use util::Result for GetExternalSigner() This commit does not change behavior, except that the error message no longer contains the function name. 
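For readers unfamiliar with the util::Result idiom this commit switches to, here is a minimal sketch of the calling convention (ParsePositive and Demo are hypothetical names, not part of the change; the headers are assumed to be util/result.h and util/translation.h):

```cpp
// Sketch only: a helper returns either a value or an error string, and each
// caller decides whether to throw, log, or surface the message some other way.
#include <util/result.h>
#include <util/translation.h>

#include <stdexcept>

util::Result<int> ParsePositive(int n)
{
    if (n <= 0) return util::Error{Untranslated("value must be positive")};
    return n;
}

void Demo()
{
    auto res{ParsePositive(-1)};
    if (!res) {
        // This caller throws; another caller could log or return an RPC error.
        throw std::runtime_error(util::ErrorString(res).original);
    }
    const int value{*res}; // Dereference only after checking for success.
    (void)value;
}
```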
--- src/wallet/external_signer_scriptpubkeyman.cpp | 17 ++++++++++------- src/wallet/external_signer_scriptpubkeyman.h | 3 ++- src/wallet/wallet.cpp | 10 ++++++---- test/functional/wallet_signer.py | 2 +- 4 files changed, 19 insertions(+), 13 deletions(-) diff --git a/src/wallet/external_signer_scriptpubkeyman.cpp b/src/wallet/external_signer_scriptpubkeyman.cpp index 7268354f1a95..5ffc802ec146 100644 --- a/src/wallet/external_signer_scriptpubkeyman.cpp +++ b/src/wallet/external_signer_scriptpubkeyman.cpp @@ -45,14 +45,14 @@ bool ExternalSignerScriptPubKeyMan::SetupDescriptor(WalletBatch& batch, std::uni return true; } -ExternalSigner ExternalSignerScriptPubKeyMan::GetExternalSigner() { + util::Result ExternalSignerScriptPubKeyMan::GetExternalSigner() { const std::string command = gArgs.GetArg("-signer", ""); - if (command == "") throw std::runtime_error(std::string(__func__) + ": restart bitcoind with -signer="); + if (command == "") return util::Error{Untranslated("restart bitcoind with -signer=")}; std::vector signers; ExternalSigner::Enumerate(command, signers, Params().GetChainTypeString()); - if (signers.empty()) throw std::runtime_error(std::string(__func__) + ": No external signers found"); + if (signers.empty()) return util::Error{Untranslated("No external signers found")}; // TODO: add fingerprint argument instead of failing in case of multiple signers. - if (signers.size() > 1) throw std::runtime_error(std::string(__func__) + ": More than one external signer found. Please connect only one at a time."); + if (signers.size() > 1) return util::Error{Untranslated("More than one external signer found. Please connect only one at a time.")}; return signers[0]; } @@ -93,9 +93,12 @@ std::optional ExternalSignerScriptPubKeyMan::FillPSBT(PartiallySigned } if (complete) return {}; - std::string strFailReason; - if(!GetExternalSigner().SignTransaction(psbt, strFailReason)) { - tfm::format(std::cerr, "Failed to sign: %s\n", strFailReason); + auto signer{GetExternalSigner()}; + if (!signer) throw std::runtime_error(util::ErrorString(signer).original); + + std::string failure_reason; + if(!signer->SignTransaction(psbt, failure_reason)) { + tfm::format(std::cerr, "Failed to sign: %s\n", failure_reason); return PSBTError::EXTERNAL_SIGNER_FAILED; } if (finalize) FinalizePSBT(psbt); // This won't work in a multisig setup diff --git a/src/wallet/external_signer_scriptpubkeyman.h b/src/wallet/external_signer_scriptpubkeyman.h index 66c2dd9d170a..27c0258ea51b 100644 --- a/src/wallet/external_signer_scriptpubkeyman.h +++ b/src/wallet/external_signer_scriptpubkeyman.h @@ -8,6 +8,7 @@ #include #include +#include struct bilingual_str; @@ -27,7 +28,7 @@ class ExternalSignerScriptPubKeyMan : public DescriptorScriptPubKeyMan */ bool SetupDescriptor(WalletBatch& batch, std::unique_ptrdesc); - static ExternalSigner GetExternalSigner(); + static util::Result GetExternalSigner(); /** * Display address on the device and verify that the returned value matches. 
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 6542abc23df2..62a5fba7c407 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -2602,8 +2602,9 @@ util::Result CWallet::DisplayAddress(const CTxDestination& dest) if (signer_spk_man == nullptr) { continue; } - ExternalSigner signer = ExternalSignerScriptPubKeyMan::GetExternalSigner(); - return signer_spk_man->DisplayAddress(dest, signer); + auto signer{ExternalSignerScriptPubKeyMan::GetExternalSigner()}; + if (!signer) throw std::runtime_error(util::ErrorString(signer).original); + return signer_spk_man->DisplayAddress(dest, *signer); } return util::Error{_("There is no ScriptPubKeyManager for this address")}; } @@ -3586,11 +3587,12 @@ void CWallet::SetupDescriptorScriptPubKeyMans() return true; })) throw std::runtime_error("Error: cannot process db transaction for descriptors setup"); } else { - ExternalSigner signer = ExternalSignerScriptPubKeyMan::GetExternalSigner(); + auto signer = ExternalSignerScriptPubKeyMan::GetExternalSigner(); + if (!signer) throw std::runtime_error(util::ErrorString(signer).original); // TODO: add account parameter int account = 0; - UniValue signer_res = signer.GetDescriptors(account); + UniValue signer_res = signer->GetDescriptors(account); if (!signer_res.isObject()) throw std::runtime_error(std::string(__func__) + ": Unexpected result"); diff --git a/test/functional/wallet_signer.py b/test/functional/wallet_signer.py index 3159f66641ed..c9ac876eb71e 100755 --- a/test/functional/wallet_signer.py +++ b/test/functional/wallet_signer.py @@ -243,7 +243,7 @@ def test_multiple_signers(self): self.log.debug(f"-signer={self.mock_multi_signers_path()}") self.log.info('Test multiple external signers') - assert_raises_rpc_error(-1, "GetExternalSigner: More than one external signer found", self.nodes[1].createwallet, wallet_name='multi_hww', disable_private_keys=True, external_signer=True) + assert_raises_rpc_error(-1, "More than one external signer found", self.nodes[1].createwallet, wallet_name='multi_hww', disable_private_keys=True, external_signer=True) if __name__ == '__main__': WalletSignerTest(__file__).main() From 0a4ee93529d68a31f3ba6c7c6009954be47bbbd6 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Thu, 5 Jun 2025 11:46:08 +0200 Subject: [PATCH 022/390] wallet: use PSBTError::EXTERNAL_SIGNER_NOT_FOUND Instead of throwing a runtime error, let the caller decide how to handle a missing signer. GUI code was already in place to handle this, but it was unused until this commit. Fixes #32426 Additionally use LogWarning instead of std::cerr. 
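The shape of the change, as a hypothetical standalone sketch (SignStatus, TrySign and Describe are illustrative names only, not the real PSBTError plumbing): the signing code reports a status and logs a warning, while each caller, RPC or GUI, chooses how to present it.

```cpp
// Sketch only: report a status instead of throwing, so callers stay in control.
#include <optional>
#include <string>

enum class SignStatus { SIGNER_NOT_FOUND, SIGNER_FAILED }; // illustrative

std::optional<SignStatus> TrySign(bool have_signer, bool sign_ok)
{
    if (!have_signer) return SignStatus::SIGNER_NOT_FOUND;
    if (!sign_ok) return SignStatus::SIGNER_FAILED;
    return std::nullopt; // success
}

std::string Describe(SignStatus s)
{
    switch (s) {
    case SignStatus::SIGNER_NOT_FOUND: return "no external signer found";
    case SignStatus::SIGNER_FAILED: return "external signer failed to sign";
    }
    return "unknown";
}
```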
--- src/wallet/external_signer_scriptpubkeyman.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/wallet/external_signer_scriptpubkeyman.cpp b/src/wallet/external_signer_scriptpubkeyman.cpp index 5ffc802ec146..c2b1b6048a87 100644 --- a/src/wallet/external_signer_scriptpubkeyman.cpp +++ b/src/wallet/external_signer_scriptpubkeyman.cpp @@ -94,11 +94,14 @@ std::optional ExternalSignerScriptPubKeyMan::FillPSBT(PartiallySigned if (complete) return {}; auto signer{GetExternalSigner()}; - if (!signer) throw std::runtime_error(util::ErrorString(signer).original); + if (!signer) { + LogWarning("%s", util::ErrorString(signer).original); + return PSBTError::EXTERNAL_SIGNER_NOT_FOUND; + } std::string failure_reason; if(!signer->SignTransaction(psbt, failure_reason)) { - tfm::format(std::cerr, "Failed to sign: %s\n", failure_reason); + LogWarning("Failed to sign: %s\n", failure_reason); return PSBTError::EXTERNAL_SIGNER_FAILED; } if (finalize) FinalizePSBT(psbt); // This won't work in a multisig setup From 95969bc58ae0cd928e536d7cb8541de93e8c7205 Mon Sep 17 00:00:00 2001 From: kevkevinpal Date: Sun, 6 Apr 2025 15:31:07 -0400 Subject: [PATCH 023/390] test: added fuzz coverage to consensus/merkle.cpp This adds a new fuzz target merkle which adds fuzz coverage to consensus/merkle.cpp --- src/test/fuzz/CMakeLists.txt | 1 + src/test/fuzz/merkle.cpp | 92 ++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100644 src/test/fuzz/merkle.cpp diff --git a/src/test/fuzz/CMakeLists.txt b/src/test/fuzz/CMakeLists.txt index 64fbe69dc00e..6ca30c1cbed7 100644 --- a/src/test/fuzz/CMakeLists.txt +++ b/src/test/fuzz/CMakeLists.txt @@ -59,6 +59,7 @@ add_executable(fuzz kitchen_sink.cpp load_external_block_file.cpp locale.cpp + merkle.cpp merkleblock.cpp message.cpp miniscript.cpp diff --git a/src/test/fuzz/merkle.cpp b/src/test/fuzz/merkle.cpp new file mode 100644 index 000000000000..84055ac861cd --- /dev/null +++ b/src/test/fuzz/merkle.cpp @@ -0,0 +1,92 @@ +// Copyright (c) 2025 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +uint256 ComputeMerkleRootFromPath(const CBlock& block, uint32_t position, const std::vector& merkle_path) { + if (position >= block.vtx.size()) { + throw std::out_of_range("Position out of range"); + } + + uint256 current_hash = block.vtx[position]->GetHash(); + + for (const uint256& sibling : merkle_path) { + if (position % 2 == 0) { + current_hash = Hash(current_hash, sibling); + } else { + current_hash = Hash(sibling, current_hash); + } + position = position / 2; + } + + return current_hash; +} + +FUZZ_TARGET(merkle) +{ + FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); + + const bool with_witness = fuzzed_data_provider.ConsumeBool(); + std::optional block {ConsumeDeserializable(fuzzed_data_provider, with_witness ? 
TX_WITH_WITNESS : TX_NO_WITNESS)}; + if (!block){ + return; + } + const size_t num_txs = block->vtx.size(); + std::vector tx_hashes; + tx_hashes.reserve(num_txs); + + for (size_t i = 0; i < num_txs; ++i) { + tx_hashes.push_back(block->vtx[i]->GetHash()); + } + + // Test ComputeMerkleRoot + bool mutated = fuzzed_data_provider.ConsumeBool(); + const uint256 merkle_root = ComputeMerkleRoot(tx_hashes, &mutated); + + // Basic sanity checks for ComputeMerkleRoot + if (tx_hashes.size() == 1) { + assert(merkle_root == tx_hashes[0]); + } + + + const uint256 block_merkle_root = BlockMerkleRoot(*block, &mutated); + if (tx_hashes.size() == 1) { + assert(block_merkle_root == tx_hashes[0]); + } + + if (!block->vtx.empty()){ + const uint256 block_witness_merkle_root = BlockWitnessMerkleRoot(*block, &mutated); + if (tx_hashes.size() == 1) { + assert(block_witness_merkle_root == uint256()); + } + } + + // Test TransactionMerklePath + const uint32_t position = fuzzed_data_provider.ConsumeIntegralInRange(0, num_txs > 0 ? num_txs - 1 : 0); + std::vector merkle_path = TransactionMerklePath(*block, position); + + // Check that the root we compute from TransactionMerklePath equals the same merkle_root and block_merkle_root + if (tx_hashes.size() > 1) { + uint256 merkle_root_from_merkle_path = ComputeMerkleRootFromPath(*block, position, merkle_path); + assert(merkle_root_from_merkle_path == merkle_root); + assert(merkle_root_from_merkle_path == block_merkle_root); + } + + // Basic sanity checks for TransactionMerklePath + assert(merkle_path.size() <= 32); // Maximum depth of a Merkle tree with 2^32 leaves + if (num_txs == 1 || num_txs == 0) { + assert(merkle_path.empty()); // Single transaction has no path + } + +} From d4e212e8a69ea118acb6caa1a7efe64a77bdfdd2 Mon Sep 17 00:00:00 2001 From: Roman Zeyde Date: Sun, 30 Jun 2024 20:51:51 +0300 Subject: [PATCH 024/390] rest: fetch spent transaction outputs by blockhash Today, it is possible to fetch a block's spent prevouts in order to build an external index by using the `/rest/block/HASH.json` endpoint. However, its performance is low due to JSON serialization overhead. 
We can significantly optimize it by adding a new REST endpoint, using a binary response format: ``` $ BLOCKHASH=00000000000000000002a7c4c1e48d76c5a37902165a270156b7a8d72728a054 $ ab -k -c 1 -n 100 http://localhost:8332/rest/block/$BLOCKHASH.json Document Length: 13278152 bytes Requests per second: 3.53 [#/sec] (mean) Time per request: 283.569 [ms] (mean) $ ab -k -c 1 -n 10000 http://localhost:8332/rest/spentoutputs/$BLOCKHASH.bin Document Length: 195591 bytes Requests per second: 254.47 [#/sec] (mean) Time per request: 3.930 [ms] (mean) ``` Currently, this PR is being used and tested by Bindex: * https://github.com/romanz/bindex-rs This PR would allow to improve the performance of external indexers such as electrs, ElectrumX, Fulcrum and Blockbook: * https://github.com/romanz/electrs (also https://github.com/Blockstream/electrs and https://github.com/mempool/electrs) * https://github.com/spesmilo/electrumx * https://github.com/cculianu/Fulcrum * https://github.com/trezor/blockbook --- src/rest.cpp | 109 +++++++++++++++++++++ test/functional/interface_rest.py | 30 ++++++ test/functional/test_framework/messages.py | 5 + 3 files changed, 144 insertions(+) diff --git a/src/rest.cpp b/src/rest.cpp index 44984b360ff3..eb2dcfe3722f 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -281,6 +282,113 @@ static bool rest_headers(const std::any& context, } } +/** + * Serialize spent outputs as a list of per-transaction CTxOut lists using binary format. + */ +static void SerializeBlockUndo(DataStream& stream, const CBlockUndo& block_undo) +{ + WriteCompactSize(stream, block_undo.vtxundo.size() + 1); + WriteCompactSize(stream, 0); // block_undo.vtxundo doesn't contain coinbase tx + for (const CTxUndo& tx_undo : block_undo.vtxundo) { + WriteCompactSize(stream, tx_undo.vprevout.size()); + for (const Coin& coin : tx_undo.vprevout) { + coin.out.Serialize(stream); + } + } +} + +/** + * Serialize spent outputs as a list of per-transaction CTxOut lists using JSON format. + */ +static void BlockUndoToJSON(const CBlockUndo& block_undo, UniValue& result) +{ + result.push_back({UniValue::VARR}); // block_undo.vtxundo doesn't contain coinbase tx + for (const CTxUndo& tx_undo : block_undo.vtxundo) { + UniValue tx_prevouts(UniValue::VARR); + for (const Coin& coin : tx_undo.vprevout) { + UniValue prevout(UniValue::VOBJ); + prevout.pushKV("value", ValueFromAmount(coin.out.nValue)); + + UniValue script_pub_key(UniValue::VOBJ); + ScriptToUniv(coin.out.scriptPubKey, /*out=*/script_pub_key, /*include_hex=*/true, /*include_address=*/true); + prevout.pushKV("scriptPubKey", std::move(script_pub_key)); + + tx_prevouts.push_back(std::move(prevout)); + } + result.push_back(std::move(tx_prevouts)); + } +} + +static bool rest_spent_txouts(const std::any& context, HTTPRequest* req, const std::string& strURIPart) +{ + if (!CheckWarmup(req)) { + return false; + } + std::string param; + const RESTResponseFormat rf = ParseDataFormat(param, strURIPart); + std::vector path = SplitString(param, '/'); + + std::string hashStr; + if (path.size() == 1) { + // path with query parameter: /rest/spenttxouts/ + hashStr = path[0]; + } else { + return RESTERR(req, HTTP_BAD_REQUEST, "Invalid URI format. 
Expected /rest/spenttxouts/."); + } + + auto hash{uint256::FromHex(hashStr)}; + if (!hash) { + return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr); + } + + ChainstateManager* chainman = GetChainman(context, req); + if (!chainman) { + return false; + } + + const CBlockIndex* pblockindex = WITH_LOCK(cs_main, return chainman->m_blockman.LookupBlockIndex(*hash)); + if (!pblockindex) { + return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); + } + + CBlockUndo block_undo; + if (pblockindex->nHeight > 0 && !chainman->m_blockman.ReadBlockUndo(block_undo, *pblockindex)) { + return RESTERR(req, HTTP_NOT_FOUND, hashStr + " undo not available"); + } + + switch (rf) { + case RESTResponseFormat::BINARY: { + DataStream ssSpentResponse{}; + SerializeBlockUndo(ssSpentResponse, block_undo); + req->WriteHeader("Content-Type", "application/octet-stream"); + req->WriteReply(HTTP_OK, ssSpentResponse); + return true; + } + + case RESTResponseFormat::HEX: { + DataStream ssSpentResponse{}; + SerializeBlockUndo(ssSpentResponse, block_undo); + const std::string strHex{HexStr(ssSpentResponse) + "\n"}; + req->WriteHeader("Content-Type", "text/plain"); + req->WriteReply(HTTP_OK, strHex); + return true; + } + + case RESTResponseFormat::JSON: { + UniValue result(UniValue::VARR); + BlockUndoToJSON(block_undo, result); + std::string strJSON = result.write() + "\n"; + req->WriteHeader("Content-Type", "application/json"); + req->WriteReply(HTTP_OK, strJSON); + return true; + } + + default: { + return RESTERR(req, HTTP_NOT_FOUND, "output format not found (available: " + AvailableDataFormatsString() + ")"); + } + } +} + static bool rest_block(const std::any& context, HTTPRequest* req, const std::string& strURIPart, @@ -1021,6 +1129,7 @@ static const struct { {"/rest/deploymentinfo/", rest_deploymentinfo}, {"/rest/deploymentinfo", rest_deploymentinfo}, {"/rest/blockhashbyheight/", rest_blockhash_by_height}, + {"/rest/spenttxouts/", rest_spent_txouts}, }; void StartREST(const std::any& context) diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index 0e294696b0ec..170c6696cd0b 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -6,6 +6,7 @@ from decimal import Decimal from enum import Enum +from io import BytesIO import http.client import json import typing @@ -15,6 +16,7 @@ from test_framework.messages import ( BLOCK_HEADER_SIZE, COIN, + deser_block_spent_outputs, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( @@ -424,6 +426,34 @@ def run_test(self): assert_equal(self.test_rest_request(f"/headers/{bb_hash}", query_params={"count": 1}), self.test_rest_request(f"/headers/1/{bb_hash}")) assert_equal(self.test_rest_request(f"/blockfilterheaders/basic/{bb_hash}", query_params={"count": 1}), self.test_rest_request(f"/blockfilterheaders/basic/5/{bb_hash}")) + self.log.info("Test the /spenttxouts URI") + + block_count = self.nodes[0].getblockcount() + for height in range(0, block_count + 1): + blockhash = self.nodes[0].getblockhash(height) + spent_bin = self.test_rest_request(f"/spenttxouts/{blockhash}", req_type=ReqType.BIN, ret_type=RetType.BYTES) + spent_hex = self.test_rest_request(f"/spenttxouts/{blockhash}", req_type=ReqType.HEX, ret_type=RetType.BYTES) + spent_json = self.test_rest_request(f"/spenttxouts/{blockhash}", req_type=ReqType.JSON, ret_type=RetType.JSON) + + assert_equal(bytes.fromhex(spent_hex.decode()), spent_bin) + + spent = deser_block_spent_outputs(BytesIO(spent_bin)) + 
block = self.nodes[0].getblock(blockhash, 3) # return prevout for each input + assert_equal(len(spent), len(block["tx"])) + assert_equal(len(spent_json), len(block["tx"])) + + for i, tx in enumerate(block["tx"]): + prevouts = [txin["prevout"] for txin in tx["vin"] if "coinbase" not in txin] + # compare with `getblock` JSON output (coinbase tx has no prevouts) + actual = [(txout.scriptPubKey.hex(), Decimal(txout.nValue) / COIN) for txout in spent[i]] + expected = [(p["scriptPubKey"]["hex"], p["value"]) for p in prevouts] + assert_equal(expected, actual) + # also compare JSON format + actual = [(prevout["scriptPubKey"], prevout["value"]) for prevout in spent_json[i]] + expected = [(p["scriptPubKey"], p["value"]) for p in prevouts] + assert_equal(expected, actual) + + self.log.info("Test the /deploymentinfo URI") deployment_info = self.nodes[0].getdeploymentinfo() diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index e6f72f43b54d..322266b7c2d0 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -228,6 +228,11 @@ def ser_string_vector(l): return r +def deser_block_spent_outputs(f): + nit = deser_compact_size(f) + return [deser_vector(f, CTxOut) for _ in range(nit)] + + def from_hex(obj, hex_string): """Deserialize from a hex string representation (e.g. from RPC) From 4f10a57671c19cacca630b2401e42a213aacff1b Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Tue, 10 Jun 2025 14:59:46 +0100 Subject: [PATCH 025/390] depends: Override host compilers for FreeBSD and OpenBSD When building depends on FreeBSD/OpenBSD `aarch64`, the host compilers default to `default_host_{CC,CXX}`, which resolves to `gcc`/`g++`. This is incorrect on these systems, where Clang is the default system compiler. --- depends/builders/freebsd.mk | 4 ++++ depends/builders/openbsd.mk | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/depends/builders/freebsd.mk b/depends/builders/freebsd.mk index 18316f492ee9..910de28bf36f 100644 --- a/depends/builders/freebsd.mk +++ b/depends/builders/freebsd.mk @@ -3,3 +3,7 @@ build_freebsd_CXX=clang++ build_freebsd_SHA256SUM = sha256sum build_freebsd_DOWNLOAD = curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o + +# freebsd host on freebsd builder: override freebsd host preferences. +freebsd_CC = clang +freebsd_CXX = clang++ diff --git a/depends/builders/openbsd.mk b/depends/builders/openbsd.mk index 9c94c4baae7a..5c068f49fefd 100644 --- a/depends/builders/openbsd.mk +++ b/depends/builders/openbsd.mk @@ -7,3 +7,7 @@ build_openbsd_DOWNLOAD = curl --location --fail --connect-timeout $(DOWNLOAD_CON build_openbsd_TAR = gtar # openBSD touch doesn't understand -h build_openbsd_TOUCH = touch -m -t 200001011200 + +# openbsd host on openbsd builder: override openbsd host preferences. +openbsd_CC = clang +openbsd_CXX = clang++ From bac9ee4830664c86c1cb3d38a5b19c722aae2f54 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Wed, 21 May 2025 13:36:43 -0400 Subject: [PATCH 026/390] p2p: Add witness mutation check inside FillBlock Since #29412, we have not allowed mutated blocks to continue being processed immediately the block is received, but this is only done for the legacy BLOCK message. 
Extend these checks as belt-and-suspenders to not allow similar mutation strategies to affect relay by honest peers by applying the check inside PartiallyDownloadedBlock::FillBlock, immediately before returning READ_STATUS_OK. This also removes the extraneous CheckBlock call. --- src/blockencodings.cpp | 17 ++++----- src/blockencodings.h | 7 ++-- src/net_processing.cpp | 10 ++++-- src/test/blockencodings_tests.cpp | 16 ++++----- src/test/fuzz/partially_downloaded_block.cpp | 38 ++++++-------------- 5 files changed, 37 insertions(+), 51 deletions(-) diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp index a65072eeacff..947f605d8e6e 100644 --- a/src/blockencodings.cpp +++ b/src/blockencodings.cpp @@ -179,7 +179,7 @@ bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const return txn_available[index] != nullptr; } -ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector& vtx_missing) +ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector& vtx_missing, bool segwit_active) { if (header.IsNull()) return READ_STATUS_INVALID; @@ -206,16 +206,11 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector< if (vtx_missing.size() != tx_missing_offset) return READ_STATUS_INVALID; - BlockValidationState state; - CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock; - if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) { - // TODO: We really want to just check merkle tree manually here, - // but that is expensive, and CheckBlock caches a block's - // "checked-status" (in the CBlock?). CBlock should be able to - // check its own merkle root and cache that check. - if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) - return READ_STATUS_FAILED; // Possible Short ID collision - return READ_STATUS_CHECKBLOCK_FAILED; + // Check for possible mutations early now that we have a seemingly good block + IsBlockMutatedFn check_mutated{m_check_block_mutated_mock ? 
m_check_block_mutated_mock : IsBlockMutated}; + if (check_mutated(/*block=*/block, + /*check_witness_root=*/segwit_active)) { + return READ_STATUS_FAILED; // Possible Short ID collision } LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %u txn prefilled, %u txn from mempool (incl at least %u from extra pool) and %u txn (%u bytes) requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size(), tx_missing_size); diff --git a/src/blockencodings.h b/src/blockencodings.h index c92aa05e8057..b1f82d18c5dc 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -141,15 +141,16 @@ class PartiallyDownloadedBlock { CBlockHeader header; // Can be overridden for testing - using CheckBlockFn = std::function; - CheckBlockFn m_check_block_mock{nullptr}; + using IsBlockMutatedFn = std::function; + IsBlockMutatedFn m_check_block_mutated_mock{nullptr}; explicit PartiallyDownloadedBlock(CTxMemPool* poolIn) : pool(poolIn) {} // extra_txn is a list of extra orphan/conflicted/etc transactions to look at ReadStatus InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector& extra_txn); bool IsTxAvailable(size_t index) const; - ReadStatus FillBlock(CBlock& block, const std::vector& vtx_missing); + // segwit_active enforces witness mutation checks just before reporting a healthy status + ReadStatus FillBlock(CBlock& block, const std::vector& vtx_missing, bool segwit_active); }; #endif // BITCOIN_BLOCKENCODINGS_H diff --git a/src/net_processing.cpp b/src/net_processing.cpp index ead69f086df9..059727a93c75 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3360,7 +3360,11 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl } PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; - ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn); + + // We should not have gotten this far in compact block processing unless it's attached to a known header + const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))}; + ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn, + /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); if (status == READ_STATUS_INVALID) { RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect Misbehaving(peer, "invalid compact block/non-matching block transactions"); @@ -4529,7 +4533,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } std::vector dummy; - status = tempBlock.FillBlock(*pblock, dummy); + const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock))}; + status = tempBlock.FillBlock(*pblock, dummy, + /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); if (status == READ_STATUS_OK) { fBlockReconstructed = true; } diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index ed95a8831e36..d40a0a94aef1 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -95,21 +95,21 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) CBlock block2; { PartiallyDownloadedBlock tmp = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions + BOOST_CHECK(partialBlock.FillBlock(block2, {}, 
/*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions partialBlock = tmp; } // Wrong transaction { PartiallyDownloadedBlock tmp = partialBlock; - partialBlock.FillBlock(block2, {block.vtx[2]}); // Current implementation doesn't check txn here, but don't require that + partialBlock.FillBlock(block2, {block.vtx[2]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that partialBlock = tmp; } bool mutated; BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated)); CBlock block3; - BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); @@ -182,14 +182,14 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) CBlock block2; { PartiallyDownloadedBlock tmp = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions partialBlock = tmp; } // Wrong transaction { PartiallyDownloadedBlock tmp = partialBlock; - partialBlock.FillBlock(block2, {block.vtx[1]}); // Current implementation doesn't check txn here, but don't require that + partialBlock.FillBlock(block2, {block.vtx[1]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that partialBlock = tmp; } BOOST_CHECK_EQUAL(pool.get(block.vtx[2]->GetHash()).use_count(), SHARED_TX_OFFSET + 2); // +2 because of partialBlock and block2 @@ -198,7 +198,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) CBlock block3; PartiallyDownloadedBlock partialBlockCopy = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); @@ -252,7 +252,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) CBlock block2; PartiallyDownloadedBlock partialBlockCopy = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); bool mutated; BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); @@ -300,7 +300,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) CBlock block2; std::vector vtx_missing; - BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); BOOST_CHECK(!mutated); diff --git a/src/test/fuzz/partially_downloaded_block.cpp b/src/test/fuzz/partially_downloaded_block.cpp index e3a2f75d8e0c..bf2fc98d9eda 100644 --- a/src/test/fuzz/partially_downloaded_block.cpp +++ b/src/test/fuzz/partially_downloaded_block.cpp @@ -36,14 +36,10 
@@ void initialize_pdb() g_setup = testing_setup.get(); } -PartiallyDownloadedBlock::CheckBlockFn FuzzedCheckBlock(std::optional result) +PartiallyDownloadedBlock::IsBlockMutatedFn FuzzedIsBlockMutated(bool result) { - return [result](const CBlock&, BlockValidationState& state, const Consensus::Params&, bool, bool) { - if (result) { - return state.Invalid(*result); - } - - return true; + return [result](const CBlock& block, bool) { + return result; }; } @@ -115,35 +111,23 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb) skipped_missing |= (!pdb.IsTxAvailable(i) && skip); } - // Mock CheckBlock - bool fail_check_block{fuzzed_data_provider.ConsumeBool()}; - auto validation_result = - fuzzed_data_provider.PickValueInArray( - {BlockValidationResult::BLOCK_RESULT_UNSET, - BlockValidationResult::BLOCK_CONSENSUS, - BlockValidationResult::BLOCK_CACHED_INVALID, - BlockValidationResult::BLOCK_INVALID_HEADER, - BlockValidationResult::BLOCK_MUTATED, - BlockValidationResult::BLOCK_MISSING_PREV, - BlockValidationResult::BLOCK_INVALID_PREV, - BlockValidationResult::BLOCK_TIME_FUTURE, - BlockValidationResult::BLOCK_HEADER_LOW_WORK}); - pdb.m_check_block_mock = FuzzedCheckBlock( - fail_check_block ? - std::optional{validation_result} : - std::nullopt); + bool segwit_active{fuzzed_data_provider.ConsumeBool()}; + + // Mock IsBlockMutated + bool fail_block_mutated{fuzzed_data_provider.ConsumeBool()}; + pdb.m_check_block_mutated_mock = FuzzedIsBlockMutated(fail_block_mutated); CBlock reconstructed_block; - auto fill_status{pdb.FillBlock(reconstructed_block, missing)}; + auto fill_status{pdb.FillBlock(reconstructed_block, missing, segwit_active)}; switch (fill_status) { case READ_STATUS_OK: assert(!skipped_missing); - assert(!fail_check_block); + assert(!fail_block_mutated); assert(block->GetHash() == reconstructed_block.GetHash()); break; case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]]; case READ_STATUS_FAILED: - assert(fail_check_block); + assert(fail_block_mutated); break; case READ_STATUS_INVALID: break; From 28299ce77636d7563ec545d043cf1b61bd2f01c1 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Tue, 3 Jun 2025 10:29:00 -0400 Subject: [PATCH 027/390] p2p: remove vestigial READ_STATUS_CHECKBLOCK_FAILED --- src/blockencodings.h | 2 -- src/net_processing.cpp | 18 +----------------- src/test/fuzz/partially_downloaded_block.cpp | 1 - 3 files changed, 1 insertion(+), 20 deletions(-) diff --git a/src/blockencodings.h b/src/blockencodings.h index b1f82d18c5dc..fce59bc56149 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -84,8 +84,6 @@ typedef enum ReadStatus_t READ_STATUS_OK, READ_STATUS_INVALID, // Invalid object, peer is sending bogus crap READ_STATUS_FAILED, // Failed to process object - READ_STATUS_CHECKBLOCK_FAILED, // Used only by FillBlock to indicate a - // failure in CheckBlock. } ReadStatus; class CBlockHeaderAndShortTxIDs { diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 059727a93c75..535a32c04ebc 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3381,23 +3381,7 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl return; } } else { - // Block is either okay, or possibly we received - // READ_STATUS_CHECKBLOCK_FAILED. - // Note that CheckBlock can only fail for one of a few reasons: - // 1. bad-proof-of-work (impossible here, because we've already - // accepted the header) - // 2. 
merkleroot doesn't match the transactions given (already - // caught in FillBlock with READ_STATUS_FAILED, so - // impossible here) - // 3. the block is otherwise invalid (eg invalid coinbase, - // block is too big, too many legacy sigops, etc). - // So if CheckBlock failed, #3 is the only possibility. - // Under BIP 152, we don't discourage the peer unless proof of work is - // invalid (we don't require all the stateless checks to have - // been run). This is handled below, so just treat this as - // though the block was successfully read, and rely on the - // handling in ProcessNewBlock to ensure the block index is - // updated, etc. + // Block is okay for further processing RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer fBlockRead = true; // mapBlockSource is used for potentially punishing peers and diff --git a/src/test/fuzz/partially_downloaded_block.cpp b/src/test/fuzz/partially_downloaded_block.cpp index bf2fc98d9eda..116b446e5358 100644 --- a/src/test/fuzz/partially_downloaded_block.cpp +++ b/src/test/fuzz/partially_downloaded_block.cpp @@ -125,7 +125,6 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb) assert(!fail_block_mutated); assert(block->GetHash() == reconstructed_block.GetHash()); break; - case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]]; case READ_STATUS_FAILED: assert(fail_block_mutated); break; From e9331cd6ab2c756c56e8b27a2de2a6d4884c0c06 Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Tue, 10 Jun 2025 19:58:48 +0000 Subject: [PATCH 028/390] wallet: IsEquivalentTo should strip witness data in addition to scriptsigs --- src/wallet/transaction.cpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/wallet/transaction.cpp b/src/wallet/transaction.cpp index 561880482f8c..a611a034d9e6 100644 --- a/src/wallet/transaction.cpp +++ b/src/wallet/transaction.cpp @@ -13,8 +13,14 @@ bool CWalletTx::IsEquivalentTo(const CWalletTx& _tx) const { CMutableTransaction tx1 {*this->tx}; CMutableTransaction tx2 {*_tx.tx}; - for (auto& txin : tx1.vin) txin.scriptSig = CScript(); - for (auto& txin : tx2.vin) txin.scriptSig = CScript(); + for (auto& txin : tx1.vin) { + txin.scriptSig = CScript(); + txin.scriptWitness.SetNull(); + } + for (auto& txin : tx2.vin) { + txin.scriptSig = CScript(); + txin.scriptWitness.SetNull(); + } return CTransaction(tx1) == CTransaction(tx2); } From 0ec255139be3745a135386e9db957fe81bc3d833 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Tue, 10 Jun 2025 12:10:19 -0700 Subject: [PATCH 029/390] wallet, rpc: Remove deprecated balances from getwalletinfo --- src/wallet/rpc/wallet.cpp | 7 ------- test/functional/wallet_balance.py | 3 --- test/functional/wallet_basic.py | 9 ++++----- test/functional/wallet_multiwallet.py | 5 ++--- test/functional/wallet_reorgsrestore.py | 8 ++++---- 5 files changed, 10 insertions(+), 22 deletions(-) diff --git a/src/wallet/rpc/wallet.cpp b/src/wallet/rpc/wallet.cpp index 4bd4851609d5..f1a613bc6bc1 100644 --- a/src/wallet/rpc/wallet.cpp +++ b/src/wallet/rpc/wallet.cpp @@ -42,9 +42,6 @@ static RPCHelpMan getwalletinfo() {RPCResult::Type::STR, "walletname", "the wallet name"}, {RPCResult::Type::NUM, "walletversion", "the wallet version"}, {RPCResult::Type::STR, "format", "the database format (only sqlite)"}, - {RPCResult::Type::STR_AMOUNT, "balance", "DEPRECATED. Identical to getbalances().mine.trusted"}, - {RPCResult::Type::STR_AMOUNT, "unconfirmed_balance", "DEPRECATED. 
Identical to getbalances().mine.untrusted_pending"}, - {RPCResult::Type::STR_AMOUNT, "immature_balance", "DEPRECATED. Identical to getbalances().mine.immature"}, {RPCResult::Type::NUM, "txcount", "the total number of transactions in the wallet"}, {RPCResult::Type::NUM, "keypoolsize", "how many new keys are pre-generated (only counts external keys)"}, {RPCResult::Type::NUM, "keypoolsize_hd_internal", /*optional=*/true, "how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)"}, @@ -82,13 +79,9 @@ static RPCHelpMan getwalletinfo() UniValue obj(UniValue::VOBJ); size_t kpExternalSize = pwallet->KeypoolCountExternalKeys(); - const auto bal = GetBalance(*pwallet); obj.pushKV("walletname", pwallet->GetName()); obj.pushKV("walletversion", pwallet->GetVersion()); obj.pushKV("format", pwallet->GetDatabase().Format()); - obj.pushKV("balance", ValueFromAmount(bal.m_mine_trusted)); - obj.pushKV("unconfirmed_balance", ValueFromAmount(bal.m_mine_untrusted_pending)); - obj.pushKV("immature_balance", ValueFromAmount(bal.m_mine_immature)); obj.pushKV("txcount", (int)pwallet->mapWallet.size()); obj.pushKV("keypoolsize", (int64_t)kpExternalSize); diff --git a/test/functional/wallet_balance.py b/test/functional/wallet_balance.py index 4efc0169eb3c..62160c635ad8 100755 --- a/test/functional/wallet_balance.py +++ b/test/functional/wallet_balance.py @@ -170,9 +170,6 @@ def test_balances(*, fee_node_1=0): # getunconfirmedbalance assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1) # Doesn't include output of node 0's send since it was spent - # getwalletinfo.unconfirmed_balance - assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60')) - assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('30') - fee_node_1) test_balances(fee_node_1=Decimal('0.01')) diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index ce2c15962102..23c1616f9c6d 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -69,9 +69,9 @@ def run_test(self): self.generate(self.nodes[0], 1, sync_fun=self.no_op) - walletinfo = self.nodes[0].getwalletinfo() - assert_equal(walletinfo['immature_balance'], 50) - assert_equal(walletinfo['balance'], 0) + balances = self.nodes[0].getbalances() + assert_equal(balances["mine"]["immature"], 50) + assert_equal(balances["mine"]["trusted"], 0) self.sync_all(self.nodes[0:3]) self.generate(self.nodes[1], COINBASE_MATURITY + 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) @@ -118,8 +118,7 @@ def run_test(self): # but 10 will go to node2 and the rest will go to node0 balance = self.nodes[0].getbalance() assert_equal(set([txout1['value'], txout2['value']]), set([10, balance])) - walletinfo = self.nodes[0].getwalletinfo() - assert_equal(walletinfo['immature_balance'], 0) + assert_equal(self.nodes[0].getbalances()["mine"]["immature"], 0) # Have node0 mine a block, thus it will collect its own fee. 
self.generate(self.nodes[0], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index 87c8f6275520..60fc98e3c28a 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -185,8 +185,7 @@ def wallet_file(name): self.nodes[0].loadwallet("w5") assert_equal(set(node.listwallets()), {"w4", "w5"}) w5 = wallet("w5") - w5_info = w5.getwalletinfo() - assert_equal(w5_info['immature_balance'], 50) + assert_equal(w5.getbalances()["mine"]["immature"], 50) competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir') os.mkdir(competing_wallet_dir) @@ -208,7 +207,7 @@ def wallet_file(name): self.generatetoaddress(node, nblocks=1, address=wallets[0].getnewaddress(), sync_fun=self.no_op) for wallet_name, wallet in zip(wallet_names, wallets): info = wallet.getwalletinfo() - assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0) + assert_equal(wallet.getbalances()["mine"]["immature"], 50 if wallet is wallets[0] else 0) assert_equal(info['walletname'], wallet_name) # accessing invalid wallet fails diff --git a/test/functional/wallet_reorgsrestore.py b/test/functional/wallet_reorgsrestore.py index 73ca173f237b..1567fb30d0ed 100755 --- a/test/functional/wallet_reorgsrestore.py +++ b/test/functional/wallet_reorgsrestore.py @@ -100,7 +100,7 @@ def test_reorg_handling_during_unclean_shutdown(self): # Restart to ensure node and wallet are flushed self.restart_node(0) wallet = node.get_wallet_rpc("reorg_crash") - assert_greater_than(wallet.getwalletinfo()['immature_balance'], 0) + assert_greater_than(wallet.getbalances()["mine"]["immature"], 0) # Disconnect tip and sync wallet state tip = wallet.getbestblockhash() @@ -108,7 +108,7 @@ def test_reorg_handling_during_unclean_shutdown(self): wallet.syncwithvalidationinterfacequeue() # Tip was disconnected, ensure coinbase has been abandoned - assert_equal(wallet.getwalletinfo()['immature_balance'], 0) + assert_equal(wallet.getbalances()["mine"]["immature"], 0) coinbase_tx_id = wallet.getblock(tip, verbose=1)["tx"][0] assert_equal(wallet.gettransaction(coinbase_tx_id)['details'][0]['abandoned'], True) @@ -131,12 +131,12 @@ def test_reorg_handling_during_unclean_shutdown(self): assert(node.getbestblockhash() != tip) # Ensure wallet state is consistent now assert_equal(wallet.gettransaction(coinbase_tx_id)['details'][0]['abandoned'], True) - assert_equal(wallet.getwalletinfo()['immature_balance'], 0) + assert_equal(wallet.getbalances()["mine"]["immature"], 0) # And finally, verify the state if the block ends up being into the best chain again node.reconsiderblock(tip) assert_equal(wallet.gettransaction(coinbase_tx_id)['details'][0]['abandoned'], False) - assert_greater_than(wallet.getwalletinfo()['immature_balance'], 0) + assert_greater_than(wallet.getbalances()["mine"]["immature"], 0) def run_test(self): # Send a tx from which to conflict outputs later From c3fe85e2d6dd4f251a62a99fd891b0fa370f9712 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Tue, 10 Jun 2025 12:11:07 -0700 Subject: [PATCH 030/390] wallet, rpc, test: Remove deprecated getunconfirmedbalance --- src/wallet/rpc/coins.cpp | 23 ----------------------- src/wallet/rpc/wallet.cpp | 2 -- test/functional/wallet_balance.py | 3 --- 3 files changed, 28 deletions(-) diff --git a/src/wallet/rpc/coins.cpp b/src/wallet/rpc/coins.cpp index cce9b26babef..16d93de1216e 100644 --- a/src/wallet/rpc/coins.cpp +++ b/src/wallet/rpc/coins.cpp @@ -214,29 +214,6 @@ 
RPCHelpMan getbalance() }; } -RPCHelpMan getunconfirmedbalance() -{ - return RPCHelpMan{"getunconfirmedbalance", - "DEPRECATED\nIdentical to getbalances().mine.untrusted_pending\n", - {}, - RPCResult{RPCResult::Type::NUM, "", "The balance"}, - RPCExamples{""}, - [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue -{ - const std::shared_ptr pwallet = GetWalletForJSONRPCRequest(request); - if (!pwallet) return UniValue::VNULL; - - // Make sure the results are valid at least up to the most recent block - // the user could have gotten from another RPC command prior to now - pwallet->BlockUntilSyncedToCurrentChain(); - - LOCK(pwallet->cs_wallet); - - return ValueFromAmount(GetBalance(*pwallet).m_mine_untrusted_pending); -}, - }; -} - RPCHelpMan lockunspent() { return RPCHelpMan{ diff --git a/src/wallet/rpc/wallet.cpp b/src/wallet/rpc/wallet.cpp index f1a613bc6bc1..a86d92af836b 100644 --- a/src/wallet/rpc/wallet.cpp +++ b/src/wallet/rpc/wallet.cpp @@ -956,7 +956,6 @@ RPCHelpMan restorewallet(); RPCHelpMan getreceivedbyaddress(); RPCHelpMan getreceivedbylabel(); RPCHelpMan getbalance(); -RPCHelpMan getunconfirmedbalance(); RPCHelpMan lockunspent(); RPCHelpMan listlockunspent(); RPCHelpMan getbalances(); @@ -1016,7 +1015,6 @@ std::span GetWalletRPCCommands() {"wallet", &getreceivedbyaddress}, {"wallet", &getreceivedbylabel}, {"wallet", &gettransaction}, - {"wallet", &getunconfirmedbalance}, {"wallet", &getbalances}, {"wallet", &getwalletinfo}, {"wallet", &importdescriptors}, diff --git a/test/functional/wallet_balance.py b/test/functional/wallet_balance.py index 62160c635ad8..c3d9596b8a04 100755 --- a/test/functional/wallet_balance.py +++ b/test/functional/wallet_balance.py @@ -167,9 +167,6 @@ def test_balances(*, fee_node_1=0): # TODO: fix getbalance tracking of coin spentness depth assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0')) assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0')) - # getunconfirmedbalance - assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend - assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1) # Doesn't include output of node 0's send since it was spent test_balances(fee_node_1=Decimal('0.01')) From cbf9b2dab1d8800d63d65904ccfd64e1e439e510 Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Tue, 10 Jun 2025 20:52:46 +0000 Subject: [PATCH 031/390] mempool: codify existing assumption about duplicate txids during removal Also explicitly check for txid equality rather than transaction equality as the former is a tighter constraint if witness data is included when comparing the full transactions. 
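As a side note on why the txid comparison is the tighter claim: two transactions can differ only in witness data yet share a txid, so asserting that the txids differ is stronger than asserting that the full (witness-inclusive) transactions differ. A small illustration (TxidVsWtxidDemo is hypothetical and not part of the patch):

```cpp
// Illustration only: the txid ignores witness data, the wtxid does not.
#include <primitives/transaction.h>

#include <cassert>

void TxidVsWtxidDemo(const CTransaction& with_witness)
{
    CMutableTransaction stripped{with_witness};
    for (auto& txin : stripped.vin) txin.scriptWitness.SetNull();

    const CTransaction no_witness{stripped};
    assert(no_witness.GetHash() == with_witness.GetHash()); // txid unchanged
    // GetWitnessHash() differs whenever the original carried witness data.
}
```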
Co-authored-by: glozow --- src/txmempool.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 3a5a3fb306d3..10ff535828ac 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -652,7 +652,7 @@ void CTxMemPool::removeConflicts(const CTransaction &tx) auto it = mapNextTx.find(txin.prevout); if (it != mapNextTx.end()) { const CTransaction &txConflict = *it->second; - if (txConflict != tx) + if (Assume(txConflict.GetHash() != tx.GetHash())) { ClearPrioritisation(txConflict.GetHash()); removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT); From 6efbd1e1dcdfbe9eae2d5c22abab3ee616a75ff2 Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Tue, 10 Jun 2025 21:13:59 +0000 Subject: [PATCH 032/390] refactor: CTransaction equality should consider witness data It is not at all obvious that two transactions with differing witness data should test equal to each other. There was only a single instance of a caller relying on this behavior, and that one appears accidental (left-over from before segwit). That caller (in the wallet) has been fixed. Change the definition of transaction equality (and inequality) to use the wtxid instead. --- src/primitives/transaction.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index bf86562886a7..ad8172709495 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -360,12 +360,12 @@ class CTransaction friend bool operator==(const CTransaction& a, const CTransaction& b) { - return a.hash == b.hash; + return a.GetWitnessHash() == b.GetWitnessHash(); } friend bool operator!=(const CTransaction& a, const CTransaction& b) { - return a.hash != b.hash; + return !operator==(a, b); } std::string ToString() const; From 578ea3eedb285519762087a4b27d953d8f61667f Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Wed, 11 Jun 2025 11:37:24 +0200 Subject: [PATCH 033/390] test: round difficulty and networkhashps Both are rational numbers. Client software should only use them to display information to humans. Followup calculations should use the underlying values such as target. Therefore it's not necessary to test the handling of these floating point values. Round them down to avoid spurious test failures. 
Fixes #32515 --- test/functional/mining_basic.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py index 12f1a4ddf5be..071b2cd36be0 100755 --- a/test/functional/mining_basic.py +++ b/test/functional/mining_basic.py @@ -390,14 +390,13 @@ def assert_submitblock(block, result_str_1, result_str_2=None): assert 'currentblockweight' not in mining_info assert_equal(mining_info['bits'], nbits_str(REGTEST_N_BITS)) assert_equal(mining_info['target'], target_str(REGTEST_TARGET)) - assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10')) - assert_equal(mining_info['next'], { - 'height': 201, - 'target': target_str(REGTEST_TARGET), - 'bits': nbits_str(REGTEST_N_BITS), - 'difficulty': Decimal('4.656542373906925E-10') - }) - assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334')) + # We don't care about precision, round to avoid mismatch under Valgrind: + assert_equal(round(mining_info['difficulty'], 10), Decimal('0.0000000005')) + assert_equal(mining_info['next']['height'], 201) + assert_equal(mining_info['next']['target'], target_str(REGTEST_TARGET)) + assert_equal(mining_info['next']['bits'], nbits_str(REGTEST_N_BITS)) + assert_equal(round(mining_info['next']['difficulty'], 10), Decimal('0.0000000005')) + assert_equal(round(mining_info['networkhashps'], 5), Decimal('0.00333')) assert_equal(mining_info['pooledtx'], 0) self.log.info("getblocktemplate: Test default witness commitment") From 88113125716c50ce4deb864041840d53a567554c Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 15 Jan 2024 17:08:47 -0500 Subject: [PATCH 034/390] script/parsing: Allow Const to not skip the found constant When parsing a descriptor, it is useful to be able to check whether a string begins with a substring without consuming that substring as another function such as Func() will be used later which requires that substring to be present at the beginning. Specifically, for MuSig2, this modified Const will be used to determine whether a an expression begins with "musig(" before a subsequent Func("musig", ...) is used. --- src/script/parsing.cpp | 4 ++-- src/script/parsing.h | 4 ++-- src/test/util_tests.cpp | 16 +++++++++++++++- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/script/parsing.cpp b/src/script/parsing.cpp index 254ced6f0b04..dd79f67e58e2 100644 --- a/src/script/parsing.cpp +++ b/src/script/parsing.cpp @@ -12,10 +12,10 @@ namespace script { -bool Const(const std::string& str, std::span& sp) +bool Const(const std::string& str, std::span& sp, bool skip) { if ((size_t)sp.size() >= str.size() && std::equal(str.begin(), str.end(), sp.begin())) { - sp = sp.subspan(str.size()); + if (skip) sp = sp.subspan(str.size()); return true; } return false; diff --git a/src/script/parsing.h b/src/script/parsing.h index 8986aa57d070..462a42e7b5e8 100644 --- a/src/script/parsing.h +++ b/src/script/parsing.h @@ -13,10 +13,10 @@ namespace script { /** Parse a constant. * - * If sp's initial part matches str, sp is updated to skip that part, and true is returned. + * If sp's initial part matches str, sp is optionally updated to skip that part, and true is returned. * Otherwise sp is unmodified and false is returned. */ -bool Const(const std::string& str, std::span& sp); +bool Const(const std::string& str, std::span& sp, bool skip = true); /** Parse a function call. 
* diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 05d2c914c6a8..8016c2d8c566 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -1137,13 +1137,24 @@ BOOST_AUTO_TEST_CASE(test_script_parsing) BOOST_CHECK(success); BOOST_CHECK_EQUAL(SpanToStr(sp), "MilkToastHoney"); + success = Const("Milk", sp, /*skip=*/false); + BOOST_CHECK(success); + BOOST_CHECK_EQUAL(SpanToStr(sp), "MilkToastHoney"); + success = Const("Milk", sp); BOOST_CHECK(success); BOOST_CHECK_EQUAL(SpanToStr(sp), "ToastHoney"); + success = Const("Bread", sp, /*skip=*/false); + BOOST_CHECK(!success); + success = Const("Bread", sp); BOOST_CHECK(!success); + success = Const("Toast", sp, /*skip=*/false); + BOOST_CHECK(success); + BOOST_CHECK_EQUAL(SpanToStr(sp), "ToastHoney"); + success = Const("Toast", sp); BOOST_CHECK(success); BOOST_CHECK_EQUAL(SpanToStr(sp), "Honey"); @@ -1151,10 +1162,13 @@ BOOST_AUTO_TEST_CASE(test_script_parsing) success = Const("Honeybadger", sp); BOOST_CHECK(!success); + success = Const("Honey", sp, /*skip=*/false); + BOOST_CHECK(success); + BOOST_CHECK_EQUAL(SpanToStr(sp), "Honey"); + success = Const("Honey", sp); BOOST_CHECK(success); BOOST_CHECK_EQUAL(SpanToStr(sp), ""); - // Func(...): parse a function call, update span to argument if successful input = "Foo(Bar(xy,z()))"; sp = input; From 12bc1d0b1e9681c338c9d0df0bbac1d4a3162322 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 14 Apr 2025 13:31:31 -0700 Subject: [PATCH 035/390] util/string: Allow Split to include the separator When splitting a string, sometimes the separator needs to be included. Split will now optionally include the separator at the end of the left side of the splits, i.e. it appears at the end of the splits, except for the last one. Specifically, for musig() descriptors, Split is used to separate a musig() from any derivation path that follows it by splitting on the closing parentheses. Since that parentheses is needed for Func() and Expr(), Split() needs to preserve the end parentheses instead of discarding it. 
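To make the new behaviour concrete, a minimal standalone sketch (SplitKeepSep is illustrative only; the real change is the include_sep parameter on the templated Split helper in the diff below): splitting a musig expression on ')' while keeping the separator leaves the closing parenthesis attached, so the leading "musig(...)" fragment stays a complete expression for Func()/Expr() parsing.

```cpp
// Sketch only: split on a separator but keep it at the end of each left piece.
#include <string>
#include <string_view>
#include <vector>

std::vector<std::string> SplitKeepSep(std::string_view s, char sep)
{
    std::vector<std::string> out;
    std::string cur;
    for (char c : s) {
        cur += c;
        if (c == sep) {
            out.push_back(cur);
            cur.clear();
        }
    }
    out.push_back(cur);
    return out;
}

// SplitKeepSep("musig(keyA,keyB)/0/*", ')') returns {"musig(keyA,keyB)", "/0/*"}
// SplitKeepSep("one#two#three", '#')        returns {"one#", "two#", "three"}
```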
--- src/test/util_tests.cpp | 13 +++++++++++++ src/util/string.h | 20 ++++++++++++++++---- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 8016c2d8c566..f9038c4fc977 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -1246,6 +1246,12 @@ BOOST_AUTO_TEST_CASE(test_script_parsing) BOOST_CHECK_EQUAL(SpanToStr(results[1]), "two"); BOOST_CHECK_EQUAL(SpanToStr(results[2]), "three"); + results = Split(input, '#', /*include_sep=*/true); + BOOST_CHECK_EQUAL(results.size(), 3U); + BOOST_CHECK_EQUAL(SpanToStr(results[0]), "one#"); + BOOST_CHECK_EQUAL(SpanToStr(results[1]), "two#"); + BOOST_CHECK_EQUAL(SpanToStr(results[2]), "three"); + input = "*foo*bar*"; results = Split(input, '*'); BOOST_CHECK_EQUAL(results.size(), 4U); @@ -1253,6 +1259,13 @@ BOOST_AUTO_TEST_CASE(test_script_parsing) BOOST_CHECK_EQUAL(SpanToStr(results[1]), "foo"); BOOST_CHECK_EQUAL(SpanToStr(results[2]), "bar"); BOOST_CHECK_EQUAL(SpanToStr(results[3]), ""); + + results = Split(input, '*', /*include_sep=*/true); + BOOST_CHECK_EQUAL(results.size(), 4U); + BOOST_CHECK_EQUAL(SpanToStr(results[0]), "*"); + BOOST_CHECK_EQUAL(SpanToStr(results[1]), "foo*"); + BOOST_CHECK_EQUAL(SpanToStr(results[2]), "bar*"); + BOOST_CHECK_EQUAL(SpanToStr(results[3]), ""); } BOOST_AUTO_TEST_CASE(test_SplitString) diff --git a/src/util/string.h b/src/util/string.h index 4b8752562753..330c2a2a61e0 100644 --- a/src/util/string.h +++ b/src/util/string.h @@ -100,18 +100,30 @@ void ReplaceAll(std::string& in_out, const std::string& search, const std::strin * * If sep does not occur in sp, a singleton with the entirety of sp is returned. * + * @param[in] include_sep Whether to include the separator at the end of the left side of the splits. + * * Note that this function does not care about braces, so splitting * "foo(bar(1),2),3) on ',' will return {"foo(bar(1)", "2)", "3)"}. + * + * If include_sep == true, splitting "foo(bar(1),2),3) on ',' + * will return: + * - foo(bar(1), + * - 2), + * - 3) */ template > -std::vector Split(const std::span& sp, std::string_view separators) +std::vector Split(const std::span& sp, std::string_view separators, bool include_sep = false) { std::vector ret; auto it = sp.begin(); auto start = it; while (it != sp.end()) { if (separators.find(*it) != std::string::npos) { - ret.emplace_back(start, it); + if (include_sep) { + ret.emplace_back(start, it + 1); + } else { + ret.emplace_back(start, it); + } start = it + 1; } ++it; @@ -128,9 +140,9 @@ std::vector Split(const std::span& sp, std::string_view separator * "foo(bar(1),2),3) on ',' will return {"foo(bar(1)", "2)", "3)"}. 
*/ template > -std::vector Split(const std::span& sp, char sep) +std::vector Split(const std::span& sp, char sep, bool include_sep = false) { - return Split(sp, std::string_view{&sep, 1}); + return Split(sp, std::string_view{&sep, 1}, include_sep); } [[nodiscard]] inline std::vector SplitString(std::string_view str, char sep) From 1894f975032013ef855c438654fbb745512e7982 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 15 Jan 2024 17:09:22 -0500 Subject: [PATCH 036/390] descriptors: Add PubkeyProvider::IsBIP32() --- src/script/descriptor.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 71645c874626..d5e4bc2195f2 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -221,6 +221,9 @@ struct PubkeyProvider /** Make a deep copy of this PubkeyProvider */ virtual std::unique_ptr Clone() const = 0; + + /** Whether this PubkeyProvider is a BIP 32 extended key that can be derived from */ + virtual bool IsBIP32() const = 0; }; class OriginPubkeyProvider final : public PubkeyProvider @@ -251,6 +254,7 @@ class OriginPubkeyProvider final : public PubkeyProvider } bool IsRange() const override { return m_provider->IsRange(); } size_t GetSize() const override { return m_provider->GetSize(); } + bool IsBIP32() const override { return m_provider->IsBIP32(); } std::string ToString(StringType type) const override { return "[" + OriginString(type) + "]" + m_provider->ToString(type); } bool ToPrivateString(const SigningProvider& arg, std::string& ret) const override { @@ -319,6 +323,7 @@ class ConstPubkeyProvider final : public PubkeyProvider } bool IsRange() const override { return false; } size_t GetSize() const override { return m_pubkey.size(); } + bool IsBIP32() const override { return false; } std::string ToString(StringType type) const override { return m_xonly ? 
HexStr(m_pubkey).substr(2) : HexStr(m_pubkey); } bool ToPrivateString(const SigningProvider& arg, std::string& ret) const override { @@ -406,6 +411,7 @@ class BIP32PubkeyProvider final : public PubkeyProvider BIP32PubkeyProvider(uint32_t exp_index, const CExtPubKey& extkey, KeyPath path, DeriveType derive, bool apostrophe) : PubkeyProvider(exp_index), m_root_extkey(extkey), m_path(std::move(path)), m_derive(derive), m_apostrophe(apostrophe) {} bool IsRange() const override { return m_derive != DeriveType::NO; } size_t GetSize() const override { return 33; } + bool IsBIP32() const override { return true; } std::optional GetPubKey(int pos, const SigningProvider& arg, FlatSigningProvider& out, const DescriptorCache* read_cache = nullptr, DescriptorCache* write_cache = nullptr) const override { KeyOriginInfo info; From fac0ee0bfc910a82678a3f8ec13c47967fd7def2 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Tue, 5 Nov 2024 15:09:55 -0500 Subject: [PATCH 037/390] build: Enable secp256k1 musig module --- src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 82bb4de460a0..1099ed39218f 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -31,7 +31,7 @@ message("Configuring secp256k1 subtree...") set(SECP256K1_DISABLE_SHARED ON CACHE BOOL "" FORCE) set(SECP256K1_ENABLE_MODULE_ECDH OFF CACHE BOOL "" FORCE) set(SECP256K1_ENABLE_MODULE_RECOVERY ON CACHE BOOL "" FORCE) -set(SECP256K1_ENABLE_MODULE_MUSIG OFF CACHE BOOL "" FORCE) +set(SECP256K1_ENABLE_MODULE_MUSIG ON CACHE BOOL "" FORCE) set(SECP256K1_BUILD_BENCHMARK OFF CACHE BOOL "" FORCE) set(SECP256K1_BUILD_TESTS ${BUILD_TESTS} CACHE BOOL "" FORCE) set(SECP256K1_BUILD_EXHAUSTIVE_TESTS ${BUILD_TESTS} CACHE BOOL "" FORCE) From 8ecea91bf296b8fae8b84c3dbf68d5703821cb79 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 22 Jan 2024 16:43:26 -0500 Subject: [PATCH 038/390] sign: Add GetMuSig2ParticipantPubkeys to SigningProvider --- src/script/signingprovider.cpp | 13 +++++++++++++ src/script/signingprovider.h | 4 ++++ 2 files changed, 17 insertions(+) diff --git a/src/script/signingprovider.cpp b/src/script/signingprovider.cpp index 07a1956eabc4..792796b0f149 100644 --- a/src/script/signingprovider.cpp +++ b/src/script/signingprovider.cpp @@ -52,6 +52,11 @@ bool HidingSigningProvider::GetTaprootBuilder(const XOnlyPubKey& output_key, Tap { return m_provider->GetTaprootBuilder(output_key, builder); } +std::vector HidingSigningProvider::GetMuSig2ParticipantPubkeys(const CPubKey& pubkey) const +{ + if (m_hide_origin) return {}; + return m_provider->GetMuSig2ParticipantPubkeys(pubkey); +} bool FlatSigningProvider::GetCScript(const CScriptID& scriptid, CScript& script) const { return LookupHelper(scripts, scriptid, script); } bool FlatSigningProvider::GetPubKey(const CKeyID& keyid, CPubKey& pubkey) const { return LookupHelper(pubkeys, keyid, pubkey); } @@ -82,6 +87,13 @@ bool FlatSigningProvider::GetTaprootBuilder(const XOnlyPubKey& output_key, Tapro return LookupHelper(tr_trees, output_key, builder); } +std::vector FlatSigningProvider::GetMuSig2ParticipantPubkeys(const CPubKey& pubkey) const +{ + std::vector participant_pubkeys; + LookupHelper(aggregate_pubkeys, pubkey, participant_pubkeys); + return participant_pubkeys; +} + FlatSigningProvider& FlatSigningProvider::Merge(FlatSigningProvider&& b) { scripts.merge(b.scripts); @@ -89,6 +101,7 @@ FlatSigningProvider& FlatSigningProvider::Merge(FlatSigningProvider&& b) keys.merge(b.keys); origins.merge(b.origins); 
tr_trees.merge(b.tr_trees); + aggregate_pubkeys.merge(b.aggregate_pubkeys); return *this; } diff --git a/src/script/signingprovider.h b/src/script/signingprovider.h index f4c823be393f..1da58bf6f928 100644 --- a/src/script/signingprovider.h +++ b/src/script/signingprovider.h @@ -161,6 +161,7 @@ class SigningProvider virtual bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const { return false; } virtual bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const { return false; } virtual bool GetTaprootBuilder(const XOnlyPubKey& output_key, TaprootBuilder& builder) const { return false; } + virtual std::vector GetMuSig2ParticipantPubkeys(const CPubKey& pubkey) const { return {}; } bool GetKeyByXOnly(const XOnlyPubKey& pubkey, CKey& key) const { @@ -204,6 +205,7 @@ class HidingSigningProvider : public SigningProvider bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override; bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const override; bool GetTaprootBuilder(const XOnlyPubKey& output_key, TaprootBuilder& builder) const override; + std::vector GetMuSig2ParticipantPubkeys(const CPubKey& pubkey) const override; }; struct FlatSigningProvider final : public SigningProvider @@ -213,6 +215,7 @@ struct FlatSigningProvider final : public SigningProvider std::map> origins; std::map keys; std::map tr_trees; /** Map from output key to Taproot tree (which can then make the TaprootSpendData */ + std::map> aggregate_pubkeys; /** MuSig2 aggregate pubkeys */ bool GetCScript(const CScriptID& scriptid, CScript& script) const override; bool GetPubKey(const CKeyID& keyid, CPubKey& pubkey) const override; @@ -221,6 +224,7 @@ struct FlatSigningProvider final : public SigningProvider bool GetKey(const CKeyID& keyid, CKey& key) const override; bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const override; bool GetTaprootBuilder(const XOnlyPubKey& output_key, TaprootBuilder& builder) const override; + std::vector GetMuSig2ParticipantPubkeys(const CPubKey& pubkey) const override; FlatSigningProvider& Merge(FlatSigningProvider&& b) LIFETIMEBOUND; }; From d00d95437dd113a23ccd556c25a77bb04bce23f7 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 22 Jan 2024 15:18:28 -0500 Subject: [PATCH 039/390] Add MuSig2 Keyagg Cache helper functions secp256k1 provides us secp256k1_musig_keyagg_cache objects, which are used as part of session info and to get the aggregate pubkey. These helper functions convert between the secp256k1 C objects and the Bitcoin Core C++ objects. --- src/CMakeLists.txt | 1 + src/musig.cpp | 53 ++++++++++++++++++++++++++++++++++++++++++++++ src/musig.h | 22 +++++++++++++++++++ 3 files changed, 76 insertions(+) create mode 100644 src/musig.cpp create mode 100644 src/musig.h diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 1099ed39218f..1dbabb57d67a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -161,6 +161,7 @@ add_library(bitcoin_common STATIC EXCLUDE_FROM_ALL key.cpp key_io.cpp merkleblock.cpp + musig.cpp net_permissions.cpp net_types.cpp netaddress.cpp diff --git a/src/musig.cpp b/src/musig.cpp new file mode 100644 index 000000000000..b33295431273 --- /dev/null +++ b/src/musig.cpp @@ -0,0 +1,53 @@ +// Copyright (c) 2024-present The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+ +#include + +#include + +bool GetMuSig2KeyAggCache(const std::vector& pubkeys, secp256k1_musig_keyagg_cache& keyagg_cache) +{ + // Parse the pubkeys + std::vector secp_pubkeys; + std::vector pubkey_ptrs; + for (const CPubKey& pubkey : pubkeys) { + if (!secp256k1_ec_pubkey_parse(secp256k1_context_static, &secp_pubkeys.emplace_back(), pubkey.data(), pubkey.size())) { + return false; + } + } + pubkey_ptrs.reserve(secp_pubkeys.size()); + for (const secp256k1_pubkey& p : secp_pubkeys) { + pubkey_ptrs.push_back(&p); + } + + // Aggregate the pubkey + if (!secp256k1_musig_pubkey_agg(secp256k1_context_static, nullptr, &keyagg_cache, pubkey_ptrs.data(), pubkey_ptrs.size())) { + return false; + } + return true; +} + +std::optional GetCPubKeyFromMuSig2KeyAggCache(secp256k1_musig_keyagg_cache& keyagg_cache) +{ + // Get the plain aggregated pubkey + secp256k1_pubkey agg_pubkey; + if (!secp256k1_musig_pubkey_get(secp256k1_context_static, &agg_pubkey, &keyagg_cache)) { + return std::nullopt; + } + + // Turn into CPubKey + unsigned char ser_agg_pubkey[CPubKey::COMPRESSED_SIZE]; + size_t ser_agg_pubkey_len = CPubKey::COMPRESSED_SIZE; + secp256k1_ec_pubkey_serialize(secp256k1_context_static, ser_agg_pubkey, &ser_agg_pubkey_len, &agg_pubkey, SECP256K1_EC_COMPRESSED); + return CPubKey(ser_agg_pubkey, ser_agg_pubkey + ser_agg_pubkey_len); +} + +std::optional MuSig2AggregatePubkeys(const std::vector& pubkeys) +{ + secp256k1_musig_keyagg_cache keyagg_cache; + if (!GetMuSig2KeyAggCache(pubkeys, keyagg_cache)) { + return std::nullopt; + } + return GetCPubKeyFromMuSig2KeyAggCache(keyagg_cache); +} diff --git a/src/musig.h b/src/musig.h new file mode 100644 index 000000000000..91aab1d3364a --- /dev/null +++ b/src/musig.h @@ -0,0 +1,22 @@ +// Copyright (c) 2024-present The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or https://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_MUSIG_H +#define BITCOIN_MUSIG_H + +#include + +#include +#include + +struct secp256k1_musig_keyagg_cache; + +//! Create a secp256k1_musig_keyagg_cache from the pubkeys in their current order. This is necessary for most MuSig2 operations +bool GetMuSig2KeyAggCache(const std::vector& pubkeys, secp256k1_musig_keyagg_cache& keyagg_cache); +//! Retrieve the full aggregate pubkey from the secp256k1_musig_keyagg_cache +std::optional GetCPubKeyFromMuSig2KeyAggCache(secp256k1_musig_keyagg_cache& cache); +//! 
Compute the full aggregate pubkey from the given participant pubkeys in their current order +std::optional MuSig2AggregatePubkeys(const std::vector& pubkeys); + +#endif // BITCOIN_MUSIG_H From eeeec1579ec5a3aa7b10ff62f87d197ae311a666 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Mon, 2 Jun 2025 13:46:42 +0200 Subject: [PATCH 040/390] rpc: Use type-safe exception to pass RPC help --- src/rpc/server.cpp | 7 ++----- src/rpc/util.cpp | 2 +- src/rpc/util.h | 4 ++++ 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 8631ae0adfef..8d120f41ef40 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -97,11 +97,8 @@ std::string CRPCTable::help(const std::string& strCommand, const JSONRPCRequest& UniValue unused_result; if (setDone.insert(pcmd->unique_id).second) pcmd->actor(jreq, unused_result, /*last_handler=*/true); - } - catch (const std::exception& e) - { - // Help text is returned in an exception - std::string strHelp = std::string(e.what()); + } catch (const HelpResult& e) { + std::string strHelp{e.what()}; if (strCommand == "") { if (strHelp.find('\n') != std::string::npos) diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index 5ddc02c94eea..5da02b4df4e4 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -645,7 +645,7 @@ UniValue RPCHelpMan::HandleRequest(const JSONRPCRequest& request) const * the user is asking for help information, and throw help when appropriate. */ if (request.mode == JSONRPCRequest::GET_HELP || !IsValidNumArgs(request.params.size())) { - throw std::runtime_error(ToString()); + throw HelpResult{ToString()}; } UniValue arg_mismatch{UniValue::VOBJ}; for (size_t i{0}; i < m_args.size(); ++i) { diff --git a/src/rpc/util.h b/src/rpc/util.h index b2387a4c8eff..1650d2da1f6e 100644 --- a/src/rpc/util.h +++ b/src/rpc/util.h @@ -67,6 +67,10 @@ class FillableSigningProvider; class CScript; struct Sections; +struct HelpResult : std::runtime_error { + explicit HelpResult(const std::string& msg) : std::runtime_error{msg} {} +}; + /** * Gets all existing output types formatted for RPC help sections. * From fa946520d229ae45b30519bccc9eaa2c47b4a093 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Mon, 2 Jun 2025 14:16:06 +0200 Subject: [PATCH 041/390] refactor: Use structured binding for-loop --- src/rpc/server.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 8d120f41ef40..bd99615bbf0a 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -1,5 +1,5 @@ // Copyright (c) 2010 Satoshi Nakamoto -// Copyright (c) 2009-2022 The Bitcoin Core developers +// Copyright (c) 2009-present The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
@@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -79,15 +80,13 @@ std::string CRPCTable::help(const std::string& strCommand, const JSONRPCRequest& for (const auto& entry : mapCommands) vCommands.emplace_back(entry.second.front()->category + entry.first, entry.second.front()); - sort(vCommands.begin(), vCommands.end()); + std::ranges::sort(vCommands); JSONRPCRequest jreq = helpreq; jreq.mode = JSONRPCRequest::GET_HELP; jreq.params = UniValue(); - for (const std::pair& command : vCommands) - { - const CRPCCommand *pcmd = command.second; + for (const auto& [_, pcmd] : vCommands) { std::string strMethod = pcmd->name; if ((strCommand != "" || pcmd->category == "hidden") && strMethod != strCommand) continue; From 3c39a55e64bedfc384f6a7b9de1410bc8b46a9bb Mon Sep 17 00:00:00 2001 From: Martin Zumsande Date: Thu, 18 Jul 2024 12:53:57 -0400 Subject: [PATCH 042/390] validation: Add ancestors of reconsiderblock to setBlockIndexCandidates When we call reconsiderblock for some block, ResetBlockFailureFlags puts the descendants of that block into setBlockIndexCandidates (if they meet the criteria, i.e. have more work than the tip etc.). We also clear the failure flags of the ancestors, but we never put any of those into setBlockIndexCandidates; this is wrong and could lead to failures in CheckBlockIndex(). --- src/validation.cpp | 13 ++----------- src/validation.h | 2 +- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index d5e92600d4f3..495154ad616c 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -3849,9 +3849,9 @@ void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) { int nHeight = pindex->nHeight; - // Remove the invalidity flag from this block and all its descendants. + // Remove the invalidity flag from this block and all its descendants and ancestors. for (auto& [_, block_index] : m_blockman.m_block_index) { - if (!block_index.IsValid() && block_index.GetAncestor(nHeight) == pindex) { + if (!block_index.IsValid() && (block_index.GetAncestor(nHeight) == pindex || pindex->GetAncestor(block_index.nHeight) == &block_index)) { block_index.nStatus &= ~BLOCK_FAILED_MASK; m_blockman.m_dirty_blockindex.insert(&block_index); if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveNumChainTxs() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), &block_index)) { @@ -3863,15 +3863,6 @@ void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) { } } } - - // Remove the invalidity flag from all ancestors too. - while (pindex != nullptr) { - if (pindex->nStatus & BLOCK_FAILED_MASK) { - pindex->nStatus &= ~BLOCK_FAILED_MASK; - m_blockman.m_dirty_blockindex.insert(pindex); - } - pindex = pindex->pprev; - } } void Chainstate::TryAddBlockIndexCandidate(CBlockIndex* pindex) diff --git a/src/validation.h b/src/validation.h index c7e70e3adbc0..8ba126b9931e 100644 --- a/src/validation.h +++ b/src/validation.h @@ -731,7 +731,7 @@ class Chainstate /** Set invalidity status to all descendants of a block */ void SetBlockFailureFlags(CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); - /** Remove invalidity status from a block and its descendants. */ + /** Remove invalidity status from a block, its descendants and ancestors and reconsider them for activation */ void ResetBlockFailureFlags(CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Replay blocks that aren't fully applied to the database.
*/ From 86d98b94e5469e6476dc87f54b8ac25074603f0c Mon Sep 17 00:00:00 2001 From: stratospher <44024636+stratospher@users.noreply.github.com> Date: Fri, 28 Feb 2025 23:45:53 +0530 Subject: [PATCH 043/390] test: verify that ancestors of a reconsidered block can become the chain tip When we reconsiderblock, previously only the block and its descendants were considered as chain tip candidates/inserted into setBlockIndexCandidates. ex: on this chain, with block 4 invalidated 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> header 7 blocks 4, 5, 6, header 7 have BLOCK_FAILED_* flags set previously: - if we reconsiderblock header 7, the chain would have all the BLOCK_FAILED_* flags cleared but would report chain tip as block 3. - after restart, it reports correct chain tip block 6. now: - if we reconsiderblock header 7, the correct chain tip block 6 is reported since ancestors are also considered as chain tip candidates/inserted into setBlockIndexCandidates. Co-authored-by: Martin Zumsande --- test/functional/rpc_invalidateblock.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/test/functional/rpc_invalidateblock.py b/test/functional/rpc_invalidateblock.py index e2eb033f09c7..031f1dd86ce9 100755 --- a/test/functional/rpc_invalidateblock.py +++ b/test/functional/rpc_invalidateblock.py @@ -85,6 +85,23 @@ def run_test(self): self.wait_until(lambda: self.nodes[0].getblockcount() == 4, timeout=5) self.wait_until(lambda: self.nodes[1].getblockcount() == 4, timeout=5) + self.log.info("Verify that ancestors can become chain tip candidates when we reconsider blocks") + # Invalidate node0's current chain (1' -> 2' -> 3' -> 4') so that we don't reorg back to it in this test + badhash = self.nodes[0].getblockhash(1) + self.nodes[0].invalidateblock(badhash) + # Reconsider the tip so that node0's chain becomes this chain again : 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> header 7 + self.nodes[0].reconsiderblock(tip) + blockhash_3 = self.nodes[0].getblockhash(3) + blockhash_4 = self.nodes[0].getblockhash(4) + blockhash_6 = self.nodes[0].getblockhash(6) + assert_equal(self.nodes[0].getbestblockhash(), blockhash_6) + + # Invalidate block 4 so that chain becomes : 1 -> 2 -> 3 + self.nodes[0].invalidateblock(blockhash_4) + assert_equal(self.nodes[0].getbestblockhash(), blockhash_3) + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 3) + assert_equal(self.nodes[0].getblockchaininfo()['headers'], 3) + self.log.info("Verify that we reconsider all ancestors as well") blocks = self.generatetodescriptor(self.nodes[1], 10, ADDRESS_BCRT1_UNSPENDABLE_DESCRIPTOR, sync_fun=self.no_op) assert_equal(self.nodes[1].getbestblockhash(), blocks[-1]) @@ -111,6 +128,14 @@ def run_test(self): # Should report consistent blockchain info assert_equal(self.nodes[1].getblockchaininfo()["headers"], self.nodes[1].getblockchaininfo()["blocks"]) + # Reconsider the header + self.nodes[0].reconsiderblock(block.hash) + # Since header doesn't have block data, it can't be chain tip + # Check if it's possible for an ancestor (with block data) to be the chain tip + assert_equal(self.nodes[0].getbestblockhash(), blockhash_6) + assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 6) + assert_equal(self.nodes[0].getblockchaininfo()['headers'], 7) + self.log.info("Verify that invalidating an unknown block throws an error") assert_raises_rpc_error(-5, "Block not found", self.nodes[1].invalidateblock, "00" * 32) assert_equal(self.nodes[1].getbestblockhash(), blocks[-1]) From 8cc3ac6c2328873e57c31f237d194c5f22b6748a Mon Sep 17 00:00:00 2001 From: 
Martin Zumsande Date: Mon, 24 Mar 2025 14:54:55 -0400 Subject: [PATCH 044/390] validation: Don't use IsValid() to filter for invalid blocks IsValid() also returns false for blocks that have not been validated yet up to the default validity level of BLOCK_VALID_TRANSACTIONS but are not marked as invalid - e.g. if we only know the header. Here, we specifically want to filter for invalid blocks. Also removes the default arg from IsValid() which is now unused outside of tests, to prevent this kind of misuse for the future. Co-authored-by: TheCharlatan --- src/chain.h | 2 +- src/test/fuzz/chain.cpp | 2 +- src/validation.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/chain.h b/src/chain.h index c6d1640768ee..f5bfdb2fb4b0 100644 --- a/src/chain.h +++ b/src/chain.h @@ -292,7 +292,7 @@ class CBlockIndex std::string ToString() const; //! Check whether this block index entry is valid up to the passed validity level. - bool IsValid(enum BlockStatus nUpTo = BLOCK_VALID_TRANSACTIONS) const + bool IsValid(enum BlockStatus nUpTo) const EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); diff --git a/src/test/fuzz/chain.cpp b/src/test/fuzz/chain.cpp index 0363f317b63f..09053e4815c0 100644 --- a/src/test/fuzz/chain.cpp +++ b/src/test/fuzz/chain.cpp @@ -30,7 +30,7 @@ FUZZ_TARGET(chain) (void)disk_block_index->GetMedianTimePast(); (void)disk_block_index->GetUndoPos(); (void)disk_block_index->HaveNumChainTxs(); - (void)disk_block_index->IsValid(); + (void)disk_block_index->IsValid(BLOCK_VALID_TRANSACTIONS); } const CBlockHeader block_header = disk_block_index->GetBlockHeader(); diff --git a/src/validation.cpp b/src/validation.cpp index 495154ad616c..72ed6e8fe90a 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -3851,7 +3851,7 @@ void Chainstate::ResetBlockFailureFlags(CBlockIndex *pindex) { // Remove the invalidity flag from this block and all its descendants and ancestors. for (auto& [_, block_index] : m_blockman.m_block_index) { - if (!block_index.IsValid() && (block_index.GetAncestor(nHeight) == pindex || pindex->GetAncestor(block_index.nHeight) == &block_index)) { + if ((block_index.nStatus & BLOCK_FAILED_MASK) && (block_index.GetAncestor(nHeight) == pindex || pindex->GetAncestor(block_index.nHeight) == &block_index)) { block_index.nStatus &= ~BLOCK_FAILED_MASK; m_blockman.m_dirty_blockindex.insert(&block_index); if (block_index.IsValid(BLOCK_VALID_TRANSACTIONS) && block_index.HaveNumChainTxs() && setBlockIndexCandidates.value_comp()(m_chain.Tip(), &block_index)) { From dd8447f70faf6419b4617da3c1b57098e9cd66a6 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Fri, 13 Jun 2025 01:24:50 +0200 Subject: [PATCH 045/390] test: fix catchup loop in outbound eviction functional test The catchup loop in the outbound eviction functional test currently has a small flaw, as the contained waiting for a `getheaders` message just waits for any such message instead of one with the intended block hash. The reason is that the `prev_prev_hash` variable is set incorrectly, since the `tip_header` instance is not updated and its field `.hash` is None. Fix that by updating `tip_header` and use the correct field -- we want the tip header's previous hash (`.hashPrevBlock`). 
--- test/functional/p2p_outbound_eviction.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/functional/p2p_outbound_eviction.py b/test/functional/p2p_outbound_eviction.py index 30ac85e32f31..3d6f154a34ad 100755 --- a/test/functional/p2p_outbound_eviction.py +++ b/test/functional/p2p_outbound_eviction.py @@ -88,6 +88,7 @@ def test_outbound_eviction_unprotected(self): # Generate an additional block so the peers is 2 blocks behind prev_header = from_hex(CBlockHeader(), node.getblockheader(best_block_hash, False)) best_block_hash = self.generateblock(node, output="raw(42)", transactions=[])["hash"] + tip_header = from_hex(CBlockHeader(), node.getblockheader(best_block_hash, False)) peer.sync_with_ping() # Advance time but not enough to evict the peer @@ -100,7 +101,7 @@ def test_outbound_eviction_unprotected(self): # Send a header with the previous tip (so we go back to 1 block behind) peer.send_and_ping(msg_headers([prev_header])) - prev_prev_hash = tip_header.hash + prev_prev_hash = tip_header.hashPrevBlock self.log.info("Create an outbound connection and take some time to catch up, but do it in time") # Check that if the peer manages to catch up within time, the timeouts are removed (and the peer is not disconnected) From 9dfc61d95f0082672a9b90528386e6bcd7014a78 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Fri, 13 Jun 2025 09:40:23 +0200 Subject: [PATCH 046/390] test: detect no external signer connected --- test/functional/mocks/no_signer.py | 29 +++++++++++++++++++++++++++++ test/functional/wallet_signer.py | 27 +++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100755 test/functional/mocks/no_signer.py diff --git a/test/functional/mocks/no_signer.py b/test/functional/mocks/no_signer.py new file mode 100755 index 000000000000..04318dac793c --- /dev/null +++ b/test/functional/mocks/no_signer.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +import argparse +import json +import sys + +def enumerate(args): + sys.stdout.write(json.dumps([])) + +parser = argparse.ArgumentParser(prog='./no_signer.py', description='No external signer connected mock') + +subparsers = parser.add_subparsers(description='Commands', dest='command') +subparsers.required = True + +parser_enumerate = subparsers.add_parser('enumerate', help='list available signers') +parser_enumerate.set_defaults(func=enumerate) + + +if not sys.stdin.isatty(): + buffer = sys.stdin.read() + if buffer and buffer.rstrip() != "": + sys.argv.extend(buffer.rstrip().split(" ")) + +args = parser.parse_args() + +args.func(args) diff --git a/test/functional/wallet_signer.py b/test/functional/wallet_signer.py index c9ac876eb71e..208e4c077529 100755 --- a/test/functional/wallet_signer.py +++ b/test/functional/wallet_signer.py @@ -23,6 +23,10 @@ def mock_signer_path(self): path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'signer.py') return sys.executable + " " + path + def mock_no_connected_signer_path(self): + path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'no_signer.py') + return sys.executable + " " + path + def mock_invalid_signer_path(self): path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'invalid_signer.py') return sys.executable + " " + path @@ -52,6 +56,7 @@ def clear_mock_result(self, node): def run_test(self): self.test_valid_signer() + self.test_disconnected_signer() self.restart_node(1, [f"-signer={self.mock_invalid_signer_path()}", "-keypool=10"]) self.test_invalid_signer() self.restart_node(1, [f"-signer={self.mock_multi_signers_path()}", "-keypool=10"]) @@ -234,6 +239,28 @@ def test_valid_signer(self): # ) # self.clear_mock_result(self.nodes[4]) + def test_disconnected_signer(self): + self.log.info('Test disconnected external signer') + + # First create a wallet with the signer connected + self.nodes[1].createwallet(wallet_name='hww_disconnect', disable_private_keys=True, external_signer=True) + hww = self.nodes[1].get_wallet_rpc('hww_disconnect') + assert_equal(hww.getwalletinfo()["external_signer"], True) + + # Fund wallet + self.nodes[0].sendtoaddress(hww.getnewaddress(address_type="bech32m"), 1) + self.generate(self.nodes[0], 1) + + # Restart node with no signer connected + self.log.debug(f"-signer={self.mock_no_connected_signer_path()}") + self.restart_node(1, [f"-signer={self.mock_no_connected_signer_path()}", "-keypool=10"]) + self.nodes[1].loadwallet('hww_disconnect') + hww = self.nodes[1].get_wallet_rpc('hww_disconnect') + + # Try to spend + dest = hww.getrawchangeaddress() + assert_raises_rpc_error(-25, "External signer not found", hww.send, outputs=[{dest:0.5}]) + def test_invalid_signer(self): self.log.debug(f"-signer={self.mock_invalid_signer_path()}") self.log.info('Test invalid external signer') From fac49094cdb12bd566df5a00832462491a839f81 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Fri, 6 Jun 2025 15:34:18 +0200 Subject: [PATCH 047/390] test: Remove duplicate ConfigParser It is sufficient to parse once. 
--- test/functional/rpc_users.py | 5 +---- test/functional/test_framework/test_framework.py | 9 +++------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/test/functional/rpc_users.py b/test/functional/rpc_users.py index 75507877d545..9e475bbe853f 100755 --- a/test/functional/rpc_users.py +++ b/test/functional/rpc_users.py @@ -17,7 +17,6 @@ import subprocess from random import SystemRandom import string -import configparser import sys from typing import Optional @@ -47,9 +46,7 @@ def conf_setup(self): self.rpcuser = "rpcuser💻" self.rpcpassword = "rpcpassword🔑" - config = configparser.ConfigParser() - config.read_file(open(self.options.configfile)) - gen_rpcauth = config['environment']['RPCAUTH'] + gen_rpcauth = self.config["environment"]["RPCAUTH"] # Generate RPCAUTH with specified password self.rt2password = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI=" diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 6ae2cd07185d..a329c66971ce 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -272,9 +272,8 @@ def parse_args(self, test_file): self.options.timeout_factor = self.options.timeout_factor or (4 if self.options.valgrind else 1) self.options.previous_releases_path = previous_releases_path - config = configparser.ConfigParser() - config.read_file(open(self.options.configfile)) - self.config = config + self.config = configparser.ConfigParser() + self.config.read_file(open(self.options.configfile)) self.binary_paths = self.get_binary_paths() if self.options.v1transport: self.options.v2transport=False @@ -314,10 +313,8 @@ def setup(self): self.options.cachedir = os.path.abspath(self.options.cachedir) - config = self.config - os.environ['PATH'] = os.pathsep.join([ - os.path.join(config['environment']['BUILDDIR'], 'bin'), + os.path.join(self.config["environment"]["BUILDDIR"], "bin"), os.environ['PATH'] ]) From fa91835ec6ad723dc4a379a900ae9331e07a0fba Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Sat, 7 Jun 2025 10:09:30 +0200 Subject: [PATCH 048/390] test: Use lowercase env var as attribute name This is a refactor. 
--- test/functional/test_framework/test_framework.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index a329c66971ce..4b15a7c3202e 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -285,19 +285,19 @@ def get_binary_paths(self): paths = types.SimpleNamespace() binaries = { - "bitcoind": ("bitcoind", "BITCOIND"), - "bitcoin-cli": ("bitcoincli", "BITCOINCLI"), - "bitcoin-util": ("bitcoinutil", "BITCOINUTIL"), - "bitcoin-chainstate": ("bitcoinchainstate", "BITCOINCHAINSTATE"), - "bitcoin-wallet": ("bitcoinwallet", "BITCOINWALLET"), + "bitcoind": "BITCOIND", + "bitcoin-cli": "BITCOINCLI", + "bitcoin-util": "BITCOINUTIL", + "bitcoin-chainstate": "BITCOINCHAINSTATE", + "bitcoin-wallet": "BITCOINWALLET", } - for binary, [attribute_name, env_variable_name] in binaries.items(): + for binary, env_variable_name in binaries.items(): default_filename = os.path.join( self.config["environment"]["BUILDDIR"], "bin", binary + self.config["environment"]["EXEEXT"], ) - setattr(paths, attribute_name, os.getenv(env_variable_name, default=default_filename)) + setattr(paths, env_variable_name.lower(), os.getenv(env_variable_name, default=default_filename)) # BITCOIN_CMD environment variable can be specified to invoke bitcoin # wrapper binary instead of other executables. paths.bitcoin_cmd = shlex.split(os.getenv("BITCOIN_CMD", "")) or None From fac9db6eb0c64333cd202e1053a4dd5fd27344f1 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Sat, 7 Jun 2025 10:24:34 +0200 Subject: [PATCH 049/390] test: Add missing tx util to Binaries --- .github/workflows/ci.yml | 1 + test/functional/test_framework/test_framework.py | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 69c119d73719..63d18a934946 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -260,6 +260,7 @@ jobs: env: BITCOIND: '${{ github.workspace }}\build\bin\Release\bitcoind.exe' BITCOINCLI: '${{ github.workspace }}\build\bin\Release\bitcoin-cli.exe' + BITCOINTX: '${{ github.workspace }}\build\bin\Release\bitcoin-tx.exe' BITCOINUTIL: '${{ github.workspace }}\build\bin\Release\bitcoin-util.exe' BITCOINWALLET: '${{ github.workspace }}\build\bin\Release\bitcoin-wallet.exe' TEST_RUNNER_EXTRA: ${{ github.event_name != 'pull_request' && '--extended' || '' }} diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 4b15a7c3202e..5867398e1f77 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -84,6 +84,10 @@ def rpc_argv(self): # Add -nonamed because "bitcoin rpc" enables -named by default, but bitcoin-cli doesn't return self._argv("rpc", self.paths.bitcoincli) + ["-nonamed"] + def tx_argv(self): + "Return argv array that should be used to invoke bitcoin-tx" + return self._argv("tx", self.paths.bitcointx) + def util_argv(self): "Return argv array that should be used to invoke bitcoin-util" return self._argv("util", self.paths.bitcoinutil) @@ -288,6 +292,7 @@ def get_binary_paths(self): "bitcoind": "BITCOIND", "bitcoin-cli": "BITCOINCLI", "bitcoin-util": "BITCOINUTIL", + "bitcoin-tx": "BITCOINTX", "bitcoin-chainstate": "BITCOINCHAINSTATE", "bitcoin-wallet": "BITCOINWALLET", } From 5d235d50d6dd0cc23175a1484e8ebb6cdc6e2183 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Wed, 28 May 2025 12:55:46 +0200 Subject: [PATCH 050/390] net: assert block hash in `ProcessGetBlockData` and `ProcessMessage` The non-recent-block code path in `ProcessGetBlockData` already has `inv.hash` available (equaling `pindex->GetBlockHash()`). Pass it to `ReadBlock()` and assert that the on-disk header matches the requested hash. The `GETBLOCKTXN` message handler in `ProcessMessage` receives `req.blockhash` from the peer (equaling `pindex->GetBlockHash()`). Pass this hash to `ReadBlock()` for verification and assert that the index lookup matches. Co-authored-by: TheCharlatan Co-authored-by: Hodlinator <172445034+hodlinator@users.noreply.github.com> --- src/net_processing.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index ead69f086df9..8649f564f7cb 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2298,7 +2298,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& } std::shared_ptr pblock; - if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) { + if (a_recent_block && a_recent_block->GetHash() == inv.hash) { pblock = a_recent_block; } else if (inv.IsMsgWitnessBlk()) { // Fast-path: in this case it is possible to serve the block directly from disk, @@ -2318,7 +2318,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& } else { // Send block from disk std::shared_ptr pblockRead = std::make_shared(); - if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos)) { + if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos, inv.hash)) { if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs)); } else { @@ -2364,7 +2364,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& // and we don't feel like constructing the object for them, so // instead we respond with the full, non-compact block. if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) { - if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) { + if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == inv.hash) { MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block); } else { CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()}; @@ -4173,7 +4173,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (!block_pos.IsNull()) { CBlock block; - const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos)}; + const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos, req.blockhash)}; // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get // pruned after we release cs_main above, so this read should never fail. assert(ret); From 2371b9f4ee0b108ebbb8afedc47d73ce0f97d272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Wed, 28 May 2025 14:11:32 +0200 Subject: [PATCH 051/390] test/bench: verify hash in `ComputeFilter` reads Switch to the index-aware `ReadBlock()` overload in `ComputeFilter` so that filter creation will abort if the stored block header hash doesn't match the expected one. In the `readwriteblock` benchmark, pass the expected hash to `ReadBlock()` to match the new signature without affecting benchmark performance. 
--- src/bench/readwriteblock.cpp | 8 +++++--- src/test/util/blockfilter.cpp | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/bench/readwriteblock.cpp b/src/bench/readwriteblock.cpp index 10434b15d200..c0537b1fa699 100644 --- a/src/bench/readwriteblock.cpp +++ b/src/bench/readwriteblock.cpp @@ -42,10 +42,12 @@ static void ReadBlockBench(benchmark::Bench& bench) { const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; auto& blockman{testing_setup->m_node.chainman->m_blockman}; - const auto pos{blockman.WriteBlock(CreateTestBlock(), 413'567)}; - CBlock block; + const auto& test_block{CreateTestBlock()}; + const auto& expected_hash{test_block.GetHash()}; + const auto& pos{blockman.WriteBlock(test_block, 413'567)}; bench.run([&] { - const auto success{blockman.ReadBlock(block, pos)}; + CBlock block; + const auto success{blockman.ReadBlock(block, pos, expected_hash)}; assert(success); }); } diff --git a/src/test/util/blockfilter.cpp b/src/test/util/blockfilter.cpp index 8a17f7f4f512..8716af9fdaa7 100644 --- a/src/test/util/blockfilter.cpp +++ b/src/test/util/blockfilter.cpp @@ -17,7 +17,7 @@ bool ComputeFilter(BlockFilterType filter_type, const CBlockIndex& block_index, LOCK(::cs_main); CBlock block; - if (!blockman.ReadBlock(block, block_index.GetBlockPos())) { + if (!blockman.ReadBlock(block, block_index)) { return false; } From 9341b5333ad54ccdb7c16802ff06c51b956948e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Wed, 28 May 2025 14:12:04 +0200 Subject: [PATCH 052/390] blockstorage: make block read hash checks explicit Dropped the default expected_hash parameter from `ReadBlock()`. In `blockmanager_flush_block_file` tests, we pass {} since the tests would already fail at PoW validation for corrupted blocks. In `ChainstateManager::LoadExternalBlockFile`, we pass {} when processing child blocks because their hashes aren't known beforehand. 
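To make the new calling convention concrete, here is a minimal sketch of the two call patterns this change leaves (illustrative only; `blockman`, `pos` and `expected_hash` stand in for whatever the call site already has at hand):

    CBlock block;
    // Usual case: the caller knows which block it expects at this position, so
    // the read fails if the header found on disk hashes to something else.
    if (!blockman.ReadBlock(block, pos, expected_hash)) {
        // handle the mismatch / corruption
    }
    // Exceptional case: the hash genuinely is not known up front (e.g.
    // out-of-order children during reindex), so skipping the check is now
    // spelled out explicitly at the call site.
    if (!blockman.ReadBlock(block, pos, /*expected_hash=*/{})) {
        // handle the read failure
    }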
--- src/node/blockstorage.h | 2 +- src/test/blockmanager_tests.cpp | 6 +++--- src/validation.cpp | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 9405f68c6cff..0e4c13debbc6 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -411,7 +411,7 @@ class BlockManager void UnlinkPrunedFiles(const std::set& setFilesToPrune) const; /** Functions for disk access for blocks */ - bool ReadBlock(CBlock& block, const FlatFilePos& pos, const std::optional& expected_hash = {}) const; + bool ReadBlock(CBlock& block, const FlatFilePos& pos, const std::optional& expected_hash) const; bool ReadBlock(CBlock& block, const CBlockIndex& index) const; bool ReadRawBlock(std::vector& block, const FlatFilePos& pos) const; diff --git a/src/test/blockmanager_tests.cpp b/src/test/blockmanager_tests.cpp index d1c64e786dce..d3b581437191 100644 --- a/src/test/blockmanager_tests.cpp +++ b/src/test/blockmanager_tests.cpp @@ -190,12 +190,12 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file) BOOST_CHECK_EQUAL(read_block.nVersion, 0); { ASSERT_DEBUG_LOG("Errors in block header"); - BOOST_CHECK(!blockman.ReadBlock(read_block, pos1)); + BOOST_CHECK(!blockman.ReadBlock(read_block, pos1, {})); BOOST_CHECK_EQUAL(read_block.nVersion, 1); } { ASSERT_DEBUG_LOG("Errors in block header"); - BOOST_CHECK(!blockman.ReadBlock(read_block, pos2)); + BOOST_CHECK(!blockman.ReadBlock(read_block, pos2, {})); BOOST_CHECK_EQUAL(read_block.nVersion, 2); } @@ -212,7 +212,7 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file) BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), (TEST_BLOCK_SIZE + STORAGE_HEADER_BYTES) * 2); // Block 2 was not overwritten: - blockman.ReadBlock(read_block, pos2); + BOOST_CHECK(!blockman.ReadBlock(read_block, pos2, {})); BOOST_CHECK_EQUAL(read_block.nVersion, 2); } diff --git a/src/validation.cpp b/src/validation.cpp index 18c775a9db96..35f66271c45a 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -5168,14 +5168,14 @@ void ChainstateManager::LoadExternalBlockFile( while (range.first != range.second) { std::multimap::iterator it = range.first; std::shared_ptr pblockrecursive = std::make_shared(); - if (m_blockman.ReadBlock(*pblockrecursive, it->second)) { - LogDebug(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(), - head.ToString()); + if (m_blockman.ReadBlock(*pblockrecursive, it->second, {})) { + const auto& block_hash{pblockrecursive->GetHash()}; + LogDebug(BCLog::REINDEX, "%s: Processing out of order child %s of %s", __func__, block_hash.ToString(), head.ToString()); LOCK(cs_main); BlockValidationState dummy; if (AcceptBlock(pblockrecursive, dummy, nullptr, true, &it->second, nullptr, true)) { nLoaded++; - queue.push_back(pblockrecursive->GetHash()); + queue.push_back(block_hash); } } range.first++; From fa955154c773c5129f9594982343b440d05957e2 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Fri, 13 Jun 2025 13:12:41 +0200 Subject: [PATCH 053/390] test: Add missing skip_if_no_bitcoin_tx --- test/CMakeLists.txt | 1 + test/config.ini.in | 1 + test/functional/test_framework/test_framework.py | 9 +++++++++ 3 files changed, 11 insertions(+) diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 7de034073547..ab67b1f1d861 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -17,6 +17,7 @@ function(create_test_config) set_configure_variable(ENABLE_WALLET ENABLE_WALLET) set_configure_variable(BUILD_CLI 
BUILD_BITCOIN_CLI) + set_configure_variable(BUILD_TX BUILD_BITCOIN_TX) set_configure_variable(BUILD_UTIL BUILD_BITCOIN_UTIL) set_configure_variable(BUILD_UTIL_CHAINSTATE BUILD_BITCOIN_CHAINSTATE) set_configure_variable(BUILD_WALLET_TOOL BUILD_BITCOIN_WALLET) diff --git a/test/config.ini.in b/test/config.ini.in index 9a297cb3391a..6865fa8a47d3 100644 --- a/test/config.ini.in +++ b/test/config.ini.in @@ -17,6 +17,7 @@ RPCAUTH=@abs_top_srcdir@/share/rpcauth/rpcauth.py # Which components are enabled. These are commented out by cmake if they were disabled during configuration. @ENABLE_WALLET_TRUE@ENABLE_WALLET=true @BUILD_BITCOIN_CLI_TRUE@ENABLE_CLI=true +@BUILD_BITCOIN_TX_TRUE@BUILD_BITCOIN_TX=true @BUILD_BITCOIN_UTIL_TRUE@ENABLE_BITCOIN_UTIL=true @BUILD_BITCOIN_CHAINSTATE_TRUE@ENABLE_BITCOIN_CHAINSTATE=true @BUILD_BITCOIN_WALLET_TRUE@ENABLE_WALLET_TOOL=true diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 5867398e1f77..1014a608ab90 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -1002,6 +1002,11 @@ def skip_if_no_wallet_tool(self): if not self.is_wallet_tool_compiled(): raise SkipTest("bitcoin-wallet has not been compiled") + def skip_if_no_bitcoin_tx(self): + """Skip the running test if bitcoin-tx has not been compiled.""" + if not self.is_bitcoin_tx_compiled(): + raise SkipTest("bitcoin-tx has not been compiled") + def skip_if_no_bitcoin_util(self): """Skip the running test if bitcoin-util has not been compiled.""" if not self.is_bitcoin_util_compiled(): @@ -1056,6 +1061,10 @@ def is_wallet_tool_compiled(self): """Checks whether bitcoin-wallet was compiled.""" return self.config["components"].getboolean("ENABLE_WALLET_TOOL") + def is_bitcoin_tx_compiled(self): + """Checks whether bitcoin-tx was compiled.""" + return self.config["components"].getboolean("BUILD_BITCOIN_TX") + def is_bitcoin_util_compiled(self): """Checks whether bitcoin-util was compiled.""" return self.config["components"].getboolean("ENABLE_BITCOIN_UTIL") From faa18bf287fc02339b7af29d54af862a289bf96d Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Sat, 7 Jun 2025 11:15:09 +0200 Subject: [PATCH 054/390] test: Turn util/test_runner into functional test The moved portion can be reviewed via: --color-moved=dimmed-zebra --color-moved-ws=ignore-all-space --- .github/workflows/ci.yml | 3 - CMakeLists.txt | 2 +- cmake/tests.cmake | 6 -- test/CMakeLists.txt | 2 +- test/README.md | 8 +- test/functional/test_runner.py | 1 + test/functional/tool_utils.py | 147 ++++++++++++++++++++++++++ test/util/test_runner.py | 181 --------------------------------- 8 files changed, 151 insertions(+), 199 deletions(-) create mode 100755 test/functional/tool_utils.py delete mode 100755 test/util/test_runner.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 63d18a934946..2d4985c99745 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -390,9 +390,6 @@ jobs: (Get-Content "test/config.ini") -replace '(?<=^SRCDIR=).*', '${{ github.workspace }}' -replace '(?<=^BUILDDIR=).*', '${{ github.workspace }}' -replace '(?<=^RPCAUTH=).*', '${{ github.workspace }}/share/rpcauth/rpcauth.py' | Set-Content "test/config.ini" Get-Content "test/config.ini" - - name: Run util tests - run: py -3 test/util/test_runner.py - - name: Run rpcauth test run: py -3 test/util/rpcauth-test.py diff --git a/CMakeLists.txt b/CMakeLists.txt index f7241020815c..18d23142a104 
100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -594,7 +594,7 @@ if(Python3_EXECUTABLE) set(PYTHON_COMMAND ${Python3_EXECUTABLE}) else() list(APPEND configure_warnings - "Minimum required Python not found. Utils and rpcauth tests are disabled." + "Minimum required Python not found. Rpcauth tests are disabled." ) endif() diff --git a/cmake/tests.cmake b/cmake/tests.cmake index 279132980017..46104593c85c 100644 --- a/cmake/tests.cmake +++ b/cmake/tests.cmake @@ -2,12 +2,6 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or https://opensource.org/license/mit/. -if(TARGET bitcoin-util AND TARGET bitcoin-tx AND PYTHON_COMMAND) - add_test(NAME util_test_runner - COMMAND ${CMAKE_COMMAND} -E env BITCOINUTIL=$ BITCOINTX=$ ${PYTHON_COMMAND} ${PROJECT_BINARY_DIR}/test/util/test_runner.py - ) -endif() - if(PYTHON_COMMAND) add_test(NAME util_rpcauth_test COMMAND ${PYTHON_COMMAND} ${PROJECT_BINARY_DIR}/test/util/rpcauth-test.py diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index ab67b1f1d861..b7fde825604e 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -37,7 +37,7 @@ file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/fuzz) file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/util) file(GLOB_RECURSE functional_tests RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} functional/*) -foreach(script ${functional_tests} fuzz/test_runner.py util/rpcauth-test.py util/test_runner.py) +foreach(script ${functional_tests} fuzz/test_runner.py util/rpcauth-test.py) if(CMAKE_HOST_WIN32) set(symlink) else() diff --git a/test/README.md b/test/README.md index 74adc644b838..843777da06ec 100644 --- a/test/README.md +++ b/test/README.md @@ -10,10 +10,9 @@ This directory contains the following sets of tests: - [functional](/test/functional) which test the functionality of bitcoind and bitcoin-qt by interacting with them through the RPC and P2P interfaces. -- [util](/test/util) which tests the utilities (bitcoin-util, bitcoin-tx, ...). - [lint](/test/lint/) which perform various static analysis checks. -The util tests are run as part of `ctest` invocation. The fuzz tests, functional +The fuzz tests, functional tests and lint scripts can be run as explained in the sections below. # Running tests locally @@ -321,11 +320,6 @@ perf report -i /path/to/datadir/send-big-msgs.perf.data.xxxx --stdio | c++filt | For ways to generate more granular profiles, see the README in [test/functional](/test/functional). -### Util tests - -Util tests can be run locally by running `build/test/util/test_runner.py`. -Use the `-v` option for verbose output. - ### Lint tests See the README in [test/lint](/test/lint). diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 340a418e104b..e7105cb73652 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -170,6 +170,7 @@ 'wallet_txn_doublespend.py --mineblock', 'tool_bitcoin_chainstate.py', 'tool_wallet.py', + 'tool_utils.py', 'tool_signet_miner.py', 'wallet_txn_clone.py', 'wallet_txn_clone.py --segwit', diff --git a/test/functional/tool_utils.py b/test/functional/tool_utils.py new file mode 100755 index 000000000000..3e36d05a50b9 --- /dev/null +++ b/test/functional/tool_utils.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 +# Copyright 2014 BitPay Inc. +# Copyright 2016-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://opensource.org/license/mit. 
+"""Exercise the utils via json-defined tests.""" + +from test_framework.test_framework import BitcoinTestFramework + +import difflib +import json +import logging +import os +import subprocess +from pathlib import Path + + +class ToolUtils(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 0 # No node/datadir needed + + def setup_network(self): + pass + + def skip_test_if_missing_module(self): + self.skip_if_no_bitcoin_tx() + self.skip_if_no_bitcoin_util() + + def run_test(self): + self.testcase_dir = Path(self.config["environment"]["SRCDIR"]) / "test" / "util" / "data" + self.bins = self.get_binaries() + with open(self.testcase_dir / "bitcoin-util-test.json", encoding="utf8") as f: + input_data = json.loads(f.read()) + + for i, test_obj in enumerate(input_data): + self.log.debug(f"Running [{i}]: " + test_obj["description"]) + self.test_one(test_obj) + + def test_one(self, testObj): + """Runs a single test, comparing output and RC to expected output and RC. + + Raises an error if input can't be read, executable fails, or output/RC + are not as expected. Error is caught by bctester() and reported. + """ + # Get the exec names and arguments + if testObj["exec"] == "./bitcoin-util": + execrun = self.bins.util_argv() + testObj["args"] + elif testObj["exec"] == "./bitcoin-tx": + execrun = self.bins.tx_argv() + testObj["args"] + + # Read the input data (if there is any) + inputData = None + if "input" in testObj: + with open(self.testcase_dir / testObj["input"], encoding="utf8") as f: + inputData = f.read() + + # Read the expected output data (if there is any) + outputFn = None + outputData = None + outputType = None + if "output_cmp" in testObj: + outputFn = testObj['output_cmp'] + outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare) + try: + with open(self.testcase_dir / outputFn, encoding="utf8") as f: + outputData = f.read() + except Exception: + logging.error("Output file " + outputFn + " cannot be opened") + raise + if not outputData: + logging.error("Output data missing for " + outputFn) + raise Exception + if not outputType: + logging.error("Output file %s does not have a file extension" % outputFn) + raise Exception + + # Run the test + try: + res = subprocess.run(execrun, capture_output=True, text=True, input=inputData) + except OSError: + logging.error("OSError, Failed to execute " + str(execrun)) + raise + + if outputData: + data_mismatch, formatting_mismatch = False, False + # Parse command output and expected output + try: + a_parsed = parse_output(res.stdout, outputType) + except Exception as e: + logging.error(f"Error parsing command output as {outputType}: '{str(e)}'; res: {str(res)}") + raise + try: + b_parsed = parse_output(outputData, outputType) + except Exception as e: + logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e)) + raise + # Compare data + if a_parsed != b_parsed: + logging.error(f"Output data mismatch for {outputFn} (format {outputType}); res: {str(res)}") + data_mismatch = True + # Compare formatting + if res.stdout != outputData: + error_message = f"Output formatting mismatch for {outputFn}:\nres: {str(res)}\n" + error_message += "".join(difflib.context_diff(outputData.splitlines(True), + res.stdout.splitlines(True), + fromfile=outputFn, + tofile="returned")) + logging.error(error_message) + formatting_mismatch = True + + assert not data_mismatch and not formatting_mismatch + + # Compare the return code to the expected return code + wantRC = 0 + if 
"return_code" in testObj: + wantRC = testObj['return_code'] + if res.returncode != wantRC: + logging.error(f"Return code mismatch for {outputFn}; res: {str(res)}") + raise Exception + + if "error_txt" in testObj: + want_error = testObj["error_txt"] + # A partial match instead of an exact match makes writing tests easier + # and should be sufficient. + if want_error not in res.stderr: + logging.error(f"Error mismatch:\nExpected: {want_error}\nReceived: {res.stderr.rstrip()}\nres: {str(res)}") + raise Exception + else: + if res.stderr: + logging.error(f"Unexpected error received: {res.stderr.rstrip()}\nres: {str(res)}") + raise Exception + + +def parse_output(a, fmt): + """Parse the output according to specified format. + + Raise an error if the output can't be parsed.""" + if fmt == 'json': # json: compare parsed data + return json.loads(a) + elif fmt == 'hex': # hex: parse and compare binary data + return bytes.fromhex(a.strip()) + else: + raise NotImplementedError("Don't know how to compare %s" % fmt) + + +if __name__ == "__main__": + ToolUtils(__file__).main() diff --git a/test/util/test_runner.py b/test/util/test_runner.py deleted file mode 100755 index 11400b32ba33..000000000000 --- a/test/util/test_runner.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2014 BitPay Inc. -# Copyright 2016-present The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test framework for bitcoin utils. - -Runs automatically during `ctest --test-dir build/`. - -Can also be run manually.""" - -import argparse -import configparser -import difflib -import json -import logging -import os -import pprint -import subprocess -import sys - -def main(): - config = configparser.ConfigParser() - config.optionxform = str - with open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8") as f: - config.read_file(f) - env_conf = dict(config.items('environment')) - - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument('-v', '--verbose', action='store_true') - args = parser.parse_args() - verbose = args.verbose - - if verbose: - level = logging.DEBUG - else: - level = logging.ERROR - formatter = '%(asctime)s - %(levelname)s - %(message)s' - # Add the format/level to the logger - logging.basicConfig(format=formatter, level=level) - - bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf) - -def bctester(testDir, input_basename, buildenv): - """ Loads and parses the input file, runs all tests and reports results""" - input_filename = os.path.join(testDir, input_basename) - with open(input_filename, encoding="utf8") as f: - raw_data = f.read() - input_data = json.loads(raw_data) - - failed_testcases = [] - - for testObj in input_data: - try: - bctest(testDir, testObj, buildenv) - logging.info("PASSED: " + testObj["description"]) - except Exception: - logging.info("FAILED: " + testObj["description"]) - failed_testcases.append(testObj["description"]) - - if failed_testcases: - error_message = "FAILED_TESTCASES:\n" - error_message += pprint.pformat(failed_testcases, width=400) - logging.error(error_message) - sys.exit(1) - else: - sys.exit(0) - -def bctest(testDir, testObj, buildenv): - """Runs a single test, comparing output and RC to expected output and RC. - - Raises an error if input can't be read, executable fails, or output/RC - are not as expected. Error is caught by bctester() and reported. 
- """ - # Get the exec names and arguments - execprog = os.path.join(buildenv["BUILDDIR"], "bin", testObj["exec"] + buildenv["EXEEXT"]) - if testObj["exec"] == "./bitcoin-util": - execprog = os.getenv("BITCOINUTIL", default=execprog) - elif testObj["exec"] == "./bitcoin-tx": - execprog = os.getenv("BITCOINTX", default=execprog) - - execargs = testObj['args'] - execrun = [execprog] + execargs - - # Read the input data (if there is any) - inputData = None - if "input" in testObj: - filename = os.path.join(testDir, testObj["input"]) - with open(filename, encoding="utf8") as f: - inputData = f.read() - - # Read the expected output data (if there is any) - outputFn = None - outputData = None - outputType = None - if "output_cmp" in testObj: - outputFn = testObj['output_cmp'] - outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare) - try: - with open(os.path.join(testDir, outputFn), encoding="utf8") as f: - outputData = f.read() - except Exception: - logging.error("Output file " + outputFn + " cannot be opened") - raise - if not outputData: - logging.error("Output data missing for " + outputFn) - raise Exception - if not outputType: - logging.error("Output file %s does not have a file extension" % outputFn) - raise Exception - - # Run the test - try: - res = subprocess.run(execrun, capture_output=True, text=True, input=inputData) - except OSError: - logging.error("OSError, Failed to execute " + execprog) - raise - - if outputData: - data_mismatch, formatting_mismatch = False, False - # Parse command output and expected output - try: - a_parsed = parse_output(res.stdout, outputType) - except Exception as e: - logging.error(f"Error parsing command output as {outputType}: '{str(e)}'; res: {str(res)}") - raise - try: - b_parsed = parse_output(outputData, outputType) - except Exception as e: - logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e)) - raise - # Compare data - if a_parsed != b_parsed: - logging.error(f"Output data mismatch for {outputFn} (format {outputType}); res: {str(res)}") - data_mismatch = True - # Compare formatting - if res.stdout != outputData: - error_message = f"Output formatting mismatch for {outputFn}:\nres: {str(res)}\n" - error_message += "".join(difflib.context_diff(outputData.splitlines(True), - res.stdout.splitlines(True), - fromfile=outputFn, - tofile="returned")) - logging.error(error_message) - formatting_mismatch = True - - assert not data_mismatch and not formatting_mismatch - - # Compare the return code to the expected return code - wantRC = 0 - if "return_code" in testObj: - wantRC = testObj['return_code'] - if res.returncode != wantRC: - logging.error(f"Return code mismatch for {outputFn}; res: {str(res)}") - raise Exception - - if "error_txt" in testObj: - want_error = testObj["error_txt"] - # A partial match instead of an exact match makes writing tests easier - # and should be sufficient. - if want_error not in res.stderr: - logging.error(f"Error mismatch:\nExpected: {want_error}\nReceived: {res.stderr.rstrip()}\nres: {str(res)}") - raise Exception - else: - if res.stderr: - logging.error(f"Unexpected error received: {res.stderr.rstrip()}\nres: {str(res)}") - raise Exception - - -def parse_output(a, fmt): - """Parse the output according to specified format. 
- - Raise an error if the output can't be parsed.""" - if fmt == 'json': # json: compare parsed data - return json.loads(a) - elif fmt == 'hex': # hex: parse and compare binary data - return bytes.fromhex(a.strip()) - else: - raise NotImplementedError("Don't know how to compare %s" % fmt) - -if __name__ == '__main__': - main() From fa2f1c55b7da729eb6bb9f88bf48459235cc2664 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Fri, 13 Jun 2025 10:05:40 +0200 Subject: [PATCH 055/390] move-only util data to test/functional/data/util --- test/{util/data => functional/data/util}/bitcoin-util-test.json | 0 test/{util/data => functional/data/util}/blanktxv1.hex | 0 test/{util/data => functional/data/util}/blanktxv1.json | 0 test/{util/data => functional/data/util}/blanktxv2.hex | 0 test/{util/data => functional/data/util}/blanktxv2.json | 0 test/{util/data => functional/data/util}/tt-delin1-out.hex | 0 test/{util/data => functional/data/util}/tt-delin1-out.json | 0 test/{util/data => functional/data/util}/tt-delout1-out.hex | 0 test/{util/data => functional/data/util}/tt-delout1-out.json | 0 .../data => functional/data/util}/tt-locktime317000-out.hex | 0 .../data => functional/data/util}/tt-locktime317000-out.json | 0 test/{util/data => functional/data/util}/tx394b54bb.hex | 0 test/{util/data => functional/data/util}/txcreate1.hex | 0 test/{util/data => functional/data/util}/txcreate1.json | 0 test/{util/data => functional/data/util}/txcreate2.hex | 0 test/{util/data => functional/data/util}/txcreate2.json | 0 test/{util/data => functional/data/util}/txcreatedata1.hex | 0 test/{util/data => functional/data/util}/txcreatedata1.json | 0 test/{util/data => functional/data/util}/txcreatedata2.hex | 0 test/{util/data => functional/data/util}/txcreatedata2.json | 0 test/{util/data => functional/data/util}/txcreatedata_seq0.hex | 0 test/{util/data => functional/data/util}/txcreatedata_seq0.json | 0 test/{util/data => functional/data/util}/txcreatedata_seq1.hex | 0 test/{util/data => functional/data/util}/txcreatedata_seq1.json | 0 test/{util/data => functional/data/util}/txcreatemultisig1.hex | 0 test/{util/data => functional/data/util}/txcreatemultisig1.json | 0 test/{util/data => functional/data/util}/txcreatemultisig2.hex | 0 test/{util/data => functional/data/util}/txcreatemultisig2.json | 0 test/{util/data => functional/data/util}/txcreatemultisig3.hex | 0 test/{util/data => functional/data/util}/txcreatemultisig3.json | 0 test/{util/data => functional/data/util}/txcreatemultisig4.hex | 0 test/{util/data => functional/data/util}/txcreatemultisig4.json | 0 test/{util/data => functional/data/util}/txcreatemultisig5.json | 0 test/{util/data => functional/data/util}/txcreateoutpubkey1.hex | 0 .../{util/data => functional/data/util}/txcreateoutpubkey1.json | 0 test/{util/data => functional/data/util}/txcreateoutpubkey2.hex | 0 .../{util/data => functional/data/util}/txcreateoutpubkey2.json | 0 test/{util/data => functional/data/util}/txcreateoutpubkey3.hex | 0 .../{util/data => functional/data/util}/txcreateoutpubkey3.json | 0 test/{util/data => functional/data/util}/txcreatescript1.hex | 0 test/{util/data => functional/data/util}/txcreatescript1.json | 0 test/{util/data => functional/data/util}/txcreatescript2.hex | 0 test/{util/data => functional/data/util}/txcreatescript2.json | 0 test/{util/data => functional/data/util}/txcreatescript3.hex | 0 test/{util/data => functional/data/util}/txcreatescript3.json | 0 test/{util/data => functional/data/util}/txcreatescript4.hex | 0 
test/{util/data => functional/data/util}/txcreatescript4.json | 0 test/{util/data => functional/data/util}/txcreatescript5.hex | 0 test/{util/data => functional/data/util}/txcreatescript6.hex | 0 .../{util/data => functional/data/util}/txcreatesignsegwit1.hex | 0 test/{util/data => functional/data/util}/txcreatesignv1.hex | 0 test/{util/data => functional/data/util}/txcreatesignv1.json | 0 test/{util/data => functional/data/util}/txcreatesignv2.hex | 0 test/{util/data => functional/data/util}/txreplace1.hex | 0 test/{util/data => functional/data/util}/txreplacenoinputs.hex | 0 test/{util/data => functional/data/util}/txreplaceomittedn.hex | 0 .../data => functional/data/util}/txreplacesingleinput.hex | 0 test/functional/tool_utils.py | 2 +- 58 files changed, 1 insertion(+), 1 deletion(-) rename test/{util/data => functional/data/util}/bitcoin-util-test.json (100%) rename test/{util/data => functional/data/util}/blanktxv1.hex (100%) rename test/{util/data => functional/data/util}/blanktxv1.json (100%) rename test/{util/data => functional/data/util}/blanktxv2.hex (100%) rename test/{util/data => functional/data/util}/blanktxv2.json (100%) rename test/{util/data => functional/data/util}/tt-delin1-out.hex (100%) rename test/{util/data => functional/data/util}/tt-delin1-out.json (100%) rename test/{util/data => functional/data/util}/tt-delout1-out.hex (100%) rename test/{util/data => functional/data/util}/tt-delout1-out.json (100%) rename test/{util/data => functional/data/util}/tt-locktime317000-out.hex (100%) rename test/{util/data => functional/data/util}/tt-locktime317000-out.json (100%) rename test/{util/data => functional/data/util}/tx394b54bb.hex (100%) rename test/{util/data => functional/data/util}/txcreate1.hex (100%) rename test/{util/data => functional/data/util}/txcreate1.json (100%) rename test/{util/data => functional/data/util}/txcreate2.hex (100%) rename test/{util/data => functional/data/util}/txcreate2.json (100%) rename test/{util/data => functional/data/util}/txcreatedata1.hex (100%) rename test/{util/data => functional/data/util}/txcreatedata1.json (100%) rename test/{util/data => functional/data/util}/txcreatedata2.hex (100%) rename test/{util/data => functional/data/util}/txcreatedata2.json (100%) rename test/{util/data => functional/data/util}/txcreatedata_seq0.hex (100%) rename test/{util/data => functional/data/util}/txcreatedata_seq0.json (100%) rename test/{util/data => functional/data/util}/txcreatedata_seq1.hex (100%) rename test/{util/data => functional/data/util}/txcreatedata_seq1.json (100%) rename test/{util/data => functional/data/util}/txcreatemultisig1.hex (100%) rename test/{util/data => functional/data/util}/txcreatemultisig1.json (100%) rename test/{util/data => functional/data/util}/txcreatemultisig2.hex (100%) rename test/{util/data => functional/data/util}/txcreatemultisig2.json (100%) rename test/{util/data => functional/data/util}/txcreatemultisig3.hex (100%) rename test/{util/data => functional/data/util}/txcreatemultisig3.json (100%) rename test/{util/data => functional/data/util}/txcreatemultisig4.hex (100%) rename test/{util/data => functional/data/util}/txcreatemultisig4.json (100%) rename test/{util/data => functional/data/util}/txcreatemultisig5.json (100%) rename test/{util/data => functional/data/util}/txcreateoutpubkey1.hex (100%) rename test/{util/data => functional/data/util}/txcreateoutpubkey1.json (100%) rename test/{util/data => functional/data/util}/txcreateoutpubkey2.hex (100%) rename test/{util/data => 
functional/data/util}/txcreateoutpubkey2.json (100%) rename test/{util/data => functional/data/util}/txcreateoutpubkey3.hex (100%) rename test/{util/data => functional/data/util}/txcreateoutpubkey3.json (100%) rename test/{util/data => functional/data/util}/txcreatescript1.hex (100%) rename test/{util/data => functional/data/util}/txcreatescript1.json (100%) rename test/{util/data => functional/data/util}/txcreatescript2.hex (100%) rename test/{util/data => functional/data/util}/txcreatescript2.json (100%) rename test/{util/data => functional/data/util}/txcreatescript3.hex (100%) rename test/{util/data => functional/data/util}/txcreatescript3.json (100%) rename test/{util/data => functional/data/util}/txcreatescript4.hex (100%) rename test/{util/data => functional/data/util}/txcreatescript4.json (100%) rename test/{util/data => functional/data/util}/txcreatescript5.hex (100%) rename test/{util/data => functional/data/util}/txcreatescript6.hex (100%) rename test/{util/data => functional/data/util}/txcreatesignsegwit1.hex (100%) rename test/{util/data => functional/data/util}/txcreatesignv1.hex (100%) rename test/{util/data => functional/data/util}/txcreatesignv1.json (100%) rename test/{util/data => functional/data/util}/txcreatesignv2.hex (100%) rename test/{util/data => functional/data/util}/txreplace1.hex (100%) rename test/{util/data => functional/data/util}/txreplacenoinputs.hex (100%) rename test/{util/data => functional/data/util}/txreplaceomittedn.hex (100%) rename test/{util/data => functional/data/util}/txreplacesingleinput.hex (100%) diff --git a/test/util/data/bitcoin-util-test.json b/test/functional/data/util/bitcoin-util-test.json similarity index 100% rename from test/util/data/bitcoin-util-test.json rename to test/functional/data/util/bitcoin-util-test.json diff --git a/test/util/data/blanktxv1.hex b/test/functional/data/util/blanktxv1.hex similarity index 100% rename from test/util/data/blanktxv1.hex rename to test/functional/data/util/blanktxv1.hex diff --git a/test/util/data/blanktxv1.json b/test/functional/data/util/blanktxv1.json similarity index 100% rename from test/util/data/blanktxv1.json rename to test/functional/data/util/blanktxv1.json diff --git a/test/util/data/blanktxv2.hex b/test/functional/data/util/blanktxv2.hex similarity index 100% rename from test/util/data/blanktxv2.hex rename to test/functional/data/util/blanktxv2.hex diff --git a/test/util/data/blanktxv2.json b/test/functional/data/util/blanktxv2.json similarity index 100% rename from test/util/data/blanktxv2.json rename to test/functional/data/util/blanktxv2.json diff --git a/test/util/data/tt-delin1-out.hex b/test/functional/data/util/tt-delin1-out.hex similarity index 100% rename from test/util/data/tt-delin1-out.hex rename to test/functional/data/util/tt-delin1-out.hex diff --git a/test/util/data/tt-delin1-out.json b/test/functional/data/util/tt-delin1-out.json similarity index 100% rename from test/util/data/tt-delin1-out.json rename to test/functional/data/util/tt-delin1-out.json diff --git a/test/util/data/tt-delout1-out.hex b/test/functional/data/util/tt-delout1-out.hex similarity index 100% rename from test/util/data/tt-delout1-out.hex rename to test/functional/data/util/tt-delout1-out.hex diff --git a/test/util/data/tt-delout1-out.json b/test/functional/data/util/tt-delout1-out.json similarity index 100% rename from test/util/data/tt-delout1-out.json rename to test/functional/data/util/tt-delout1-out.json diff --git a/test/util/data/tt-locktime317000-out.hex 
b/test/functional/data/util/tt-locktime317000-out.hex similarity index 100% rename from test/util/data/tt-locktime317000-out.hex rename to test/functional/data/util/tt-locktime317000-out.hex diff --git a/test/util/data/tt-locktime317000-out.json b/test/functional/data/util/tt-locktime317000-out.json similarity index 100% rename from test/util/data/tt-locktime317000-out.json rename to test/functional/data/util/tt-locktime317000-out.json diff --git a/test/util/data/tx394b54bb.hex b/test/functional/data/util/tx394b54bb.hex similarity index 100% rename from test/util/data/tx394b54bb.hex rename to test/functional/data/util/tx394b54bb.hex diff --git a/test/util/data/txcreate1.hex b/test/functional/data/util/txcreate1.hex similarity index 100% rename from test/util/data/txcreate1.hex rename to test/functional/data/util/txcreate1.hex diff --git a/test/util/data/txcreate1.json b/test/functional/data/util/txcreate1.json similarity index 100% rename from test/util/data/txcreate1.json rename to test/functional/data/util/txcreate1.json diff --git a/test/util/data/txcreate2.hex b/test/functional/data/util/txcreate2.hex similarity index 100% rename from test/util/data/txcreate2.hex rename to test/functional/data/util/txcreate2.hex diff --git a/test/util/data/txcreate2.json b/test/functional/data/util/txcreate2.json similarity index 100% rename from test/util/data/txcreate2.json rename to test/functional/data/util/txcreate2.json diff --git a/test/util/data/txcreatedata1.hex b/test/functional/data/util/txcreatedata1.hex similarity index 100% rename from test/util/data/txcreatedata1.hex rename to test/functional/data/util/txcreatedata1.hex diff --git a/test/util/data/txcreatedata1.json b/test/functional/data/util/txcreatedata1.json similarity index 100% rename from test/util/data/txcreatedata1.json rename to test/functional/data/util/txcreatedata1.json diff --git a/test/util/data/txcreatedata2.hex b/test/functional/data/util/txcreatedata2.hex similarity index 100% rename from test/util/data/txcreatedata2.hex rename to test/functional/data/util/txcreatedata2.hex diff --git a/test/util/data/txcreatedata2.json b/test/functional/data/util/txcreatedata2.json similarity index 100% rename from test/util/data/txcreatedata2.json rename to test/functional/data/util/txcreatedata2.json diff --git a/test/util/data/txcreatedata_seq0.hex b/test/functional/data/util/txcreatedata_seq0.hex similarity index 100% rename from test/util/data/txcreatedata_seq0.hex rename to test/functional/data/util/txcreatedata_seq0.hex diff --git a/test/util/data/txcreatedata_seq0.json b/test/functional/data/util/txcreatedata_seq0.json similarity index 100% rename from test/util/data/txcreatedata_seq0.json rename to test/functional/data/util/txcreatedata_seq0.json diff --git a/test/util/data/txcreatedata_seq1.hex b/test/functional/data/util/txcreatedata_seq1.hex similarity index 100% rename from test/util/data/txcreatedata_seq1.hex rename to test/functional/data/util/txcreatedata_seq1.hex diff --git a/test/util/data/txcreatedata_seq1.json b/test/functional/data/util/txcreatedata_seq1.json similarity index 100% rename from test/util/data/txcreatedata_seq1.json rename to test/functional/data/util/txcreatedata_seq1.json diff --git a/test/util/data/txcreatemultisig1.hex b/test/functional/data/util/txcreatemultisig1.hex similarity index 100% rename from test/util/data/txcreatemultisig1.hex rename to test/functional/data/util/txcreatemultisig1.hex diff --git a/test/util/data/txcreatemultisig1.json b/test/functional/data/util/txcreatemultisig1.json 
similarity index 100% rename from test/util/data/txcreatemultisig1.json rename to test/functional/data/util/txcreatemultisig1.json diff --git a/test/util/data/txcreatemultisig2.hex b/test/functional/data/util/txcreatemultisig2.hex similarity index 100% rename from test/util/data/txcreatemultisig2.hex rename to test/functional/data/util/txcreatemultisig2.hex diff --git a/test/util/data/txcreatemultisig2.json b/test/functional/data/util/txcreatemultisig2.json similarity index 100% rename from test/util/data/txcreatemultisig2.json rename to test/functional/data/util/txcreatemultisig2.json diff --git a/test/util/data/txcreatemultisig3.hex b/test/functional/data/util/txcreatemultisig3.hex similarity index 100% rename from test/util/data/txcreatemultisig3.hex rename to test/functional/data/util/txcreatemultisig3.hex diff --git a/test/util/data/txcreatemultisig3.json b/test/functional/data/util/txcreatemultisig3.json similarity index 100% rename from test/util/data/txcreatemultisig3.json rename to test/functional/data/util/txcreatemultisig3.json diff --git a/test/util/data/txcreatemultisig4.hex b/test/functional/data/util/txcreatemultisig4.hex similarity index 100% rename from test/util/data/txcreatemultisig4.hex rename to test/functional/data/util/txcreatemultisig4.hex diff --git a/test/util/data/txcreatemultisig4.json b/test/functional/data/util/txcreatemultisig4.json similarity index 100% rename from test/util/data/txcreatemultisig4.json rename to test/functional/data/util/txcreatemultisig4.json diff --git a/test/util/data/txcreatemultisig5.json b/test/functional/data/util/txcreatemultisig5.json similarity index 100% rename from test/util/data/txcreatemultisig5.json rename to test/functional/data/util/txcreatemultisig5.json diff --git a/test/util/data/txcreateoutpubkey1.hex b/test/functional/data/util/txcreateoutpubkey1.hex similarity index 100% rename from test/util/data/txcreateoutpubkey1.hex rename to test/functional/data/util/txcreateoutpubkey1.hex diff --git a/test/util/data/txcreateoutpubkey1.json b/test/functional/data/util/txcreateoutpubkey1.json similarity index 100% rename from test/util/data/txcreateoutpubkey1.json rename to test/functional/data/util/txcreateoutpubkey1.json diff --git a/test/util/data/txcreateoutpubkey2.hex b/test/functional/data/util/txcreateoutpubkey2.hex similarity index 100% rename from test/util/data/txcreateoutpubkey2.hex rename to test/functional/data/util/txcreateoutpubkey2.hex diff --git a/test/util/data/txcreateoutpubkey2.json b/test/functional/data/util/txcreateoutpubkey2.json similarity index 100% rename from test/util/data/txcreateoutpubkey2.json rename to test/functional/data/util/txcreateoutpubkey2.json diff --git a/test/util/data/txcreateoutpubkey3.hex b/test/functional/data/util/txcreateoutpubkey3.hex similarity index 100% rename from test/util/data/txcreateoutpubkey3.hex rename to test/functional/data/util/txcreateoutpubkey3.hex diff --git a/test/util/data/txcreateoutpubkey3.json b/test/functional/data/util/txcreateoutpubkey3.json similarity index 100% rename from test/util/data/txcreateoutpubkey3.json rename to test/functional/data/util/txcreateoutpubkey3.json diff --git a/test/util/data/txcreatescript1.hex b/test/functional/data/util/txcreatescript1.hex similarity index 100% rename from test/util/data/txcreatescript1.hex rename to test/functional/data/util/txcreatescript1.hex diff --git a/test/util/data/txcreatescript1.json b/test/functional/data/util/txcreatescript1.json similarity index 100% rename from test/util/data/txcreatescript1.json 
rename to test/functional/data/util/txcreatescript1.json diff --git a/test/util/data/txcreatescript2.hex b/test/functional/data/util/txcreatescript2.hex similarity index 100% rename from test/util/data/txcreatescript2.hex rename to test/functional/data/util/txcreatescript2.hex diff --git a/test/util/data/txcreatescript2.json b/test/functional/data/util/txcreatescript2.json similarity index 100% rename from test/util/data/txcreatescript2.json rename to test/functional/data/util/txcreatescript2.json diff --git a/test/util/data/txcreatescript3.hex b/test/functional/data/util/txcreatescript3.hex similarity index 100% rename from test/util/data/txcreatescript3.hex rename to test/functional/data/util/txcreatescript3.hex diff --git a/test/util/data/txcreatescript3.json b/test/functional/data/util/txcreatescript3.json similarity index 100% rename from test/util/data/txcreatescript3.json rename to test/functional/data/util/txcreatescript3.json diff --git a/test/util/data/txcreatescript4.hex b/test/functional/data/util/txcreatescript4.hex similarity index 100% rename from test/util/data/txcreatescript4.hex rename to test/functional/data/util/txcreatescript4.hex diff --git a/test/util/data/txcreatescript4.json b/test/functional/data/util/txcreatescript4.json similarity index 100% rename from test/util/data/txcreatescript4.json rename to test/functional/data/util/txcreatescript4.json diff --git a/test/util/data/txcreatescript5.hex b/test/functional/data/util/txcreatescript5.hex similarity index 100% rename from test/util/data/txcreatescript5.hex rename to test/functional/data/util/txcreatescript5.hex diff --git a/test/util/data/txcreatescript6.hex b/test/functional/data/util/txcreatescript6.hex similarity index 100% rename from test/util/data/txcreatescript6.hex rename to test/functional/data/util/txcreatescript6.hex diff --git a/test/util/data/txcreatesignsegwit1.hex b/test/functional/data/util/txcreatesignsegwit1.hex similarity index 100% rename from test/util/data/txcreatesignsegwit1.hex rename to test/functional/data/util/txcreatesignsegwit1.hex diff --git a/test/util/data/txcreatesignv1.hex b/test/functional/data/util/txcreatesignv1.hex similarity index 100% rename from test/util/data/txcreatesignv1.hex rename to test/functional/data/util/txcreatesignv1.hex diff --git a/test/util/data/txcreatesignv1.json b/test/functional/data/util/txcreatesignv1.json similarity index 100% rename from test/util/data/txcreatesignv1.json rename to test/functional/data/util/txcreatesignv1.json diff --git a/test/util/data/txcreatesignv2.hex b/test/functional/data/util/txcreatesignv2.hex similarity index 100% rename from test/util/data/txcreatesignv2.hex rename to test/functional/data/util/txcreatesignv2.hex diff --git a/test/util/data/txreplace1.hex b/test/functional/data/util/txreplace1.hex similarity index 100% rename from test/util/data/txreplace1.hex rename to test/functional/data/util/txreplace1.hex diff --git a/test/util/data/txreplacenoinputs.hex b/test/functional/data/util/txreplacenoinputs.hex similarity index 100% rename from test/util/data/txreplacenoinputs.hex rename to test/functional/data/util/txreplacenoinputs.hex diff --git a/test/util/data/txreplaceomittedn.hex b/test/functional/data/util/txreplaceomittedn.hex similarity index 100% rename from test/util/data/txreplaceomittedn.hex rename to test/functional/data/util/txreplaceomittedn.hex diff --git a/test/util/data/txreplacesingleinput.hex b/test/functional/data/util/txreplacesingleinput.hex similarity index 100% rename from 
test/util/data/txreplacesingleinput.hex rename to test/functional/data/util/txreplacesingleinput.hex diff --git a/test/functional/tool_utils.py b/test/functional/tool_utils.py index 3e36d05a50b9..2b175b89e6cb 100755 --- a/test/functional/tool_utils.py +++ b/test/functional/tool_utils.py @@ -27,7 +27,7 @@ def skip_test_if_missing_module(self): self.skip_if_no_bitcoin_util() def run_test(self): - self.testcase_dir = Path(self.config["environment"]["SRCDIR"]) / "test" / "util" / "data" + self.testcase_dir = Path(self.config["environment"]["SRCDIR"]) / "test" / "functional" / "data" / "util" self.bins = self.get_binaries() with open(self.testcase_dir / "bitcoin-util-test.json", encoding="utf8") as f: input_data = json.loads(f.read()) From 893e51ffeb0543e1c8d33e83b20c56f02d2b793c Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Fri, 13 Jun 2025 13:41:59 +0200 Subject: [PATCH 056/390] wallet: Correct dir iteration error handling Seems to have been broken since conversion from Boost in #20744. The std::filesystem iteration aborts upon failure while Boost might have allowed skipping over faulty entries. --- src/wallet/db.cpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp index 9de0572947f8..40692997d203 100644 --- a/src/wallet/db.cpp +++ b/src/wallet/db.cpp @@ -26,16 +26,7 @@ std::vector> ListDatabases(const fs::path& wall std::error_code ec; for (auto it = fs::recursive_directory_iterator(wallet_dir, ec); it != fs::recursive_directory_iterator(); it.increment(ec)) { - if (ec) { - if (fs::is_directory(*it)) { - it.disable_recursion_pending(); - LogPrintf("%s: %s %s -- skipping.\n", __func__, ec.message(), fs::PathToString(it->path())); - } else { - LogPrintf("%s: %s %s\n", __func__, ec.message(), fs::PathToString(it->path())); - } - continue; - } - + assert(!ec); // Loop should exit on error. try { const fs::path path{it->path().lexically_relative(wallet_dir)}; @@ -69,6 +60,14 @@ std::vector> ListDatabases(const fs::path& wall it.disable_recursion_pending(); } } + if (ec) { + // Loop could have exited with an error due to one of: + // * wallet_dir itself not being scannable. + // * increment() failure. (Observed on Windows native builds when + // removing the ACL read permissions of a wallet directory after the + // process started). 
+ LogWarning("Error scanning directory entries under %s: %s", fs::PathToString(wallet_dir), ec.message()); + } return paths; } From 17776443675ddf804f92042883ad36ed040438c3 Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Fri, 13 Jun 2025 13:55:54 +0200 Subject: [PATCH 057/390] qa, wallet: Verify warning when failing to scan --- test/functional/wallet_multiwallet.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index 87c8f6275520..2c87fd5711d8 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -78,6 +78,24 @@ def wallet_file(name): self.stop_nodes() assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True) + self.log.info("Verify warning is emitted when failing to scan the wallets directory") + if platform.system() == 'Windows': + self.log.warning('Skipping test involving chmod as Windows does not support it.') + elif os.geteuid() == 0: + self.log.warning('Skipping test involving chmod as it requires a non-root user.') + else: + self.start_node(0) + with self.nodes[0].assert_debug_log(unexpected_msgs=['Error scanning directory entries under'], expected_msgs=[]): + result = self.nodes[0].listwalletdir() + assert_equal(result, {'wallets': [{'name': 'default_wallet', 'warnings': []}]}) + os.chmod(data_dir('wallets'), 0) + with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning directory entries under']): + result = self.nodes[0].listwalletdir() + assert_equal(result, {'wallets': []}) + self.stop_node(0) + # Restore permissions + os.chmod(data_dir('wallets'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) + # create symlink to verify wallet directory path can be referenced # through symlink os.mkdir(wallet_dir('w7')) From 272cd09b796a36596b325277bb43cb47b19c8e12 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Tue, 10 Jun 2025 15:57:33 +0200 Subject: [PATCH 058/390] log: Use warning level while scanning wallet dir --- src/wallet/db.cpp | 6 +++--- test/functional/wallet_multiwallet.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp index 40692997d203..781b6fa461dc 100644 --- a/src/wallet/db.cpp +++ b/src/wallet/db.cpp @@ -56,7 +56,7 @@ std::vector> ListDatabases(const fs::path& wall } } } catch (const std::exception& e) { - LogPrintf("%s: Error scanning %s: %s\n", __func__, fs::PathToString(it->path()), e.what()); + LogWarning("Error while scanning wallet dir item: %s [%s].", e.what(), fs::PathToString(it->path())); it.disable_recursion_pending(); } } @@ -99,7 +99,7 @@ bool IsBDBFile(const fs::path& path) // This check also prevents opening lock files. std::error_code ec; auto size = fs::file_size(path, ec); - if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), fs::PathToString(path)); + if (ec) LogWarning("Error reading file_size: %s [%s]", ec.message(), fs::PathToString(path)); if (size < 4096) return false; std::ifstream file{path, std::ios::binary}; @@ -123,7 +123,7 @@ bool IsSQLiteFile(const fs::path& path) // A SQLite Database file is at least 512 bytes. 
std::error_code ec; auto size = fs::file_size(path, ec); - if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), fs::PathToString(path)); + if (ec) LogWarning("Error reading file_size: %s [%s]", ec.message(), fs::PathToString(path)); if (size < 512) return false; std::ifstream file{path, std::ios::binary}; diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index 2c87fd5711d8..a7b61e93280b 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -147,7 +147,7 @@ def wallet_file(name): os.mkdir(wallet_dir('no_access')) os.chmod(wallet_dir('no_access'), 0) try: - with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']): + with self.nodes[0].assert_debug_log(expected_msgs=["Error while scanning wallet dir"]): walletlist = self.nodes[0].listwalletdir()['wallets'] finally: # Need to ensure access is restored for cleanup From 74690f4ed82b1584abb07c0387db0d924c4c0cab Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Fri, 25 Apr 2025 15:25:45 +0200 Subject: [PATCH 059/390] validation: refactor TestBlockValidity Comments are expanded. Return BlockValidationState instead of passing a reference. Lock Chainman mutex instead of cs_main. Remove redundant chainparams and pindexPrev arguments. Drop defaults for checking proof-of-work and merkle root. The ContextualCheckBlockHeader check is moved to after CheckBlock, which is more similar to normal validation where context-free checks are done first. Validation failure reasons are no longer printed through LogError(), since it depends on the caller whether this implies an actual bug in the node, or an externally sourced block that happens to be invalid. When called from getblocktemplate, via BlockAssembler::CreateNewBlock(), this method already throws an std::runtime_error if validation fails. Additionally it moves the inconclusive-not-best-prevblk check from RPC code to TestBlockValidity. There is no behavior change when callling getblocktemplate with proposal. Previously this would return a BIP22ValidationResult which can throw for state.IsError(). But CheckBlock() and the functions it calls only use state.IsValid(). The final assert is changed into Assume, with a LogError. 
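For illustration only (not part of this change): a caller holding the ChainstateManager mutex is now expected to consume the returned state roughly as follows. Here `chainman`, `block` and the thrown error are placeholders mirroring the miner.cpp call site below, not new code introduced by this patch.

    // Sketch of the new calling convention; names are illustrative placeholders.
    LOCK(chainman.GetMutex());
    const BlockValidationState state{TestBlockValidity(chainman.ActiveChainstate(), block,
                                                       /*check_pow=*/false,
                                                       /*check_merkle_root=*/true)};
    if (!state.IsValid()) {
        // TestBlockValidity no longer logs failures itself; the caller decides
        // how to report them.
        throw std::runtime_error(strprintf("TestBlockValidity failed: %s", state.ToString()));
    }
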
Co-authored-by: --- src/node/miner.cpp | 8 ++-- src/rpc/mining.cpp | 11 +----- src/validation.cpp | 96 +++++++++++++++++++++++++++++++--------------- src/validation.h | 30 +++++++++++---- 4 files changed, 94 insertions(+), 51 deletions(-) diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 7ffa7fb60c92..02a7684178cd 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -179,10 +179,10 @@ std::unique_ptr BlockAssembler::CreateNewBlock() pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus()); pblock->nNonce = 0; - BlockValidationState state; - if (m_options.test_block_validity && !TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev, - /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false)) { - throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString())); + if (m_options.test_block_validity) { + if (BlockValidationState state{TestBlockValidity(m_chainstate, *pblock, /*check_pow=*/false, /*check_merkle_root=*/false)}; !state.IsValid()) { + throw std::runtime_error(strprintf("TestBlockValidity failed: %s", state.ToString())); + } } const auto time_2{SteadyClock::now()}; diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 4fa62447af64..f11305f16b8b 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -388,8 +388,7 @@ static RPCHelpMan generateblock() block.vtx.insert(block.vtx.end(), txs.begin(), txs.end()); RegenerateCommitments(block, chainman); - BlockValidationState state; - if (!TestBlockValidity(state, chainman.GetParams(), chainman.ActiveChainstate(), block, chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock), /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/false)) { + if (BlockValidationState state{TestBlockValidity(chainman.ActiveChainstate(), block, /*check_pow=*/false, /*check_merkle_root=*/false)}; !state.IsValid()) { throw JSONRPCError(RPC_VERIFY_ERROR, strprintf("TestBlockValidity failed: %s", state.ToString())); } } @@ -745,13 +744,7 @@ static RPCHelpMan getblocktemplate() return "duplicate-inconclusive"; } - // TestBlockValidity only supports blocks built on the current Tip - if (block.hashPrevBlock != tip) { - return "inconclusive-not-best-prevblk"; - } - BlockValidationState state; - TestBlockValidity(state, chainman.GetParams(), chainman.ActiveChainstate(), block, chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock), /*fCheckPOW=*/false, /*fCheckMerkleRoot=*/true); - return BIP22ValidationResult(state); + return BIP22ValidationResult(TestBlockValidity(chainman.ActiveChainstate(), block, /*check_pow=*/false, /*check_merkle_root=*/true)); } const UniValue& aClientRules = oparam.find_value("rules"); diff --git a/src/validation.cpp b/src/validation.cpp index d5e92600d4f3..b440c2752b71 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -4600,42 +4600,78 @@ MempoolAcceptResult ChainstateManager::ProcessTransaction(const CTransactionRef& return result; } -bool TestBlockValidity(BlockValidationState& state, - const CChainParams& chainparams, - Chainstate& chainstate, - const CBlock& block, - CBlockIndex* pindexPrev, - bool fCheckPOW, - bool fCheckMerkleRoot) + +BlockValidationState TestBlockValidity( + Chainstate& chainstate, + const CBlock& block, + const bool check_pow, + const bool check_merkle_root) { - AssertLockHeld(cs_main); - assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip()); - CCoinsViewCache viewNew(&chainstate.CoinsTip()); - uint256 block_hash(block.GetHash()); - CBlockIndex indexDummy(block); - indexDummy.pprev = pindexPrev; - 
indexDummy.nHeight = pindexPrev->nHeight + 1; - indexDummy.phashBlock = &block_hash; - - // NOTE: CheckBlockHeader is called by CheckBlock - if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev)) { - LogError("%s: Consensus::ContextualCheckBlockHeader: %s\n", __func__, state.ToString()); - return false; + // Lock must be held throughout this function for two reasons: + // 1. We don't want the tip to change during several of the validation steps + // 2. To prevent a CheckBlock() race condition for fChecked, see ProcessNewBlock() + AssertLockHeld(chainstate.m_chainman.GetMutex()); + + BlockValidationState state; + CBlockIndex* tip{Assert(chainstate.m_chain.Tip())}; + + if (block.hashPrevBlock != *Assert(tip->phashBlock)) { + state.Invalid({}, "inconclusive-not-best-prevblk"); + return state; } - if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot)) { - LogError("%s: Consensus::CheckBlock: %s\n", __func__, state.ToString()); - return false; + + // For signets CheckBlock() verifies the challenge iff fCheckPow is set. + if (!CheckBlock(block, state, chainstate.m_chainman.GetConsensus(), /*fCheckPow=*/check_pow, /*fCheckMerkleRoot=*/check_merkle_root)) { + // This should never happen, but belt-and-suspenders don't approve the + // block if it does. + if (state.IsValid()) NONFATAL_UNREACHABLE(); + return state; } - if (!ContextualCheckBlock(block, state, chainstate.m_chainman, pindexPrev)) { - LogError("%s: Consensus::ContextualCheckBlock: %s\n", __func__, state.ToString()); - return false; + + /** + * At this point ProcessNewBlock would call AcceptBlock(), but we + * don't want to store the block or its header. Run individual checks + * instead: + * - skip AcceptBlockHeader() because: + * - we don't want to update the block index + * - we do not care about duplicates + * - we already ran CheckBlockHeader() via CheckBlock() + * - we already checked for prev-blk-not-found + * - we know the tip is valid, so no need to check bad-prevblk + * - we already ran CheckBlock() + * - do run ContextualCheckBlockHeader() + * - do run ContextualCheckBlock() + */ + + if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, tip)) { + if (state.IsValid()) NONFATAL_UNREACHABLE(); + return state; } - if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, true)) { - return false; + + if (!ContextualCheckBlock(block, state, chainstate.m_chainman, tip)) { + if (state.IsValid()) NONFATAL_UNREACHABLE(); + return state; } - assert(state.IsValid()); - return true; + // We don't want ConnectBlock to update the actual chainstate, so create + // a cache on top of it, along with a dummy block index. + CBlockIndex index_dummy{block}; + uint256 block_hash(block.GetHash()); + index_dummy.pprev = tip; + index_dummy.nHeight = tip->nHeight + 1; + index_dummy.phashBlock = &block_hash; + CCoinsViewCache view_dummy(&chainstate.CoinsTip()); + + // Set fJustCheck to true in order to update, and not clear, validation caches. + if(!chainstate.ConnectBlock(block, state, &index_dummy, view_dummy, /*fJustCheck=*/true)) { + if (state.IsValid()) NONFATAL_UNREACHABLE(); + return state; + } + + // Ensure no check returned successfully while also setting an invalid state. 
+ if (!state.IsValid()) NONFATAL_UNREACHABLE(); + + return state; } /* This function is called from the RPC code for pruneblockchain */ diff --git a/src/validation.h b/src/validation.h index c7e70e3adbc0..9b3d1f00f2e3 100644 --- a/src/validation.h +++ b/src/validation.h @@ -384,14 +384,28 @@ class ValidationCache /** Context-independent validity checks */ bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true, bool fCheckMerkleRoot = true); -/** Check a block is completely valid from start to finish (only works on top of our current best block) */ -bool TestBlockValidity(BlockValidationState& state, - const CChainParams& chainparams, - Chainstate& chainstate, - const CBlock& block, - CBlockIndex* pindexPrev, - bool fCheckPOW = true, - bool fCheckMerkleRoot = true) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +/** + * Verify a block, including transactions. + * + * @param[in] block The block we want to process. Must connect to the + * current tip. + * @param[in] chainstate The chainstate to connect to. + * @param[in] check_pow perform proof-of-work check, nBits in the header + * is always checked + * @param[in] check_merkle_root check the merkle root + * + * @return Valid or Invalid state. This doesn't currently return an Error state, + * and shouldn't unless there is something wrong with the existing + * chainstate. (This is different from functions like AcceptBlock which + * can fail trying to save new data.) + * + * For signets the challenge verification is skipped when check_pow is false. + */ +BlockValidationState TestBlockValidity( + Chainstate& chainstate, + const CBlock& block, + bool check_pow, + bool check_merkle_root) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Check with the proof of work on each blockheader matches the value in nBits */ bool HasValidProofOfWork(const std::vector& headers, const Consensus::Params& consensusParams); From 6077157531c1cec6dea8e6f90b4df8ef7b5cec4e Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Thu, 23 Jan 2025 12:59:54 +0100 Subject: [PATCH 060/390] ipc: drop BlockValidationState special handling The Mining interface avoids using BlockValidationState. --- src/ipc/capnp/mining-types.h | 8 +------- src/ipc/capnp/mining.capnp | 10 ---------- src/ipc/capnp/mining.cpp | 36 ------------------------------------ src/test/ipc_test.capnp | 3 +-- src/test/ipc_test.cpp | 19 ------------------- 5 files changed, 2 insertions(+), 74 deletions(-) diff --git a/src/ipc/capnp/mining-types.h b/src/ipc/capnp/mining-types.h index 2e60b43fcf3d..62789759490f 100644 --- a/src/ipc/capnp/mining-types.h +++ b/src/ipc/capnp/mining-types.h @@ -14,13 +14,7 @@ #include namespace mp { -// Custom serialization for BlockValidationState. 
-void CustomBuildMessage(InvokeContext& invoke_context, - const BlockValidationState& src, - ipc::capnp::messages::BlockValidationState::Builder&& builder); -void CustomReadMessage(InvokeContext& invoke_context, - const ipc::capnp::messages::BlockValidationState::Reader& reader, - BlockValidationState& dest); +// Custom serializations } // namespace mp #endif // BITCOIN_IPC_CAPNP_MINING_TYPES_H diff --git a/src/ipc/capnp/mining.capnp b/src/ipc/capnp/mining.capnp index 32048e0ed199..f3327bf2e7ba 100644 --- a/src/ipc/capnp/mining.capnp +++ b/src/ipc/capnp/mining.capnp @@ -44,13 +44,3 @@ struct BlockWaitOptions $Proxy.wrap("node::BlockWaitOptions") { timeout @0 : Float64 $Proxy.name("timeout"); feeThreshold @1 : Int64 $Proxy.name("fee_threshold"); } - -# Note: serialization of the BlockValidationState C++ type is somewhat fragile -# and using the struct can be awkward. It would be good if testBlockValidity -# method were changed to return validity information in a simpler format. -struct BlockValidationState { - mode @0 :Int32; - result @1 :Int32; - rejectReason @2 :Text; - debugMessage @3 :Text; -} diff --git a/src/ipc/capnp/mining.cpp b/src/ipc/capnp/mining.cpp index 0f9533c1c732..f598f1b2d8e3 100644 --- a/src/ipc/capnp/mining.cpp +++ b/src/ipc/capnp/mining.cpp @@ -8,40 +8,4 @@ #include namespace mp { -void CustomBuildMessage(InvokeContext& invoke_context, - const BlockValidationState& src, - ipc::capnp::messages::BlockValidationState::Builder&& builder) -{ - if (src.IsValid()) { - builder.setMode(0); - } else if (src.IsInvalid()) { - builder.setMode(1); - } else if (src.IsError()) { - builder.setMode(2); - } else { - assert(false); - } - builder.setResult(static_cast(src.GetResult())); - builder.setRejectReason(src.GetRejectReason()); - builder.setDebugMessage(src.GetDebugMessage()); -} - -void CustomReadMessage(InvokeContext& invoke_context, - const ipc::capnp::messages::BlockValidationState::Reader& reader, - BlockValidationState& dest) -{ - if (reader.getMode() == 0) { - assert(reader.getResult() == 0); - assert(reader.getRejectReason().size() == 0); - assert(reader.getDebugMessage().size() == 0); - } else if (reader.getMode() == 1) { - dest.Invalid(static_cast(reader.getResult()), reader.getRejectReason(), reader.getDebugMessage()); - } else if (reader.getMode() == 2) { - assert(reader.getResult() == 0); - dest.Error(reader.getRejectReason()); - assert(reader.getDebugMessage().size() == 0); - } else { - assert(false); - } -} } // namespace mp diff --git a/src/test/ipc_test.capnp b/src/test/ipc_test.capnp index 7fd59cf5882d..e33f711bf3c4 100644 --- a/src/test/ipc_test.capnp +++ b/src/test/ipc_test.capnp @@ -19,6 +19,5 @@ interface FooInterface $Proxy.wrap("FooImplementation") { passUniValue @2 (arg :Text) -> (result :Text); passTransaction @3 (arg :Data) -> (result :Data); passVectorChar @4 (arg :Data) -> (result :Data); - passBlockState @5 (arg :Mining.BlockValidationState) -> (result :Mining.BlockValidationState); - passScript @6 (arg :Data) -> (result :Data); + passScript @5 (arg :Data) -> (result :Data); } diff --git a/src/test/ipc_test.cpp b/src/test/ipc_test.cpp index dd46883fb095..b4d7ad354cc8 100644 --- a/src/test/ipc_test.cpp +++ b/src/test/ipc_test.cpp @@ -102,25 +102,6 @@ void IpcPipeTest() std::vector vec2{foo->passVectorChar(vec1)}; BOOST_CHECK_EQUAL(std::string_view(vec1.begin(), vec1.end()), std::string_view(vec2.begin(), vec2.end())); - BlockValidationState bs1; - bs1.Invalid(BlockValidationResult::BLOCK_MUTATED, "reject reason", "debug message"); - BlockValidationState 
bs2{foo->passBlockState(bs1)}; - BOOST_CHECK_EQUAL(bs1.IsValid(), bs2.IsValid()); - BOOST_CHECK_EQUAL(bs1.IsError(), bs2.IsError()); - BOOST_CHECK_EQUAL(bs1.IsInvalid(), bs2.IsInvalid()); - BOOST_CHECK_EQUAL(static_cast(bs1.GetResult()), static_cast(bs2.GetResult())); - BOOST_CHECK_EQUAL(bs1.GetRejectReason(), bs2.GetRejectReason()); - BOOST_CHECK_EQUAL(bs1.GetDebugMessage(), bs2.GetDebugMessage()); - - BlockValidationState bs3; - BlockValidationState bs4{foo->passBlockState(bs3)}; - BOOST_CHECK_EQUAL(bs3.IsValid(), bs4.IsValid()); - BOOST_CHECK_EQUAL(bs3.IsError(), bs4.IsError()); - BOOST_CHECK_EQUAL(bs3.IsInvalid(), bs4.IsInvalid()); - BOOST_CHECK_EQUAL(static_cast(bs3.GetResult()), static_cast(bs4.GetResult())); - BOOST_CHECK_EQUAL(bs3.GetRejectReason(), bs4.GetRejectReason()); - BOOST_CHECK_EQUAL(bs3.GetDebugMessage(), bs4.GetDebugMessage()); - auto script1{CScript() << OP_11}; auto script2{foo->passScript(script1)}; BOOST_CHECK_EQUAL(HexStr(script1), HexStr(script2)); From 94959b8deedcff98a55c87b5e473890b2e7a3b16 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Tue, 20 May 2025 09:25:03 +0200 Subject: [PATCH 061/390] Add checkBlock to Mining interface Use it in miner_tests. The getblocktemplate and generateblock RPC calls don't use this, because it would make the code more verbose. --- src/interfaces/mining.h | 15 ++++++++++++++ src/ipc/capnp/mining.capnp | 6 ++++++ src/node/interfaces.cpp | 9 +++++++++ src/node/types.h | 12 ++++++++++++ src/test/miner_tests.cpp | 40 +++++++++++++++++++++++++++++++++++++- 5 files changed, 81 insertions(+), 1 deletion(-) diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h index 3ecb86e8ea11..150295e5b7b4 100644 --- a/src/interfaces/mining.h +++ b/src/interfaces/mining.h @@ -114,6 +114,21 @@ class Mining */ virtual std::unique_ptr createNewBlock(const node::BlockCreateOptions& options = {}) = 0; + /** + * Checks if a given block is valid. + * + * @param[in] block the block to check + * @param[in] options verification options: the proof-of-work check can be + * skipped in order to verify a template generated by + * external software. + * @param[out] reason failure reason (BIP22) + * @param[out] debug more detailed rejection reason + * @returns whether the block is valid + * + * For signets the challenge verification is skipped when check_pow is false. + */ + virtual bool checkBlock(const CBlock& block, const node::BlockCheckOptions& options, std::string& reason, std::string& debug) = 0; + //! Get internal node context. Useful for RPC and testing, //! but not accessible across processes. 
virtual node::NodeContext* context() { return nullptr; } diff --git a/src/ipc/capnp/mining.capnp b/src/ipc/capnp/mining.capnp index f3327bf2e7ba..8ee4745b8584 100644 --- a/src/ipc/capnp/mining.capnp +++ b/src/ipc/capnp/mining.capnp @@ -18,6 +18,7 @@ interface Mining $Proxy.wrap("interfaces::Mining") { getTip @2 (context :Proxy.Context) -> (result: Common.BlockRef, hasResult: Bool); waitTipChanged @3 (context :Proxy.Context, currentTip: Data, timeout: Float64) -> (result: Common.BlockRef); createNewBlock @4 (options: BlockCreateOptions) -> (result: BlockTemplate); + checkBlock @5 (block: Data, options: BlockCheckOptions) -> (reason: Text, debug: Text, result: Bool); } interface BlockTemplate $Proxy.wrap("interfaces::BlockTemplate") { @@ -44,3 +45,8 @@ struct BlockWaitOptions $Proxy.wrap("node::BlockWaitOptions") { timeout @0 : Float64 $Proxy.name("timeout"); feeThreshold @1 : Int64 $Proxy.name("fee_threshold"); } + +struct BlockCheckOptions $Proxy.wrap("node::BlockCheckOptions") { + checkMerkleRoot @0 :Bool $Proxy.name("check_merkle_root"); + checkPow @1 :Bool $Proxy.name("check_pow"); +} diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index 04afc852e43e..1e659e0ee0db 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -984,6 +984,15 @@ class MinerImpl : public Mining return std::make_unique(assemble_options, BlockAssembler{chainman().ActiveChainstate(), context()->mempool.get(), assemble_options}.CreateNewBlock(), m_node); } + bool checkBlock(const CBlock& block, const node::BlockCheckOptions& options, std::string& reason, std::string& debug) override + { + LOCK(chainman().GetMutex()); + BlockValidationState state{TestBlockValidity(chainman().ActiveChainstate(), block, /*check_pow=*/options.check_pow, /*=check_merkle_root=*/options.check_merkle_root)}; + reason = state.GetRejectReason(); + debug = state.GetDebugMessage(); + return state.IsValid(); + } + NodeContext* context() override { return &m_node; } ChainstateManager& chainman() { return *Assert(m_node.chainman); } KernelNotifications& notifications() { return *Assert(m_node.notifications); } diff --git a/src/node/types.h b/src/node/types.h index 0f9b871084ad..547d644831c0 100644 --- a/src/node/types.h +++ b/src/node/types.h @@ -17,6 +17,7 @@ #include #include #include
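
For illustration only: a rough sketch of how a Mining interface consumer might drive the new checkBlock() method. `mining` stands in for an interfaces::Mining handle obtained elsewhere, `block` for a candidate block, and the assumption that node::BlockCheckOptions default-constructs with sensible defaults is not confirmed by this patch.

    // Illustrative use of the new interface method; not part of this change.
    node::BlockCheckOptions options;
    options.check_pow = false;        // e.g. a template from external software, PoW not yet solved
    options.check_merkle_root = true;
    std::string reason, debug;
    if (!mining->checkBlock(block, options, reason, debug)) {
        LogPrintf("Block check failed: %s (%s)\n", reason, debug);
    }
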