From 1040ad0596579ae88d0fce0674e5b560c1a61d4f Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 14 Nov 2023 15:38:29 +0000 Subject: [PATCH 1/8] Merge bitcoin/bitcoin#28857: test, refactor: Magic bytes array followup 1e5b86171e81ab4b022b9746bb06e1968ecf4086 test: Add test for array serialization (TheCharlatan) d49d1988406f2f0d350bc5b552625f9823090130 refactor: Initialize magic bytes in constructor initializer (TheCharlatan) Pull request description: This is a followup-PR for #28423 * Initialize magic bytes in constructor * Add a small unit test for serializing arrays. ACKs for top commit: sipa: utACK 1e5b86171e81ab4b022b9746bb06e1968ecf4086 maflcko: lgtm ACK 1e5b86171e81ab4b022b9746bb06e1968ecf4086 Tree-SHA512: 0f58d2332dc501ca9fd419f40ed4f977c83dce0169e9a0eee1ffc9f8daa2d2ef7e7df18205ba076f55d90ae6c4a20d2b51ab303150d38470a962bcc58a66f6e7 --- src/test/serialize_tests.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index d0b6cc774ccf..ea5b8e5e1e2d 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -84,6 +84,8 @@ BOOST_AUTO_TEST_CASE(sizes) BOOST_CHECK_EQUAL(GetSerializeSize(int64_t(0), 0), 8U); BOOST_CHECK_EQUAL(GetSerializeSize(uint64_t(0), 0), 8U); BOOST_CHECK_EQUAL(GetSerializeSize(bool(0), 0), 1U); + BOOST_CHECK_EQUAL(GetSerializeSize(std::array{0}, 0), 1U); + BOOST_CHECK_EQUAL(GetSerializeSize(std::array{0, 0}, 0), 2U); } BOOST_AUTO_TEST_CASE(varints) @@ -178,6 +180,16 @@ BOOST_AUTO_TEST_CASE(vector_bool) BOOST_CHECK(SerializeHash(vec1) == SerializeHash(vec2)); } +BOOST_AUTO_TEST_CASE(array) +{ + std::array array1{1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1}; + DataStream ds; + ds << array1; + std::array array2; + ds >> array2; + BOOST_CHECK(array1 == array2); +} + BOOST_AUTO_TEST_CASE(noncanonical) { // Write some non-canonical CompactSize encodings, and From 6fa8dfe49a9f129ff5dbd445e27ac3e1f610331a Mon Sep 17 00:00:00 
2001 From: Ava Chow Date: Mon, 11 Mar 2024 09:34:19 -0400 Subject: [PATCH 2/8] Merge bitcoin/bitcoin#29458: refactor: Preallocate result in TryParseHex to avoid resizing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit a19235c14b3dc02de30b5d769de29d1752c23dbd Preallocate result in `TryParseHex` to avoid resizing (Lőrinc) b7489ecb52c1f99facb7c81c5e46963394d0620d Add benchmark for TryParseHex (Lőrinc) Pull request description: This pull request introduces optimizations to the `TryParseHex` function, focusing primarily on the ideal case (valid hexadecimal input without spaces). A new benchmark, `HexParse` was introduced in a separate commit. The main optimization preallocates the result vector based on the input string's length. This aims to completely avoid costly dynamic reallocations when no spaces are present. ------------ Before: ``` | ns/base16 | base16/s | err% | total | benchmark |--------------------:|--------------------:|--------:|----------:|:---------- | 1.60 | 623,238,893.11 | 0.3% | 0.01 | `HexParse` | 1.65 | 606,747,566.34 | 0.6% | 0.01 | `HexParse` | 1.60 | 626,149,544.07 | 0.3% | 0.01 | `HexParse` ``` After: ``` | ns/base16 | base16/s | err% | total | benchmark |--------------------:|--------------------:|--------:|----------:|:---------- | 0.68 | 1,465,555,976.27 | 0.8% | 0.01 | `HexParse` | 0.68 | 1,472,962,920.18 | 0.3% | 0.01 | `HexParse` | 0.68 | 1,476,159,423.00 | 0.3% | 0.01 | `HexParse` ``` ACKs for top commit: achow101: ACK a19235c14b3dc02de30b5d769de29d1752c23dbd hebasto: ACK a19235c14b3dc02de30b5d769de29d1752c23dbd. 
andrewtoth: Re-ACK a19235c14b3dc02de30b5d769de29d1752c23dbd Empact: Re-ACK https://github.com/bitcoin/bitcoin/pull/29458/commits/a19235c14b3dc02de30b5d769de29d1752c23dbd Tree-SHA512: e09a59791104be3fd1026862ce98de9efafa1f949626fa01e3b7d58e6a2ef02a11f0de55ddba5c43230a53effd24e6d368c1e12848b17e8ce91d7908a59333f0 --- src/Makefile.bench.include | 1 + src/bench/parse_hex.cpp | 36 ++++++++++++++++++++++++++++++++++++ src/util/strencodings.cpp | 2 ++ 3 files changed, 39 insertions(+) create mode 100644 src/bench/parse_hex.cpp diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index be7bfe2d2dcc..9bc2bc3894ef 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -45,6 +45,7 @@ bench_bench_dash_SOURCES = \ bench/merkle_root.cpp \ bench/nanobench.cpp \ bench/nanobench.h \ + bench/parse_hex.cpp \ bench/peer_eviction.cpp \ bench/poly1305.cpp \ bench/pool.cpp \ diff --git a/src/bench/parse_hex.cpp b/src/bench/parse_hex.cpp new file mode 100644 index 000000000000..db3ead043c81 --- /dev/null +++ b/src/bench/parse_hex.cpp @@ -0,0 +1,36 @@ +// Copyright (c) 2024- The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+#include <bench/bench.h>
+#include <bench/nanobench.h>
+#include <random.h>
+#include <util/strencodings.h>
+
+#include <cassert>
+#include <optional>
+#include <vector>
fa391513949b7a3b56321436e2015c7e9e6dac2b refactor: Remove unused error() (MarcoFalke) fad0335517096f435d76adce7833e213d3cc23d1 scripted-diff: Replace error() with LogError() (MarcoFalke) fa808fb74972637840675e310f6d4a0f06028d61 refactor: Make error() return type void (MarcoFalke) fa1d62434843866d242bff9f9c55cb838a4f0d83 scripted-diff: return error(...); ==> error(...); return false; (MarcoFalke) fa9a5e80ab86c997102a1c3d4ba017bbe86641d5 refactor: Add missing {} around error() calls (MarcoFalke) Pull request description: `error(...)` has many issues: * It is often used in the context of `return error(...)`, implying that it has a "fancy" type, creating confusion with `util::Result/Error` * `-logsourcelocations` does not work with it, because it will pretend the error happened inside of `logging.h` * The log line contains `ERROR: `, as opposed to `[error]`, like for other errors logged with `LogError`. Fix all issues by removing it. ACKs for top commit: fjahr: re-utACK fa391513949b7a3b56321436e2015c7e9e6dac2b stickies-v: re-ACK fa391513949b7a3b56321436e2015c7e9e6dac2b, no changes since 4a903741b0 ryanofsky: Code review ACK fa391513949b7a3b56321436e2015c7e9e6dac2b. 
Just rebase since last review Tree-SHA512: ec5bb502ab0d3733fdb14a8a00762638fce0417afd8dd6294ae0d485ce2b7ca5b1efeb50fc2cd7467f6c652e4ed3e99b0f283b08aeca04bbfb7ea4f2c95d283a Rebase and fixing blockstorage.cpp file src/validation.cpp - Fixing linting error --- src/addrdb.cpp | 14 ++-- src/flatfile.cpp | 9 +- src/index/base.cpp | 3 +- src/index/blockfilterindex.cpp | 38 ++++++--- src/index/coinstatsindex.cpp | 24 ++++-- src/index/txindex.cpp | 12 ++- src/kernel/coinstats.cpp | 3 +- src/net.cpp | 2 +- src/netbase.cpp | 54 ++++++++---- src/node/blockstorage.cpp | 33 +++++--- src/script/signingprovider.cpp | 6 +- src/validation.cpp | 147 +++++++++++++++++++++------------ 12 files changed, 229 insertions(+), 116 deletions(-) diff --git a/src/addrdb.cpp b/src/addrdb.cpp index 83bc6192ff7b..158407452816 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -39,7 +39,8 @@ bool SerializeDB(Stream& stream, const Data& data) hashwriter << Params().MessageStart() << data; stream << hashwriter.GetHash(); } catch (const std::exception& e) { - return error("%s: Serialize or I/O error - %s", __func__, e.what()); + LogError("%s: Serialize or I/O error - %s\n", __func__, e.what()); + return false; } return true; @@ -59,7 +60,8 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data if (fileout.IsNull()) { fileout.fclose(); remove(pathTmp); - return error("%s: Failed to open file %s", __func__, fs::PathToString(pathTmp)); + LogError("%s: Failed to open file %s\n", __func__, fs::PathToString(pathTmp)); + return false; } // Serialize @@ -71,14 +73,16 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data if (!FileCommit(fileout.Get())) { fileout.fclose(); remove(pathTmp); - return error("%s: Failed to flush file %s", __func__, fs::PathToString(pathTmp)); + LogError("%s: Failed to flush file %s\n", __func__, fs::PathToString(pathTmp)); + return false; } fileout.fclose(); // replace existing file, if any, with new file if 
(!RenameOver(pathTmp, path)) { remove(pathTmp); - return error("%s: Rename-into-place failed", __func__); + LogError("%s: Rename-into-place failed\n", __func__); + return false; } return true; @@ -136,7 +140,7 @@ bool CBanDB::Write(const banmap_t& banSet) } for (const auto& err : errors) { - error("%s", err); + LogError("%s\n", err); } return false; } diff --git a/src/flatfile.cpp b/src/flatfile.cpp index 0fecf4f50499..1b4433186ebb 100644 --- a/src/flatfile.cpp +++ b/src/flatfile.cpp @@ -82,15 +82,18 @@ bool FlatFileSeq::Flush(const FlatFilePos& pos, bool finalize) { FILE* file = Open(FlatFilePos(pos.nFile, 0)); // Avoid fseek to nPos if (!file) { - return error("%s: failed to open file %d", __func__, pos.nFile); + LogError("%s: failed to open file %d\n", __func__, pos.nFile); + return false; } if (finalize && !TruncateFile(file, pos.nPos)) { fclose(file); - return error("%s: failed to truncate file %d", __func__, pos.nFile); + LogError("%s: failed to truncate file %d\n", __func__, pos.nFile); + return false; } if (!FileCommit(file)) { fclose(file); - return error("%s: failed to commit file %d", __func__, pos.nFile); + LogError("%s: failed to commit file %d\n", __func__, pos.nFile); + return false; } DirectoryCommit(m_dir); diff --git a/src/index/base.cpp b/src/index/base.cpp index d585cac6dd53..0a02e76a2a0c 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -204,7 +204,8 @@ bool BaseIndex::Commit() { CDBBatch batch(GetDB()); if (!CommitInternal(batch) || !GetDB().WriteBatch(batch)) { - return error("%s: Failed to commit latest %s state", __func__, GetName()); + LogError("%s: Failed to commit latest %s state\n", __func__, GetName()); + return false; } return true; } diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp index 5449a93b0954..635064afdc9b 100644 --- a/src/index/blockfilterindex.cpp +++ b/src/index/blockfilterindex.cpp @@ -146,8 +146,9 @@ bool BlockFilterIndex::Init() // indicate database corruption or a disk failure, and 
starting the index would cause // further corruption. if (m_db->Exists(DB_FILTER_POS)) { - return error("%s: Cannot read current %s state; index may be corrupted", + LogError("%s: Cannot read current %s state; index may be corrupted\n", __func__, GetName()); + return false; } // If the DB_FILTER_POS is not set, then initialize to the first location. @@ -169,10 +170,12 @@ bool BlockFilterIndex::CommitInternal(CDBBatch& batch) // Flush current filter file to disk. AutoFile file{m_filter_fileseq->Open(pos)}; if (file.IsNull()) { - return error("%s: Failed to open filter file %d", __func__, pos.nFile); + LogError("%s: Failed to open filter file %d\n", __func__, pos.nFile); + return false; } if (!FileCommit(file.Get())) { - return error("%s: Failed to commit filter file %d", __func__, pos.nFile); + LogError("%s: Failed to commit filter file %d\n", __func__, pos.nFile); + return false; } batch.Write(DB_FILTER_POS, pos); @@ -191,11 +194,15 @@ bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, const uint256& std::vector encoded_filter; try { filein >> block_hash >> encoded_filter; - if (Hash(encoded_filter) != hash) return error("Checksum mismatch in filter decode."); + if (Hash(encoded_filter) != hash) { + LogError("Checksum mismatch in filter decode.\n"); + return false; + } filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter), /*skip_decode_check=*/true); } catch (const std::exception& e) { - return error("%s: Failed to deserialize block filter from disk: %s", __func__, e.what()); + LogError("%s: Failed to deserialize block filter from disk: %s\n", __func__, e.what()); + return false; } return true; @@ -264,8 +271,9 @@ bool BlockFilterIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex uint256 expected_block_hash = pindex->pprev->GetBlockHash(); if (read_out.first != expected_block_hash) { - return error("%s: previous block header belongs to unexpected block %s; expected %s", + LogError("%s: previous block header 
belongs to unexpected block %s; expected %s\n", __func__, read_out.first.ToString(), expected_block_hash.ToString()); + return false; } prev_header = read_out.second.header; @@ -299,14 +307,16 @@ bool BlockFilterIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex for (int height = start_height; height <= stop_height; ++height) { if (!db_it.GetKey(key) || key.height != height) { - return error("%s: unexpected key in %s: expected (%c, %d)", + LogError("%s: unexpected key in %s: expected (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } std::pair value; if (!db_it.GetValue(value)) { - return error("%s: unable to read value in %s at key (%c, %d)", + LogError("%s: unable to read value in %s at key (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } batch.Write(DBHashKey(value.first), std::move(value.second)); @@ -361,11 +371,13 @@ static bool LookupRange(CDBWrapper& db, const std::string& index_name, int start const CBlockIndex* stop_index, std::vector& results) { if (start_height < 0) { - return error("%s: start height (%d) is negative", __func__, start_height); + LogError("%s: start height (%d) is negative\n", __func__, start_height); + return false; } if (start_height > stop_index->nHeight) { - return error("%s: start height (%d) is greater than stop height (%d)", + LogError("%s: start height (%d) is greater than stop height (%d)\n", __func__, start_height, stop_index->nHeight); + return false; } size_t results_size = static_cast(stop_index->nHeight - start_height + 1); @@ -381,8 +393,9 @@ static bool LookupRange(CDBWrapper& db, const std::string& index_name, int start size_t i = static_cast(height - start_height); if (!db_it->GetValue(values[i])) { - return error("%s: unable to read value in %s at key (%c, %d)", + LogError("%s: unable to read value in %s at key (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } db_it->Next(); @@ -404,8 +417,9 @@ static bool 
LookupRange(CDBWrapper& db, const std::string& index_name, int start } if (!db.Read(DBHashKey(block_hash), results[i])) { - return error("%s: unable to read value in %s at key (%c, %s)", + LogError("%s: unable to read value in %s at key (%c, %s)\n", __func__, index_name, DB_BLOCK_HASH, block_hash.ToString()); + return false; } } diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index 49c81ba1efbc..5a53920f5806 100644 --- a/src/index/coinstatsindex.cpp +++ b/src/index/coinstatsindex.cpp @@ -135,8 +135,9 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) read_out.first.ToString(), expected_block_hash.ToString()); if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) { - return error("%s: previous block header not found; expected %s", + LogError("%s: previous block header not found; expected %s\n", __func__, expected_block_hash.ToString()); + return false; } } @@ -241,14 +242,16 @@ bool CoinStatsIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex) for (int height = start_height; height <= stop_height; ++height) { if (!db_it.GetKey(key) || key.height != height) { - return error("%s: unexpected key in %s: expected (%c, %d)", + LogError("%s: unexpected key in %s: expected (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } std::pair value; if (!db_it.GetValue(value)) { - return error("%s: unable to read value in %s at key (%c, %d)", + LogError("%s: unable to read value in %s at key (%c, %d)\n", __func__, index_name, DB_BLOCK_HEIGHT, height); + return false; } batch.Write(DBHashKey(value.first), std::move(value.second)); @@ -283,8 +286,9 @@ bool CoinStatsIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* n CBlock block; if (!ReadBlockFromDisk(block, iter_tip, consensus_params)) { - return error("%s: Failed to read block %s from disk", + LogError("%s: Failed to read block %s from disk\n", __func__, iter_tip->GetBlockHash().ToString()); + return false; } if 
(!ReverseBlock(block, iter_tip)) { @@ -351,8 +355,9 @@ bool CoinStatsIndex::Init() // exist. Any other errors indicate database corruption or a disk // failure, and starting the index would cause further corruption. if (m_db->Exists(DB_MUHASH)) { - return error("%s: Cannot read current %s state; index may be corrupted", + LogError("%s: Cannot read current %s state; index may be corrupted\n", __func__, GetName()); + return false; } } @@ -363,14 +368,16 @@ bool CoinStatsIndex::Init() if (pindex) { DBVal entry; if (!LookUpOne(*m_db, pindex, entry)) { - return error("%s: Cannot read current %s state; index may be corrupted", + LogError("%s: Cannot read current %s state; index may be corrupted\n", __func__, GetName()); + return false; } uint256 out; m_muhash.Finalize(out); if (entry.muhash != out) { - return error("%s: Cannot read current %s state; index may be corrupted", + LogError("%s: Cannot read current %s state; index may be corrupted\n", __func__, GetName()); + return false; } m_transaction_output_count = entry.transaction_output_count; m_bogo_size = entry.bogo_size; @@ -422,8 +429,9 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex read_out.first.ToString(), expected_block_hash.ToString()); if (!m_db->Read(DBHashKey(expected_block_hash), read_out)) { - return error("%s: previous block header not found; expected %s", + LogError("%s: previous block header not found; expected %s\n", __func__, expected_block_hash.ToString()); + return false; } } } diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp index 1a6a0cf7fe01..a915231c4019 100644 --- a/src/index/txindex.cpp +++ b/src/index/txindex.cpp @@ -82,20 +82,24 @@ bool TxIndex::FindTx(const uint256& tx_hash, uint256& block_hash, CTransactionRe AutoFile file{OpenBlockFile(postx, true)}; if (file.IsNull()) { - return error("%s: OpenBlockFile failed", __func__); + LogError("%s: OpenBlockFile failed\n", __func__); + return false; } CBlockHeader header; try { file >> header; if 
(fseek(file.Get(), postx.nTxOffset, SEEK_CUR)) { - return error("%s: fseek(...) failed", __func__); + LogError("%s: fseek(...) failed\n", __func__); + return false; } file >> tx; } catch (const std::exception& e) { - return error("%s: Deserialize or I/O error - %s", __func__, e.what()); + LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what()); + return false; } if (tx->GetHash() != tx_hash) { - return error("%s: txid mismatch", __func__); + LogError("%s: txid mismatch\n", __func__); + return false; } block_hash = header.GetHash(); return true; diff --git a/src/kernel/coinstats.cpp b/src/kernel/coinstats.cpp index 5e8549efcb45..a649aab470fb 100644 --- a/src/kernel/coinstats.cpp +++ b/src/kernel/coinstats.cpp @@ -120,7 +120,8 @@ static bool ComputeUTXOStats(CCoinsView* view, CCoinsStats& stats, T hash_obj, c outputs[key.n] = std::move(coin); stats.coins_count++; } else { - return error("%s: unable to read value", __func__); + LogError("%s: unable to read value\n", __func__); + return false; } pcursor->Next(); } diff --git a/src/net.cpp b/src/net.cpp index 21648dbd01c5..a3a1a9ade7ad 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -570,7 +570,7 @@ void CNode::SetAddrLocal(const CService& addrLocalIn) { AssertLockNotHeld(m_addr_local_mutex); LOCK(m_addr_local_mutex); if (addrLocal.IsValid()) { - error("Addr local already set for node: %i. Refusing to change from %s to %s", id, addrLocal.ToStringAddrPort(), addrLocalIn.ToStringAddrPort()); + LogError("Addr local already set for node: %i. 
Refusing to change from %s to %s\n", id, addrLocal.ToStringAddrPort(), addrLocalIn.ToStringAddrPort()); } else { addrLocal = addrLocalIn; } diff --git a/src/netbase.cpp b/src/netbase.cpp index 9144f5bd20e0..b6e81795d08e 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -372,7 +372,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a IntrRecvError recvr; LogPrint(BCLog::NET, "SOCKS5 connecting %s\n", strDest); if (strDest.size() > 255) { - return error("Hostname too long"); + LogError("Hostname too long\n"); + return false; } // Construct the version identifier/method selection message std::vector vSocks5Init; @@ -387,7 +388,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a } ssize_t ret = sock.Send(vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5Init.size()) { - return error("Error sending to proxy"); + LogError("Error sending to proxy\n"); + return false; } uint8_t pchRet1[2]; if (InterruptibleRecv(pchRet1, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) { @@ -395,34 +397,42 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a return false; } if (pchRet1[0] != SOCKSVersion::SOCKS5) { - return error("Proxy failed to initialize"); + LogError("Proxy failed to initialize\n"); + return false; } if (pchRet1[1] == SOCKS5Method::USER_PASS && auth) { // Perform username/password authentication (as described in RFC1929) std::vector vAuth; vAuth.push_back(0x01); // Current (and only) version of user/pass subnegotiation - if (auth->username.size() > 255 || auth->password.size() > 255) - return error("Proxy username or password too long"); + if (auth->username.size() > 255 || auth->password.size() > 255) { + LogError("Proxy username or password too long\n"); + return false; + } + vAuth.push_back(auth->username.size()); vAuth.insert(vAuth.end(), auth->username.begin(), auth->username.end()); vAuth.push_back(auth->password.size()); 
vAuth.insert(vAuth.end(), auth->password.begin(), auth->password.end()); ret = sock.Send(vAuth.data(), vAuth.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vAuth.size()) { - return error("Error sending authentication to proxy"); + LogError("Error sending authentication to proxy\n"); + return false; } LogPrint(BCLog::PROXY, "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password); uint8_t pchRetA[2]; if (InterruptibleRecv(pchRetA, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) { - return error("Error reading proxy authentication response"); + LogError("Error reading proxy authentication response\n"); + return false; } if (pchRetA[0] != 0x01 || pchRetA[1] != 0x00) { - return error("Proxy authentication unsuccessful"); + LogError("Proxy authentication unsuccessful\n"); + return false; } } else if (pchRet1[1] == SOCKS5Method::NOAUTH) { // Perform no authentication } else { - return error("Proxy requested wrong authentication method %02x", pchRet1[1]); + LogError("Proxy requested wrong authentication method %02x\n", pchRet1[1]); + return false; } std::vector vSocks5; vSocks5.push_back(SOCKSVersion::SOCKS5); // VER protocol version @@ -435,7 +445,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a vSocks5.push_back((port >> 0) & 0xFF); ret = sock.Send(vSocks5.data(), vSocks5.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5.size()) { - return error("Error sending to proxy"); + LogError("Error sending to proxy\n"); + return false; } uint8_t pchRet2[4]; if ((recvr = InterruptibleRecv(pchRet2, 4, g_socks5_recv_timeout, sock)) != IntrRecvError::OK) { @@ -445,11 +456,13 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a * error message. 
*/ return false; } else { - return error("Error while reading proxy response"); + LogError("Error while reading proxy response\n"); + return false; } } if (pchRet2[0] != SOCKSVersion::SOCKS5) { - return error("Proxy failed to accept request"); + LogError("Proxy failed to accept request\n"); + return false; } if (pchRet2[1] != SOCKS5Reply::SUCCEEDED) { // Failures to connect to a peer that are not proxy errors @@ -457,7 +470,8 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a return false; } if (pchRet2[2] != 0x00) { // Reserved field must be 0 - return error("Error: malformed proxy response"); + LogError("Error: malformed proxy response\n"); + return false; } uint8_t pchRet3[256]; switch (pchRet2[3]) @@ -468,19 +482,25 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a { recvr = InterruptibleRecv(pchRet3, 1, g_socks5_recv_timeout, sock); if (recvr != IntrRecvError::OK) { - return error("Error reading from proxy"); + LogError("Error reading from proxy\n"); + return false; } int nRecv = pchRet3[0]; recvr = InterruptibleRecv(pchRet3, nRecv, g_socks5_recv_timeout, sock); break; } - default: return error("Error: malformed proxy response"); + default: { + LogError("Error: malformed proxy response\n"); + return false; + } } if (recvr != IntrRecvError::OK) { - return error("Error reading from proxy"); + LogError("Error reading from proxy\n"); + return false; } if (InterruptibleRecv(pchRet3, 2, g_socks5_recv_timeout, sock) != IntrRecvError::OK) { - return error("Error reading from proxy"); + LogError("Error reading from proxy\n"); + return false; } LogPrint(BCLog::NET, "SOCKS5 connected %s\n", strDest); return true; diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index d6606442475a..0e89d24f3e7f 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -291,7 +291,8 @@ bool BlockManager::LoadBlockIndex() for (CBlockIndex* pindex : vSortedByHeight) { if (ShutdownRequested()) 
return false; if (previous_index && pindex->nHeight > previous_index->nHeight + 1) { - return error("%s: block index is non-contiguous, index of height %d missing", __func__, previous_index->nHeight + 1); + LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1); + return false; } previous_index = pindex; pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex); @@ -489,7 +490,8 @@ static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const // Open history file to append CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { - return error("%s: OpenUndoFile failed", __func__); + LogError("%s: OpenUndoFile failed\n", __func__); + return false; } // Write index header @@ -499,7 +501,8 @@ static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const // Write undo data long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { - return error("%s: ftell failed", __func__); + LogError("%s: ftell failed\n", __func__); + return false; } pos.nPos = (unsigned int)fileOutPos; fileout << blockundo; @@ -518,13 +521,15 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex) const FlatFilePos pos{WITH_LOCK(::cs_main, return pindex->GetUndoPos())}; if (pos.IsNull()) { - return error("%s: no undo data available", __func__); + LogError("%s: no undo data available\n", __func__); + return false; } // Open history file to read CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); if (filein.IsNull()) { - return error("%s: OpenUndoFile failed", __func__); + LogError("%s: OpenUndoFile failed\n", __func__); + return false; } // Read block @@ -535,12 +540,14 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex) verifier >> blockundo; filein >> hashChecksum; } catch (const std::exception& e) { - return error("%s: Deserialize or I/O error - %s", __func__, e.what()); + 
LogError("%s: Deserialize or I/O error - %s\n", __func__, e.what()); + return false; } // Verify checksum if (hashChecksum != verifier.GetHash()) { - return error("%s: Checksum mismatch", __func__); + LogError("%s: Checksum mismatch\n", __func__); + return false; } return true; @@ -708,7 +715,8 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa // Open history file to append CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); if (fileout.IsNull()) { - return error("WriteBlockToDisk: OpenBlockFile failed"); + LogError("WriteBlockToDisk: OpenBlockFile failed\n"); + return false; } // Write index header @@ -718,7 +726,8 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa // Write block long fileOutPos = ftell(fileout.Get()); if (fileOutPos < 0) { - return error("WriteBlockToDisk: ftell failed"); + LogError("WriteBlockToDisk: ftell failed\n"); + return false; } pos.nPos = (unsigned int)fileOutPos; fileout << block; @@ -733,7 +742,8 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid if (block.GetUndoPos().IsNull()) { FlatFilePos _pos; if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) { - return error("ConnectBlock(): FindUndoPos failed"); + LogError("ConnectBlock(): FindUndoPos failed\n"); + return false; } if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash(), GetParams().MessageStart())) { return AbortNode(state, "Failed to write undo data"); @@ -796,6 +806,7 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus if (*hash != pindex->GetBlockHash()) { return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s", pindex->ToString(), block_pos.ToString()); + return false; } return true; } @@ -814,7 +825,7 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, CCha nBlockSize += 
static_cast(BLOCK_SERIALIZATION_HEADER_SIZE); } if (!FindBlockPos(blockPos, nBlockSize, nHeight, active_chain, block.GetBlockTime(), position_known)) { - error("%s: FindBlockPos failed", __func__); + LogError("%s: FindBlockPos failed\n", __func__); return FlatFilePos(); } if (!position_known) { diff --git a/src/script/signingprovider.cpp b/src/script/signingprovider.cpp index 3f6c9c2ad7c8..2eee235126ad 100644 --- a/src/script/signingprovider.cpp +++ b/src/script/signingprovider.cpp @@ -110,8 +110,10 @@ bool FillableSigningProvider::GetKey(const CKeyID &address, CKey &keyOut) const bool FillableSigningProvider::AddCScript(const CScript& redeemScript) { - if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE) - return error("FillableSigningProvider::AddCScript(): redeemScripts > %i bytes are invalid", MAX_SCRIPT_ELEMENT_SIZE); + if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE) { + LogError("FillableSigningProvider::AddCScript(): redeemScripts > %i bytes are invalid\n", MAX_SCRIPT_ELEMENT_SIZE); + return false; + } LOCK(cs_KeyStore); mapScripts[CScriptID(redeemScript)] = redeemScript; diff --git a/src/validation.cpp b/src/validation.cpp index 2a16f339b631..8a77f9c80bbc 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -771,8 +771,10 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) return false; // state filled in by CheckTransaction } - if (!ContextualCheckTransaction(tx, state, chainparams.GetConsensus(), m_active_chainstate.m_chain.Tip())) - return error("%s: ContextualCheckTransaction: %s, %s", __func__, hash.ToString(), state.ToString()); + if (!ContextualCheckTransaction(tx, state, chainparams.GetConsensus(), m_active_chainstate.m_chain.Tip())) { + LogError("%s: ContextualCheckTransaction: %s, %s\n", __func__, hash.ToString(), state.ToString()); + return false; + } if (tx.IsSpecialTxVersion() && tx.nType == TRANSACTION_QUORUM_COMMITMENT) { // quorum commitment is not allowed outside of blocks @@ -2019,12 +2021,12 @@ DisconnectResult 
CChainState::DisconnectBlock(const CBlock& block, const CBlockI CBlockUndo blockUndo; if (!UndoReadFromDisk(blockUndo, pindex)) { - error("DisconnectBlock(): failure reading undo data"); + LogError("DisconnectBlock(): failure reading undo data\n"); return DISCONNECT_FAILED; } if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) { - error("DisconnectBlock(): block and undo data inconsistent"); + LogError("DisconnectBlock(): block and undo data inconsistent\n"); return DISCONNECT_FAILED; } @@ -2092,7 +2094,7 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI if (i > 0) { // not coinbases CTxUndo &txundo = blockUndo.vtxundo[i-1]; if (txundo.vprevout.size() != tx.vin.size()) { - error("DisconnectBlock(): transaction and undo data inconsistent"); + LogError("DisconnectBlock(): transaction and undo data inconsistent\n"); return DISCONNECT_FAILED; } for (unsigned int j = tx.vin.size(); j > 0;) { @@ -2313,7 +2315,8 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, // problems. 
return AbortNode(state, "Corrupt block found indicating potential hardware failure; shutting down"); } - return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString()); + LogError("%s: Consensus::CheckBlock: %s\n", __func__, state.ToString()); + return false; } if (pindex->pprev && pindex->phashBlock && m_chain_helper->HasConflictingChainLock(pindex->nHeight, pindex->GetBlockHash())) { @@ -2457,8 +2460,9 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, // MUST process special txes before updating UTXO to ensure consistency between mempool and block processing std::optional mnlist_updates_opt{std::nullopt}; if (!m_chain_helper->special_tx->ProcessSpecialTxsInBlock(block, pindex, view, fJustCheck, fScriptChecks, state, mnlist_updates_opt)) { - return error("ConnectBlock(DASH): ProcessSpecialTxsInBlock for block %s failed with %s", - pindex->GetBlockHash().ToString(), state.ToString()); + LogError("ConnectBlock(DASH): ProcessSpecialTxsInBlock for block %s failed with %s\n", + pindex->GetBlockHash().ToString(), state.ToString()); + return false; } int64_t nTime2_1 = GetTimeMicros(); nTimeProcessSpecial += nTime2_1 - nTime2; @@ -2479,7 +2483,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, TxValidationState tx_state; if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) { // Any transaction validation failure in ConnectBlock is a block consensus failure - LogPrintf("ERROR: %s: Consensus::CheckTxInputs: %s, %s\n", __func__, tx.GetHash().ToString(), state.ToString()); + LogError("%s: Consensus::CheckTxInputs: %s, %s\n", __func__, tx.GetHash().ToString(), state.ToString()); return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), tx_state.GetDebugMessage()); } @@ -2553,7 +2557,7 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state, // Any transaction validation failure in ConnectBlock is a block consensus 
failure state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), tx_state.GetDebugMessage()); - LogPrintf("ERROR: ConnectBlock(): CheckInputScripts on %s failed with %s\n", + LogError("ConnectBlock(): CheckInputScripts on %s failed with %s\n", tx.GetHash().ToString(), state.ToString()); return false; } @@ -3046,7 +3050,8 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr std::shared_ptr pblock = std::make_shared(); CBlock& block = *pblock; if (!ReadBlockFromDisk(block, pindexDelete, m_params.GetConsensus())) { - return error("DisconnectTip(): Failed to read block"); + LogError("DisconnectTip(): Failed to read block\n"); + return false; } // Apply the block atomically to the chain state. int64_t nStart = GetTimeMicros(); @@ -3055,8 +3060,10 @@ bool CChainState::DisconnectTip(BlockValidationState& state, DisconnectedBlockTr CCoinsViewCache view(&CoinsTip()); assert(view.GetBestBlock() == pindexDelete->GetBlockHash()); - if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) - return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); + if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) { + LogError("DisconnectTip(): DisconnectBlock %s failed\n", pindexDelete->GetBlockHash().ToString()); + return false; + } bool flushed = view.Flush(); assert(flushed); dbTx->Commit(); @@ -3200,7 +3207,8 @@ bool CChainState::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew if (!rv) { if (state.IsInvalid()) InvalidBlockFound(pindexNew, state); - return error("%s: ConnectBlock %s failed, %s", __func__, pindexNew->GetBlockHash().ToString(), state.ToString()); + LogError("%s: ConnectBlock %s failed, %s\n", __func__, pindexNew->GetBlockHash().ToString(), state.ToString()); + return false; } nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; assert(nBlocksTotal > 0); @@ -4419,7 +4427,8 @@ bool CChainState::AcceptBlock(const std::shared_ptr& 
pblock, Block pindex->nStatus |= BLOCK_FAILED_VALID; m_blockman.m_dirty_blockindex.insert(pindex); } - return error("%s: %s", __func__, state.ToString()); + LogError("%s: %s\n", __func__, state.ToString()); + return false; } // Header is valid/has work, merkle tree is good...RELAY NOW @@ -4478,15 +4487,18 @@ bool ChainstateManager::ProcessNewBlock(const std::shared_ptr& blo } if (!ret) { GetMainSignals().BlockChecked(*block, state); - return error("%s: AcceptBlock FAILED: %s", __func__, state.ToString()); + LogError("%s: AcceptBlock FAILED (%s)\n", __func__, state.ToString()); + return false; } } NotifyHeaderTip(ActiveChainstate()); BlockValidationState state; // Only used to report errors, not invalidity - ignore it - if (!ActiveChainstate().ActivateBestChain(state, block)) - return error("%s: ActivateBestChain failed: %s", __func__, state.ToString()); + if (!ActiveChainstate().ActivateBestChain(state, block)) { + LogError("%s: ActivateBestChain failed (%s)\n", __func__, state.ToString()); + return false; + } LogPrintf("%s : ACCEPTED\n", __func__); return true; @@ -4540,12 +4552,18 @@ bool TestBlockValidity(BlockValidationState& state, auto dbTx = evoDb.BeginTransaction(); // NOTE: CheckBlockHeader is called by CheckBlock - if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev, GetAdjustedTime())) - return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString()); - if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot)) - return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString()); - if (!ContextualCheckBlock(block, state, chainstate.m_chainman, pindexPrev)) - return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString()); + if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainstate.m_chainman, pindexPrev, GetAdjustedTime())) { + LogError("%s: Consensus::ContextualCheckBlockHeader: %s\n", __func__, 
state.ToString()); + return false; + } + if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot)) { + LogError("%s: Consensus::CheckBlock: %s\n", __func__, state.ToString()); + return false; + } + if (!ContextualCheckBlock(block, state, chainstate.m_chainman, pindexPrev)) { + LogError("%s: Consensus::ContextualCheckBlock: %s\n", __func__, state.ToString()); + return false; + } if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, true)) return false; @@ -4669,19 +4687,22 @@ bool CVerifyDB::VerifyDB( CBlock block; // check level 0: read from disk if (!ReadBlockFromDisk(block, pindex, consensus_params)) { - return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; } // check level 1: verify block validity if (nCheckLevel >= 1 && !CheckBlock(block, state, consensus_params)) { - return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, - pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); + LogError("%s: *** found bad block at %d, hash=%s (%s)\n", __func__, + pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); + return false; } // check level 2: verify undo validity if (nCheckLevel >= 2 && pindex) { CBlockUndo undo; if (!pindex->GetUndoPos().IsNull()) { if (!UndoReadFromDisk(undo, pindex)) { - return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; } } } @@ -4693,7 +4714,8 @@ bool CVerifyDB::VerifyDB( assert(coins.GetBestBlock() == pindex->GetBlockHash()); DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins); if (res == DISCONNECT_FAILED) { - return 
error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; } if (res == DISCONNECT_UNCLEAN) { nGoodTransactions = 0; @@ -4708,7 +4730,8 @@ bool CVerifyDB::VerifyDB( if (ShutdownRequested()) return true; } if (pindexFailure) { - return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions); + LogError("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions); + return false; } if (skipped_l3_checks) { LogPrintf("Skipped verification of level >=3 (insufficient database cache size). Consider increasing -dbcache.\n"); @@ -4728,10 +4751,14 @@ bool CVerifyDB::VerifyDB( uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false); pindex = chainstate.m_chain.Next(pindex); CBlock block; - if (!ReadBlockFromDisk(block, pindex, consensus_params)) - return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); - if (!chainstate.ConnectBlock(block, state, pindex, coins)) - return error("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); + if (!ReadBlockFromDisk(block, pindex, consensus_params)) { + LogError("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; + } + if (!chainstate.ConnectBlock(block, state, pindex, coins)) { + LogError("VerifyDB(): *** found unconnectable block at %d, hash=%s (%s)\n", pindex->nHeight, 
pindex->GetBlockHash().ToString(), state.ToString()); + return false; + } if (ShutdownRequested()) return true; } } @@ -4751,15 +4778,17 @@ bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& i // TODO: merge with ConnectBlock CBlock block; if (!ReadBlockFromDisk(block, pindex, m_params.GetConsensus())) { - return error("RollforwardBlock(): ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("RollforwardBlock(): ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + return false; } // MUST process special txes before updating UTXO to ensure consistency between mempool and block processing BlockValidationState state; std::optional mnlist_updates_opt{std::nullopt}; if (!m_chain_helper->special_tx->ProcessSpecialTxsInBlock(block, pindex, inputs, false /*fJustCheck*/, false /*fScriptChecks*/, state, mnlist_updates_opt)) { - return error("RollforwardBlock(DASH): ProcessSpecialTxsInBlock for block %s failed with %s", - pindex->GetBlockHash().ToString(), state.ToString()); + LogError("RollforwardBlock(DASH): ProcessSpecialTxsInBlock for block %s failed with %s\n", + pindex->GetBlockHash().ToString(), state.ToString()); + return false; } std::vector addressIndex; @@ -4828,22 +4857,28 @@ bool CChainState::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& i if (fAddressIndex) { if (!m_blockman.m_block_tree_db->WriteAddressIndex(addressIndex)) { - return error("RollforwardBlock(DASH): Failed to write address index"); + LogError("RollforwardBlock(DASH): Failed to write address index\n"); + return false; } if (!m_blockman.m_block_tree_db->UpdateAddressUnspentIndex(addressUnspentIndex)) { - return error("RollforwardBlock(DASH): Failed to write address unspent index"); + LogError("RollforwardBlock(DASH): Failed to write address unspent index\n"); + return false; } } if (fSpentIndex) { - if (!m_blockman.m_block_tree_db->UpdateSpentIndex(spentIndex))
- return error("RollforwardBlock(DASH): Failed to write transaction index"); + if (!m_blockman.m_block_tree_db->UpdateSpentIndex(spentIndex)) { + LogError("RollforwardBlock(DASH): Failed to write transaction index\n"); + return false; + } } if (fTimestampIndex) { - if (!m_blockman.m_block_tree_db->WriteTimestampIndex(CTimestampIndexKey(pindex->nTime, pindex->GetBlockHash()))) - return error("RollforwardBlock(DASH): Failed to write timestamp index"); + if (!m_blockman.m_block_tree_db->WriteTimestampIndex(CTimestampIndexKey(pindex->nTime, pindex->GetBlockHash()))) { + LogError("RollforwardBlock(DASH): Failed to write timestamp index\n"); + return false; + } } return true; @@ -4858,7 +4893,10 @@ bool CChainState::ReplayBlocks() std::vector hashHeads = db.GetHeadBlocks(); if (hashHeads.empty()) return true; // We're already in a consistent state. - if (hashHeads.size() != 2) return error("ReplayBlocks(): unknown inconsistent state"); + if (hashHeads.size() != 2) { + LogError("ReplayBlocks(): unknown inconsistent state\n"); + return false; + } uiInterface.ShowProgress(_("Replaying blocks…").translated, 0, false); LogPrintf("Replaying blocks\n"); @@ -4868,20 +4906,23 @@ bool CChainState::ReplayBlocks() const CBlockIndex* pindexFork = nullptr; // Latest block common to both the old and the new tip. if (m_blockman.m_block_index.count(hashHeads[0]) == 0) { - return error("ReplayBlocks(): reorganization to unknown block requested"); + LogError("ReplayBlocks(): reorganization to unknown block requested\n"); + return false; } pindexNew = &(m_blockman.m_block_index[hashHeads[0]]); if (!hashHeads[1].IsNull()) { // The old tip is allowed to be 0, indicating it's the first flush. 
if (m_blockman.m_block_index.count(hashHeads[1]) == 0) { - return error("ReplayBlocks(): reorganization from unknown block requested"); + LogError("ReplayBlocks(): reorganization from unknown block requested\n"); + return false; } pindexOld = &(m_blockman.m_block_index[hashHeads[1]]); pindexFork = LastCommonAncestor(pindexOld, pindexNew); assert(pindexFork != nullptr); const bool fDIP0003Active = DeploymentActiveAt(*pindexOld, m_params.GetConsensus(), Consensus::DEPLOYMENT_DIP0003); if (fDIP0003Active && !m_evoDb.VerifyBestBlock(pindexOld->GetBlockHash())) { - return error("ReplayBlocks(DASH): Found EvoDB inconsistency"); + LogError("ReplayBlocks(DASH): Found EvoDB inconsistency\n"); + return false; } } @@ -4892,12 +4933,14 @@ bool CChainState::ReplayBlocks() if (pindexOld->nHeight > 0) { // Never disconnect the genesis block. CBlock block; if (!ReadBlockFromDisk(block, pindexOld, m_params.GetConsensus())) { - return error("ReplayBlocks(): ReadBlockFromDisk() failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); + LogError("ReplayBlocks(): ReadBlockFromDisk() failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); + return false; } LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight); DisconnectResult res = DisconnectBlock(block, pindexOld, cache); if (res == DISCONNECT_FAILED) { - return error("ReplayBlocks(): DisconnectBlock failed at %d, hash=%s", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); + LogError("ReplayBlocks(): DisconnectBlock failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); + return false; } // If DISCONNECT_UNCLEAN is returned, it means a non-existing UTXO was deleted, or an existing UTXO was // overwritten.
It corresponds to cases where the block-to-be-disconnect never had all its operations @@ -5045,7 +5088,8 @@ bool CChainState::AddGenesisBlock(const CBlock& block, BlockValidationState& sta { FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0, m_chain, nullptr)}; if (blockPos.IsNull()) { - return error("%s: writing genesis block to disk failed (%s)", __func__, state.ToString()); + LogError("AddGenesisBlock: writing genesis block to disk failed (%s)\n", state.ToString()); + return false; } CBlockIndex* pindex = m_blockman.AddToBlockIndex(block, block.GetHash(), m_chainman.m_best_header); ReceivedBlockTransactions(block, pindex, blockPos); @@ -5079,7 +5123,8 @@ bool CChainState::LoadGenesisBlock() return false; } } catch (const std::runtime_error &e) { - return error("%s: failed to initialize block database: %s", __func__, e.what()); + LogError("%s: failed to initialize block database: %s\n", __func__, e.what()); + return false; } return true; From 6bc3cc0907319f609711db35a644d9bd25d9075b Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 12 Mar 2024 18:43:13 +0000 Subject: [PATCH 4/8] Merge bitcoin/bitcoin#29633: log: Remove error() reference d0e6564240857994db53d06f66ea09da0edbaf0f log: Remove error() reference (Fabian Jahr) Pull request description: Mini-followup to #29236 that was just merged. Removes a reference to `error()` that was missed in a comment. ACKs for top commit: ryanofsky: Code review ACK d0e6564240857994db53d06f66ea09da0edbaf0f.
Just dropped LogPrintf reference since last review stickies-v: ACK d0e6564240857994db53d06f66ea09da0edbaf0f Empact: ACK https://github.com/bitcoin/bitcoin/pull/29633/commits/d0e6564240857994db53d06f66ea09da0edbaf0f Tree-SHA512: 8abe4895951013c2ceca9a57743aacabaf8af831d07eee9ae8372c121c16e88b7226f0e537200c3464792e19ac7e03b57ba0be31f43add8802753972b0aefc48 --- src/logging.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/logging.h b/src/logging.h index d0b2069319f0..cc37a65a8449 100644 --- a/src/logging.h +++ b/src/logging.h @@ -257,7 +257,7 @@ std::string SafeStringFormat(const std::string& fmt, const Args&... args) } } -// Be conservative when using LogPrintf/error or other things which +// Be conservative when using functions that // unconditionally log to debug.log! It should not be the case that an inbound // peer can fill up a user's disk with debug.log entries. From 6dfa502ddbe0bbddc859d303cf575e175e293e3c Mon Sep 17 00:00:00 2001 From: glozow Date: Thu, 14 Mar 2024 11:14:56 +0000 Subject: [PATCH 5/8] Merge bitcoin/bitcoin#29459: test: check_mempool_result negative feerate bf264e05981e3809715f34f548138d53991db6f2 test: check_mempool_result negative feerate (kevkevin) Pull request description: Adds test coverage in `mempool_accept.py` to check if a negative `maxfeerate` is input into `check_mempool_result` Asserts "Amount out of range" error message and `-3` error code Motivated by this [comment](https://github.com/bitcoin/bitcoin/pull/29434/files#r1491112250) ACKs for top commit: maflcko: lgtm ACK bf264e05981e3809715f34f548138d53991db6f2 brunoerg: nice, utACK bf264e05981e3809715f34f548138d53991db6f2 davidgumberg: Looks great, ACK https://github.com/bitcoin/bitcoin/pull/29459/commits/bf264e05981e3809715f34f548138d53991db6f2 Tree-SHA512: 58931b774cc887c616f2fd91af3ee65cc5db55acd8e2875c76de448c80bd4e020b057c5f4f85556431377f0d0e7553771fb285d1ec20cf64f64ec92a47776b78 --- test/functional/mempool_accept.py | 6 ++++++ 1 file changed, 6 
insertions(+) diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index f424f19edead..2c8c0dc2ab60 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -77,6 +77,12 @@ def run_test(self): txid_in_block = self.wallet.sendrawtransaction(from_node=node, tx_hex=raw_tx_in_block) self.generate(node, 1) self.mempool_size = 0 + # Check negative feerate + assert_raises_rpc_error(-3, "Amount out of range", lambda: self.check_mempool_result( + result_expected=None, + rawtxs=[raw_tx_in_block], + maxfeerate=-0.01, + )) self.check_mempool_result( result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': 'txn-already-known'}], rawtxs=[raw_tx_in_block], From ffeeadf0a085d3b44165bc66a40407236937c22a Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 27 Mar 2024 11:38:11 +0000 Subject: [PATCH 6/8] Merge bitcoin/bitcoin#29479: test: Refactor subtree exclusion in lint tests 80fa7da21c470302165c47cc4a6a62fb44f997ef test: Refactor subtree exclusion in lint tests (Brandon Odiwuor) Pull request description: Fixes https://github.com/bitcoin/bitcoin/issues/17413 Refactor subtree exclusion in lint tests to one place Second attempt after PR: https://github.com/bitcoin/bitcoin/pull/24435 ACKs for top commit: fjahr: re-ACK 80fa7da21c470302165c47cc4a6a62fb44f997ef maflcko: lgtm ACK 80fa7da21c470302165c47cc4a6a62fb44f997ef davidgumberg: ACK https://github.com/bitcoin/bitcoin/commit/80fa7da21c470302165c47cc4a6a62fb44f997ef Tree-SHA512: deff7457dd19ca5ea440d3d53feae047e8863b9ddeb6494a3c94605a5d16edc91db8f99a435b4fab2ef89aedee42439562be006da647fb85bbf3def903a3ce50 --- test/lint/README.md | 5 +++++ test/lint/lint-include-guards.py | 8 +++----- test/lint/lint-includes.py | 4 +++- test/lint/lint-spelling.py | 5 ++++- test/lint/lint_ignore_dirs.py | 5 +++++ 5 files changed, 20 insertions(+), 7 deletions(-) create mode 100644 test/lint/lint_ignore_dirs.py diff --git a/test/lint/README.md b/test/lint/README.md index 
704922d7abe3..b2f56bda3d62 100644 --- a/test/lint/README.md +++ b/test/lint/README.md @@ -59,3 +59,8 @@ git remote add --fetch secp256k1 https://github.com/bitcoin-core/secp256k1.git all-lint.py =========== Calls other scripts with the `lint-` prefix. + + +lint_ignore_dirs.py +=================== +Add list of common directories to ignore when running tests diff --git a/test/lint/lint-include-guards.py b/test/lint/lint-include-guards.py index 6e59c83f7dbc..c5de3b4394a5 100755 --- a/test/lint/lint-include-guards.py +++ b/test/lint/lint-include-guards.py @@ -13,15 +13,13 @@ from subprocess import check_output from typing import List +from lint_ignore_dirs import SHARED_EXCLUDED_SUBTREES + HEADER_ID_PREFIX = 'BITCOIN_' HEADER_ID_SUFFIX = '_H' EXCLUDE_FILES_WITH_PREFIX = ['src/crypto/ctaes', - 'src/leveldb', - 'src/crc32c', - 'src/secp256k1', - 'src/minisketch', 'src/tinyformat.h', 'src/bench/nanobench.h', 'src/test/fuzz/FuzzedDataProvider.h', @@ -30,7 +28,7 @@ 'src/ctpl_stl.h', 'src/dashbls', 'src/gsl', - 'src/immer'] + 'src/immer'] + SHARED_EXCLUDED_SUBTREES def _get_header_file_lst() -> List[str]: diff --git a/test/lint/lint-includes.py b/test/lint/lint-includes.py index 9a0cfa127dde..8bd4ba24c884 100755 --- a/test/lint/lint-includes.py +++ b/test/lint/lint-includes.py @@ -14,6 +14,8 @@ from subprocess import check_output, CalledProcessError +from lint_ignore_dirs import SHARED_EXCLUDED_SUBTREES + EXCLUDED_DIRS = ["src/leveldb/", "src/crc32c/", @@ -21,7 +23,7 @@ "src/minisketch/", "src/dashbls/", "src/immer/", - "src/crypto/x11/"] + "src/crypto/x11/"] + SHARED_EXCLUDED_SUBTREES EXPECTED_BOOST_INCLUDES = ["boost/date_time/posix_time/posix_time.hpp", "boost/hana/for_each.hpp", diff --git a/test/lint/lint-spelling.py b/test/lint/lint-spelling.py index fb4d2495c691..e10d2368dfc5 100755 --- a/test/lint/lint-spelling.py +++ b/test/lint/lint-spelling.py @@ -11,8 +11,11 @@ from subprocess import check_output, STDOUT, CalledProcessError +from lint_ignore_dirs import 
SHARED_EXCLUDED_SUBTREES + IGNORE_WORDS_FILE = 'test/lint/spelling.ignore-words.txt' -FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/dashbls/", ":(exclude)src/crc32c/", ":(exclude)src/crypto/", ":(exclude)src/ctpl_stl.h", ":(exclude)src/cxxtimer.hpp", ":(exclude)src/immer/", ":(exclude)src/leveldb/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)src/secp256k1/", ":(exclude)src/minisketch/", ":(exclude)contrib/builder-keys/", ":(exclude)contrib/guix/patches", ":(exclude)src/util/subprocess.hpp", ":(exclude)src/wallet/bip39_english.h"] +FILES_ARGS = ['git', 'ls-files', '--', ":(exclude)build-aux/m4/", ":(exclude)contrib/seeds/*.txt", ":(exclude)depends/", ":(exclude)doc/release-notes/", ":(exclude)src/dashbls/", ":(exclude)src/crypto/", ":(exclude)src/ctpl_stl.h", ":(exclude)src/cxxtimer.hpp", ":(exclude)src/immer/", ":(exclude)src/qt/locale/", ":(exclude)src/qt/*.qrc", ":(exclude)contrib/builder-keys/", ":(exclude)contrib/guix/patches", ":(exclude)src/util/subprocess.hpp", ":(exclude)src/wallet/bip39_english.h"] +FILES_ARGS += [f":(exclude){dir}" for dir in SHARED_EXCLUDED_SUBTREES] def check_codespell_install(): diff --git a/test/lint/lint_ignore_dirs.py b/test/lint/lint_ignore_dirs.py new file mode 100644 index 000000000000..af9ee7ef6bef --- /dev/null +++ b/test/lint/lint_ignore_dirs.py @@ -0,0 +1,5 @@ +SHARED_EXCLUDED_SUBTREES = ["src/leveldb/", + "src/crc32c/", + "src/secp256k1/", + "src/minisketch/", + ] From 790afc75ec959d8f9ae0c838ba5120a2e5aa437a Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Thu, 25 Apr 2024 13:43:54 -0400 Subject: [PATCH 7/8] Merge bitcoin/bitcoin#29615: test: fix accurate multisig sigop count (BIP16), add unit test 3e9c736a26724ffe3b70b387995fbf48c06300e2 test: fix accurate multisig sigop count (BIP16), add unit test (Sebastian Falbesoner) Pull request description: In the course of reviewing #29589 I 
noticed the following buggy call-site of `CScriptOp.decode_op_n` in the CScript's `GetSigOpCount` method: https://github.com/bitcoin/bitcoin/blob/4cc99df44aec4d104590aee46cf18318e22a8568/test/functional/test_framework/script.py#L591-L593 This should be `lastOpcode` rather than `opcode`. The latter is either OP_CHECKMULTISIG or OP_CHECKMULTISIGVERIFY at this point, so `decode_op_n` would result in an error. Also, in `CScript.raw_iter`, we have to return the op as `CScriptOp` type instead of a bare integer, otherwise we can't call the decode method on it. To prevent this in the future, add some simple unit tests for `GetSigOpCount`. Note that this was unnoticed, as the code part was never hit so far in the test framework. ACKs for top commit: achow101: ACK 3e9c736a26724ffe3b70b387995fbf48c06300e2 Christewart: ACK 3e9c736a26724ffe3b70b387995fbf48c06300e2 rkrux: tACK [3e9c736](https://github.com/bitcoin/bitcoin/pull/29615/commits/3e9c736a26724ffe3b70b387995fbf48c06300e2) hernanmarino: tACK 3e9c736a26724ffe3b70b387995fbf48c06300e2 Tree-SHA512: 51647bb6d462fbd101effd851afdbd6ad198c0567888cd4fdcac389a9fb4bd3d7e648095c6944fd8875d36272107ebaabdc62d0e2423289055588c12294d05a7 --- test/functional/test_framework/script.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index bb078591b9f4..bdc127b104a9 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -479,7 +479,7 @@ def raw_iter(self): i = 0 while i < len(self): sop_idx = i - opcode = self[i] + opcode = CScriptOp(self[i]) i += 1 if opcode > OP_PUSHDATA4: @@ -586,7 +586,7 @@ def GetSigOpCount(self, fAccurate): n += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): if fAccurate and (OP_1 <= lastOpcode <= OP_16): - n += opcode.decode_op_n() + n += lastOpcode.decode_op_n() else: n += 20 lastOpcode = opcode @@ -704,3 +704,17 @@ def 
test_cscriptnum_encoding(self): values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500] for value in values: self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value) + + def test_legacy_sigopcount(self): + # test repeated single sig ops + for n_ops in range(1, 100, 10): + for singlesig_op in (OP_CHECKSIG, OP_CHECKSIGVERIFY): + singlesigs_script = CScript([singlesig_op]*n_ops) + self.assertEqual(singlesigs_script.GetSigOpCount(fAccurate=False), n_ops) + self.assertEqual(singlesigs_script.GetSigOpCount(fAccurate=True), n_ops) + # test multisig op (including accurate counting, i.e. BIP16) + for n in range(1, 16+1): + for multisig_op in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): + multisig_script = CScript([CScriptOp.encode_op_n(n), multisig_op]) + self.assertEqual(multisig_script.GetSigOpCount(fAccurate=False), 20) + self.assertEqual(multisig_script.GetSigOpCount(fAccurate=True), n) From 11dde6258b8f21d0e33b3ecced8f6955d2808833 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Thu, 25 Apr 2024 13:55:37 -0400 Subject: [PATCH 8/8] Merge bitcoin/bitcoin#29433: contrib: rpcauth.py - Add new option (-json) to output text in json format 9adf949d2aa6d199b85295b18c08967395b5570a contrib: rpcauth.py - Add new option (-j/--json) to output text in json format (bstin) Pull request description: This is a simple change to rpcauth.py utility in order to output as json instead raw text. 
This is beneficial because integrating json output is simpler with multiple different forms of automation and tooling ACKs for top commit: maflcko: ACK 9adf949d2aa6d199b85295b18c08967395b5570a achow101: ACK 9adf949d2aa6d199b85295b18c08967395b5570a willcl-ark: tACK 9adf949d2aa6d199b85295b18c08967395b5570a tdb3: ACK for 9adf949d2aa6d199b85295b18c08967395b5570a Tree-SHA512: 2cdc3b2071fbe4fb32a84ce42ee8ad216cff96ed82aaef58daeb3991953ac137ae42d6898a7fdb6cbd1800e1f61ff8d292f0b150eaebdd2a3fd9d37ed7450787 --- share/rpcauth/README.md | 1 + share/rpcauth/rpcauth.py | 12 +++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/share/rpcauth/README.md b/share/rpcauth/README.md index 6f627b867bca..1b3acb1dac16 100644 --- a/share/rpcauth/README.md +++ b/share/rpcauth/README.md @@ -15,4 +15,5 @@ positional arguments: optional arguments: -h, --help show this help message and exit + -j, --json output data in json format ``` diff --git a/share/rpcauth/rpcauth.py b/share/rpcauth/rpcauth.py index 6f94f8fe770c..70b3706f3c0c 100755 --- a/share/rpcauth/rpcauth.py +++ b/share/rpcauth/rpcauth.py @@ -7,6 +7,7 @@ from getpass import getpass from secrets import token_hex, token_urlsafe import hmac +import json def generate_salt(size): """Create size byte hex salt""" @@ -24,6 +25,7 @@ def main(): parser = ArgumentParser(description='Create login credentials for a JSON-RPC user') parser.add_argument('username', help='the username for authentication') parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?') + parser.add_argument("-j", "--json", help="output to json instead of plain-text", action='store_true') args = parser.parse_args() if not args.password: @@ -35,9 +37,13 @@ def main(): salt = generate_salt(16) password_hmac = password_to_hmac(salt, args.password) - print('String to be appended to dash.conf:') - print(f'rpcauth={args.username}:{salt}${password_hmac}') - print(f'Your 
password:\n{args.password}') + if args.json: + odict={'username':args.username, 'password':args.password, 'rpcauth':f'{args.username}:{salt}${password_hmac}'} + print(json.dumps(odict)) + else: + print('String to be appended to dash.conf:') + print(f'rpcauth={args.username}:{salt}${password_hmac}') + print(f'Your password:\n{args.password}') if __name__ == '__main__': main()