Merged
132 changes: 91 additions & 41 deletions contrib/seeds/makeseeds.py
@@ -13,22 +13,28 @@
import collections
import json
import multiprocessing
from typing import List, Dict, Union

NSEEDS=512

MAX_SEEDS_PER_ASN=4

# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
MAX_SEEDS_PER_ASN = {
'ipv4': 4,
'ipv6': 10,
}

MIN_BLOCKS = 2_300_000

PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([a-z2-7]{56}\.onion):(\d+)$")

def parseip(ip_in):

def parseip(ip_in: str) -> Union[dict, None]:
""" Parses a line from `seeds_main.txt` into a dictionary of details for that line.
or `None`, if the line could not be parsed.
"""
m = PATTERN_IPV4.match(ip_in)
ip = None
if m is None:
@@ -70,49 +76,64 @@ def parseip(ip_in):
"sortkey": sortkey
}
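As a quick illustration of how the address patterns above classify input (the sample addresses are ours; this sketches only the matching step that parseip builds its dict from):

import re

PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_ONION = re.compile(r"^([a-z2-7]{56}\.onion):(\d+)$")

# IPv4: group 1 is the address, group 6 the port.
m = PATTERN_IPV4.match("203.0.113.5:9999")
assert m and m.group(1) == "203.0.113.5" and m.group(6) == "9999"

# Onion: only 56-character v3 hostnames match; shorter v2 names do not.
assert PATTERN_ONION.match("a" * 56 + ".onion:9999") is not None
assert PATTERN_ONION.match("a" * 16 + ".onion:9999") is None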

def filtermulticollateralhash(mns):
def filtermulticollateralhash(mns : List[Dict]) -> List[Dict]:
'''Filter out MNs sharing the same collateral hash'''
hist = collections.defaultdict(list)
for mn in mns:
hist[mn['collateralHash']].append(mn)
return [mn for mn in mns if len(hist[mn['collateralHash']]) == 1]

def filtermulticollateraladdress(mns):
def filtermulticollateraladdress(mns : List[Dict]) -> List[Dict]:
'''Filter out MNs sharing the same collateral address'''
hist = collections.defaultdict(list)
for mn in mns:
hist[mn['collateralAddress']].append(mn)
return [mn for mn in mns if len(hist[mn['collateralAddress']]) == 1]

def filtermultipayoutaddress(mns):
def filtermultipayoutaddress(mns : List[Dict]) -> List[Dict]:
'''Filter out MNs sharing the same payout address'''
hist = collections.defaultdict(list)
for mn in mns:
hist[mn['state']['payoutAddress']].append(mn)
return [mn for mn in mns if len(hist[mn['state']['payoutAddress']]) == 1]

def resolveasn(resolver, ip):
if ip['net'] == 'ipv4':
ipaddr = ip['ip']
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
for nb in ip['ip'].split(':')[:4]: # pick the first 4 nibbles
for c in nb.zfill(4): # right padded with '0'
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in resolver.resolve('.'.join(reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
def resolveasn(resolver, ip : Dict) -> Union[int, None]:
""" Look up the asn for an `ip` address by querying cymru.com
on network `net` (e.g. ipv4 or ipv6).

Returns in integer ASN or None if it could not be found.
"""
try:
if ip['net'] == 'ipv4':
ipaddr = ip['ip']
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
for nb in ip['ip'].split(':')[:4]: # pick the first 4 nibbles
for c in nb.zfill(4): # right padded with '0'
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'

asn = int([x.to_text() for x in resolver.resolve('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
return asn
except Exception:
sys.stderr.write(f'ERR: Could not resolve ASN for "{ip["ip"]}"\n')
return None

coderabbitai[bot] marked this conversation as resolved.
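For readers unfamiliar with the Team Cymru interface, the lookup above reduces to building one DNS name per address. A minimal sketch of that construction (the helper name is ours, not the patch's):

def cymru_qname(ip: str, net: str) -> str:
    """Build the DNS name that resolveasn queries, mirroring the logic above."""
    if net == 'ipv4':
        ipaddr, prefix = ip, '.origin'
    else:
        # Expand the first four 16-bit groups into zero-padded nibbles.
        nibbles = ''.join(nb.zfill(4) for nb in ip.split(':')[:4])
        ipaddr, prefix = '.'.join(nibbles), '.origin6'
    return '.'.join(reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com'

assert cymru_qname('8.8.8.8', 'ipv4') == '8.8.8.8.origin.asn.cymru.com'
assert cymru_qname('2001:4860:b002:23::68', 'ipv6') == \
    '3.2.0.0.2.0.0.b.0.6.8.4.1.0.0.2.origin6.asn.cymru.com'

The first field of the TXT answer is the origin ASN (e.g. "15169 | 8.8.8.0/24 | US | arin | ..."), which is what the split on '"' and ' ' in resolveasn extracts.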
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]:
""" Prunes `ips` by
(a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and
(b) trimming ips to have at most `max_per_asn` ips from each asn in each net.
"""
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']

my_resolver = dns.resolver.Resolver()

pool = multiprocessing.Pool(processes=16)

# OpenDNS servers
@@ -121,27 +142,42 @@ def filterbyasn(ips, max_per_asn, max_total):
# Resolve ASNs in parallel
asns = [pool.apply_async(resolveasn, args=(my_resolver, ip)) for ip in ips_ipv46]

# Filter IPv46 by ASN
# Filter IPv46 by ASN, and limit to max_per_net per network
result = []
asn_count = {}
net_count: Dict[str, int] = collections.defaultdict(int)
asn_count: Dict[int, int] = collections.defaultdict(int)

for i, ip in enumerate(ips_ipv46):
if len(result) == max_total:
break
try:
asn = asns[i].get()
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except Exception as e:
sys.stderr.write(f'ERR: Could not resolve ASN for {ip["ip"]}: {e}\n')

# Add back Onions
result.extend(ips_onion)
if i % 10 == 0:
# give progress update
print(f"{i:6d}/{len(ips_ipv46)} [{100*i/len(ips_ipv46):04.1f}%]\r", file=sys.stderr, end='', flush=True)

if net_count[ip['net']] == max_per_net:
# do not add this ip as we already have too many
# ips from this network
continue
asn = asns[i].get()
if asn is None or asn_count[asn] == max_per_asn[ip['net']]:
# do not add this ip as we already have too many
# ips from this ASN on this network
continue
asn_count[asn] += 1
net_count[ip['net']] += 1
result.append(ip)

Comment on lines +145 to +167

⚠️ Potential issue | 🟠 Major

Track ASN usage per network

asn_count is keyed only by ASN, but the limit you compare against (max_per_asn[ip['net']]) is per network. If an ASN already has 4 IPv4 entries, asn_count[asn] hits 4, so the IPv6 branch stops once that combined counter reaches 10. The intended 10 IPv6 slots per ASN are never reachable (you only get 10 - ipv4_count). Please key the counter by (net, asn) (or maintain separate dicts) so IPv4 and IPv6 quotas are enforced independently.

-    asn_count: Dict[int, int] = collections.defaultdict(int)
+    asn_count: Dict[tuple[str, int], int] = collections.defaultdict(int)
...
-        asn = lookup_asn(ip['net'], ip['ip'])
-        if asn is None or asn_count[asn] == max_per_asn[ip['net']]:
+        asn = lookup_asn(ip['net'], ip['ip'])
+        key = (ip['net'], asn) if asn is not None else None
+        if asn is None or asn_count[key] == max_per_asn[ip['net']]:
             continue
-        asn_count[asn] += 1
+        asn_count[key] += 1
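To make that concrete, a self-contained sketch of the (net, ASN)-keyed counting (function and sample values are illustrative, not from the patch):

import collections
from typing import Dict, List, Tuple

def cap_per_net_asn(ips: List[Dict], asns: List[int],
                    max_per_asn: Dict[str, int]) -> List[Dict]:
    """Keep at most max_per_asn[net] addresses per (net, ASN) pair."""
    asn_count: Dict[Tuple[str, int], int] = collections.defaultdict(int)
    kept = []
    for ip, asn in zip(ips, asns):
        key = (ip['net'], asn)
        if asn_count[key] >= max_per_asn[ip['net']]:
            continue
        asn_count[key] += 1
        kept.append(ip)
    return kept

# One ASN (64496, a documentation ASN) now fills both quotas independently:
ips = [{'net': 'ipv4'}] * 6 + [{'net': 'ipv6'}] * 12
kept = cap_per_net_asn(ips, [64496] * 18, {'ipv4': 4, 'ipv6': 10})
assert sum(ip['net'] == 'ipv4' for ip in kept) == 4
assert sum(ip['net'] == 'ipv6' for ip in kept) == 10

With the original single-key counter the last assert would fail (only 10 - 4 = 6 IPv6 entries survive), which is exactly the quota bleed described above.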

# Add back Onions (up to max_per_net)
result.extend(ips_onion[0:max_per_net])
return result
Comment on lines +168 to 170

⚠️ Potential issue | 🟠 Major

Restore the global NSEEDS cap

max_per_net is now NSEEDS, so each network can contribute 512 entries and onions add another 512. With three nets this script can emit >1500 seeds, breaking the historical 512-seed contract that downstream tooling assumes. Keep per-net caps if you like, but also enforce the overall NSEEDS ceiling before returning (e.g., slice result to max_total or stop appending once len(result) == NSEEDS).

-    result.extend(ips_onion[0:max_per_net])
-    return result
+    result.extend(ips_onion[0:max_per_net])
+    return result[:max_per_net]
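A compact sketch of the combined capping the reviewer asks for, assuming the names used in filterbyasn (the helper itself is ours):

def cap_results(result, ips_onion, max_per_net, nseeds=512):
    """Apply the per-net onion cap, then the historical NSEEDS ceiling."""
    capped = result + ips_onion[:max_per_net]
    return capped[:nseeds]

# 512 ipv4/ipv6 survivors plus 512 onions must still yield only 512 seeds:
assert len(cap_results([{}] * 512, [{}] * 512, 512)) == 512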


def ip_stats(ips: List[Dict]) -> str:
""" Format and return pretty string from `ips`. """
hist: Dict[str, int] = collections.defaultdict(int)
for ip in ips:
if ip is not None:
hist[ip['net']] += 1

return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d}"

def main():
# This expects a json as outputted by "protx list valid 1"
if len(sys.argv) > 1:
@@ -155,22 +191,36 @@ def main():
with open(sys.argv[2], 'r', encoding="utf8") as f:
onions = f.read().split('\n')

print(f'Total mns: {len(mns)}', file=sys.stderr)
# Skip PoSe banned MNs
mns = [mn for mn in mns if mn['state']['PoSeBanHeight'] == -1]
print(f'After skip entries from PoSe banned MNs: {len(mns)}', file=sys.stderr)
# Skip MNs with < 10000 confirmations
mns = [mn for mn in mns if mn['confirmations'] >= 10000]
print(f'After skip MNs with less than 10000 confirmations: {len(mns)}', file=sys.stderr)

# Filter out MNs which are definitely from the same person/operator
mns = filtermulticollateralhash(mns)
mns = filtermulticollateraladdress(mns)
mns = filtermultipayoutaddress(mns)
print(f'After removing duplicates: {len(mns)}', file=sys.stderr)

# Extract IPs
ips = [parseip(mn['state']['addresses']['core_p2p'][0]) for mn in mns]
ips = [parseip(mn['state']['addresses'][0]) for mn in mns]
for onion in onions:
Comment on lines 208 to 210

⚠️ Potential issue | 🟡 Minor

Guard against empty addresses arrays

Indexing addresses[0] may raise if the array is empty/missing. Skip such MNs safely.

-    ips = [parseip(mn['state']['addresses'][0]) for mn in mns]
+    ips = []
+    for mn in mns:
+        addrs = mn['state'].get('addresses') or []
+        if not addrs:
+            continue
+        ips.append(parseip(addrs[0]))
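Run on some hypothetical records, the guard behaves like this (the record shapes are ours, for illustration only):

mns = [
    {'state': {'addresses': ['203.0.113.5:9999']}},
    {'state': {'addresses': []}},  # addresses[0] would raise IndexError
    {'state': {}},                 # key missing entirely
]
first_addrs = []
for mn in mns:
    addrs = mn['state'].get('addresses') or []
    if not addrs:
        continue
    first_addrs.append(addrs[0])
assert first_addrs == ['203.0.113.5:9999']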

parsed = parseip(onion)
if parsed is not None:
ips.append(parsed)

print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
print(f'{ip_stats(ips):s} Initial', file=sys.stderr)
# Skip entries with invalid address.
ips = [ip for ip in ips if ip is not None]
print(f'{ip_stats(ips):s} Skip entries with invalid address', file=sys.stderr)

# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
print(f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', file=sys.stderr)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']), reverse=True)

Empty file removed contrib/seeds/suspicious_hosts.txt
Empty file.
18 changes: 4 additions & 14 deletions doc/REST-interface.md
@@ -83,17 +83,7 @@ Responds with 404 if block not found.

Returns various state info regarding block chain processing.
Only supports JSON as output format.
* chain : (string) current network name (main, test, regtest)
* blocks : (numeric) the current number of blocks processed in the server
* headers : (numeric) the current number of headers we have validated
* bestblockhash : (string) the hash of the currently best block
* difficulty : (numeric) the current difficulty
* mediantime : (numeric) the median time of the 11 blocks before the most recent block on the blockchain
* verificationprogress : (numeric) estimate of verification progress [0..1]
* chainwork : (string) total amount of work in active chain, in hexadecimal
* pruned : (boolean) if the blocks are subject to pruning
* pruneheight : (numeric) highest block available
* softforks : (array) status of softforks in progress
Refer to the `getblockchaininfo` RPC help for details.

#### Query UTXO set
- `GET /rest/getutxos/<TXID>-<N>/<TXID>-<N>/.../<TXID>-<N>.<bin|hex|json>`
@@ -130,13 +120,13 @@ $ curl localhost:19998/rest/getutxos/checkmempool/b2cdfd7b89def827ff8af7cd9bff76
#### Memory pool
`GET /rest/mempool/info.json`

Returns various information about the TX mempool.
Returns various information about the transaction mempool.
Only supports JSON as output format.
Refer to the `getmempoolinfo` RPC for documentation of the fields.
Refer to the `getmempoolinfo` RPC help for details.

`GET /rest/mempool/contents.json`

Returns transactions in the TX mempool.
Returns the transactions in the mempool.
Only supports JSON as output format.
Refer to the `getrawmempool` RPC help for details.

50 changes: 25 additions & 25 deletions doc/dependencies.md
@@ -15,41 +15,41 @@ You can find installation instructions in the `build-*.md` file for your platform

## Required

| Dependency | Version used | Minimum required | Runtime |
| --- | --- | --- | --- |
| [Boost](https://www.boost.org/users/download/) | 1.81.0 | [1.73.0](https://github.com/bitcoin/bitcoin/pull/29066) | No |
| [libevent](https://github.com/libevent/libevent/releases) | 2.1.12-stable | [2.1.8](https://github.com/bitcoin/bitcoin/pull/24681) | No |
| [glibc](https://www.gnu.org/software/libc/) | N/A | [2.31](https://github.com/bitcoin/bitcoin/pull/29987) | Yes |
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [Boost](../depends/packages/boost.mk) | [link](https://www.boost.org/users/download/) | 1.81.0 | [1.73.0](https://github.com/bitcoin/bitcoin/pull/29066) | No |
| [libevent](../depends/packages/libevent.mk) | [link](https://github.com/libevent/libevent/releases) | [2.1.12-stable](https://github.com/bitcoin/bitcoin/pull/21991) | [2.1.8](https://github.com/bitcoin/bitcoin/pull/24681) | No |
| glibc | [link](https://www.gnu.org/software/libc/) | N/A | [2.31](https://github.com/bitcoin/bitcoin/pull/29987) | Yes |
Comment on lines 16 to +22

🛠️ Refactor suggestion | 🟠 Major

🧩 Analysis chain

Optional: quick verification script to catch residual “[link]” anchors
Run this to ensure no “[link]” anchors remain and tables are surrounded by blank lines.

Also applies to: 26-36, 38-47, 49-53


🏁 Script executed:

#!/bin/bash
set -euo pipefail

# 1) Ensure there are no generic [link](...) anchors left
rg -n '\[link\]\(' doc/dependencies.md || echo "No generic [link] anchors found."

# 2) Spot tables not preceded by a blank line (simple heuristic)
#    Prints header lines not preceded by a blank line.
awk '
  /^\|[[:space:]]*Dependency[[:space:]]*\|/ {
    if (prev_blank != 1) {
      printf("Table header missing leading blank line at line %d\n", NR)
    }
  }
  { prev_blank = ($0 ~ /^[[:space:]]*$/) ? 1 : 0 }
' doc/dependencies.md

Length of output: 2400


Replace generic “[link]” anchors and add blank lines before tables

  • doc/dependencies.md still contains generic [link](…) anchors (e.g. lines 20–22, 28, 33–36, 41–42, 47, 52–53); replace each with descriptive link text.
  • Ensure every Markdown table is preceded by a blank line for proper rendering.
🧰 Tools
🪛 markdownlint-cli2 (0.18.1)

20-22: Link text should be descriptive (MD059, descriptive-link-text)


## Optional

| Dependency | Version used | Minimum required | Runtime |
| --- | --- | --- | --- |
| [libgmp](https://gmplib.org/download/gmp/)<sup>[ \* ](#note1)</sup> | 6.3.0 | [6.2.0](https://github.com/dashpay/bls-signatures/pull/92) | No |
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| libgmp | [link](https://gmplib.org/download/gmp/)<sup>[ \* ](#note1)</sup> | 6.3.0 | [6.2.0](https://github.com/dashpay/bls-signatures/pull/92) | No |
Comment on lines +26 to +28

⚠️ Potential issue | 🟡 Minor

Fix footnote formatting (MD039) and descriptive link text (MD059)
Remove spaces inside the footnote link text and use descriptive link text in Releases column.

-| libgmp | [link](https://gmplib.org/download/gmp/)<sup>[ \* ](#note1)</sup> | 6.3.0 | [6.2.0](https://github.com/dashpay/bls-signatures/pull/92) | No |
+| libgmp | [Releases](https://gmplib.org/download/gmp/)<sup>[*](#note1)</sup> | 6.3.0 | [6.2.0](https://github.com/dashpay/bls-signatures/pull/92) | No |
🧰 Tools
🪛 markdownlint-cli2 (0.18.1)

28-28: Spaces inside link text (MD039, no-space-in-links)

28-28: Link text should be descriptive (MD059, descriptive-link-text)


### GUI
| Dependency | Version used | Minimum required | Runtime |
| --- | --- | --- | --- |
| [Fontconfig](https://www.freedesktop.org/wiki/Software/fontconfig/) | 2.12.6 | 2.6 | Yes |
| [FreeType](https://freetype.org) | 2.11.0 | 2.3.0 | Yes |
| [qrencode](https://fukuchi.org/works/qrencode/) | [4.1.1](https://fukuchi.org/works/qrencode) | | No |
| [Qt](https://www.qt.io) | [5.15.13](https://download.qt.io/official_releases/qt/) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No |
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [Fontconfig](../depends/packages/fontconfig.mk) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes |
| [FreeType](../depends/packages/freetype.mk) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes |
| [qrencode](../depends/packages/qrencode.mk) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | | No |
| [Qt](../depends/packages/qt.mk) | [link](https://download.qt.io/official_releases/qt/) | [5.15.3](https://github.com/bitcoin/bitcoin/pull/24668) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No |

### Networking
| Dependency | Version used | Minimum required | Runtime |
| --- | --- | --- | --- |
| [libnatpmp](https://github.com/miniupnp/libnatpmp/) | commit [07004b9...](https://github.com/miniupnp/libnatpmp/tree/07004b97cf691774efebe70404cf22201e4d330d) | | No |
| [MiniUPnPc](https://miniupnp.tuxfamily.org/) | 2.2.2 | 1.9 | No |
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [libnatpmp](../depends/packages/libnatpmp.mk) | [link](https://github.com/miniupnp/libnatpmp/) | commit [07004b9...](https://github.com/miniupnp/libnatpmp/tree/07004b97cf691774efebe70404cf22201e4d330d) | | No |
| [MiniUPnPc](../depends/packages/miniupnpc.mk) | [link](https://miniupnp.tuxfamily.org/) | [2.2.2](https://github.com/bitcoin/bitcoin/pull/20421) | 1.9 | No |

### Notifications
| Dependency | Version used | Minimum required | Runtime |
| --- | --- | --- | --- |
| [ZeroMQ](https://zeromq.org) | 4.3.5 | 4.0.0 | No |
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [ZeroMQ](../depends/packages/zeromq.mk) | [link](https://github.com/zeromq/libzmq/releases) | 4.3.5 | 4.0.0 | No |
coderabbitai[bot] marked this conversation as resolved.

### Wallet
| Dependency | Version used | Minimum required | Runtime |
| --- | --- | --- | --- |
| [Berkeley DB](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) (legacy wallet) | 4.8.30 | 4.8.x | No |
| [SQLite](https://sqlite.org) | 3.38.5 | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No |
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [Berkeley DB](../depends/packages/bdb.mk) (legacy wallet) | [link](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.30 | 4.8.x | No |
| [SQLite](../depends/packages/sqlite.mk) | [link](https://sqlite.org) | 3.38.5 | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No |
coderabbitai[bot] marked this conversation as resolved.

<a name="note1">Note \*</a> : The minimum supported version on macOS is 6.3.0
2 changes: 1 addition & 1 deletion src/bench/addrman.cpp
@@ -136,7 +136,7 @@ static void AddrManGetAddr(benchmark::Bench& bench)
FillAddrMan(addrman);

bench.run([&] {
const auto& addresses = addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23, /* network */ std::nullopt);
const auto& addresses = addrman.GetAddr(/*max_addresses=*/2500, /*max_pct=*/23, /*network=*/std::nullopt);
assert(addresses.size() > 0);
});
}
2 changes: 1 addition & 1 deletion src/bench/mempool_stress.cpp
@@ -84,7 +84,7 @@ static void ComplexMemPool(benchmark::Bench& bench)
if (bench.complexityN() > 1) {
childTxs = static_cast<int>(bench.complexityN());
}
std::vector<CTransactionRef> ordered_coins = CreateOrderedCoins(det_rand, childTxs, /* min_ancestors */ 1);
std::vector<CTransactionRef> ordered_coins = CreateOrderedCoins(det_rand, childTxs, /*min_ancestors=*/1);
const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(CBaseChainParams::MAIN);
CTxMemPool& pool = *testing_setup.get()->m_node.mempool;
LOCK2(cs_main, pool.cs);
4 changes: 2 additions & 2 deletions src/bench/rpc_mempool.cpp
@@ -31,11 +31,11 @@ static void RpcMempool(benchmark::Bench& bench)
tx.vout[0].scriptPubKey = CScript() << OP_1 << OP_EQUAL;
tx.vout[0].nValue = i;
const CTransactionRef tx_r{MakeTransactionRef(tx)};
AddTx(tx_r, /* fee */ i, pool);
AddTx(tx_r, /*fee=*/i, pool);
}

bench.minEpochIterations(40).run([&] {
(void)MempoolToJSON(pool, nullptr, /*verbose*/ true);
(void)MempoolToJSON(pool, nullptr, /*verbose=*/true);
});
}

8 changes: 4 additions & 4 deletions src/bench/wallet_balance.cpp
@@ -50,10 +50,10 @@ static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const b
});
}

static void WalletBalanceDirty(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ true, /* add_mine */ true, 2500); }
static void WalletBalanceClean(benchmark::Bench& bench) {WalletBalance(bench, /* set_dirty */ false, /* add_mine */ true, 8000); }
static void WalletBalanceMine(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_mine */ true, 16000); }
static void WalletBalanceWatch(benchmark::Bench& bench) { WalletBalance(bench, /* set_dirty */ false, /* add_mine */ false, 8000); }
static void WalletBalanceDirty(benchmark::Bench& bench) { WalletBalance(bench, /*set_dirty=*/true, /*add_mine=*/true, 2500); }
static void WalletBalanceClean(benchmark::Bench& bench) {WalletBalance(bench, /*set_dirty=*/false, /*add_mine=*/true, 8000); }
static void WalletBalanceMine(benchmark::Bench& bench) { WalletBalance(bench, /*set_dirty=*/false, /*add_mine=*/true, 16000); }
static void WalletBalanceWatch(benchmark::Bench& bench) { WalletBalance(bench, /*set_dirty=*/false, /*add_mine=*/false, 8000); }

BENCHMARK(WalletBalanceDirty);
BENCHMARK(WalletBalanceClean);
2 changes: 1 addition & 1 deletion src/net.cpp
@@ -4217,7 +4217,7 @@ std::vector<CAddress> CConnman::GetAddresses(CNode& requestor, size_t max_addres
auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{});
CachedAddrResponse& cache_entry = r.first->second;
if (cache_entry.m_cache_entry_expiration < current_time) { // If emplace() added new one it has expiration 0.
cache_entry.m_addrs_response_cache = GetAddresses(max_addresses, max_pct, /* network */ std::nullopt);
cache_entry.m_addrs_response_cache = GetAddresses(max_addresses, max_pct, /*network=*/std::nullopt);
// Choosing a proper cache lifetime is a trade-off between the privacy leak minimization
// and the usefulness of ADDR responses to honest users.
//
4 changes: 2 additions & 2 deletions src/net_processing.cpp
@@ -4638,7 +4638,7 @@ void PeerManagerImpl::ProcessMessage(
BlockValidationState state;
if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header}, state, m_chainparams, &pindex)) {
if (state.IsInvalid()) {
MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock");
MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock");
return;
}
}
@@ -4984,7 +4984,7 @@ void PeerManagerImpl::ProcessMessage(
peer->m_addrs_to_send.clear();
std::vector<CAddress> vAddr;
if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /* network */ std::nullopt);
vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt);
} else {
vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND);
}