From 58a52a9ca60b9446d4544bbb62c049ce01709708 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Fri, 13 Feb 2026 13:00:41 +0100 Subject: [PATCH 1/8] feat: add masternode network generation script Multi-node masternode network generator for regtest test data. Produces 5 datadirs (1 controller + 4 MNs) with DKG cycles and quorum history for SPV masternode list sync integration testing. Follows Dash Core test framework ordering: - mocktime init -> force mnsync -> connect nodes - setmnthreadactive toggle during peer connections - DKG phase-by-phase progression with mockscheduler --- generate_masternode.py | 657 ++++++++++++++++++++++++++++++++ generator/masternode_network.py | 339 ++++++++++++++++ 2 files changed, 996 insertions(+) create mode 100644 generate_masternode.py create mode 100644 generator/masternode_network.py diff --git a/generate_masternode.py b/generate_masternode.py new file mode 100644 index 0000000..1a78e37 --- /dev/null +++ b/generate_masternode.py @@ -0,0 +1,657 @@ +#!/usr/bin/env python3 +""" +Dash Masternode Network Test Data Generator + +Generates a regtest blockchain with an active masternode network for SPV +masternode list sync testing. Follows the same setup sequence as Dash Core's +test framework (test_framework.py). + +Produces 5 node datadirs (1 controller + 4 MNs) with completed DKG cycles. + +Usage: + python3 generate_masternode.py --dashd-path /path/to/dashd + python3 generate_masternode.py --dashd-path /path/to/dashd --dkg-cycles 12 +""" + +import argparse +import json +import shutil +import sys +import time +from dataclasses import dataclass +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent)) + +from generator.errors import RPCError +from generator.masternode_network import MasternodeNetwork +from generator.wallet_export import collect_wallet_stats, save_wallet_file + +# Constants matching Dash Core test framework +TIME_GENESIS_BLOCK = 1417713337 +DKG_INTERVAL = 24 +NUM_MASTERNODES = 4 +SPORK_PRIVATE_KEY = "cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK" + +DASHD_EXTRA_ARGS = [ + "-dip3params=2:2", + "-testactivationheight=v20@100", + "-testactivationheight=mn_rr@100", + "-llmqtestinstantsenddip0024=llmq_test_instantsend", +] + + +@dataclass +class MasternodeConfig: + dashd_path: str + dkg_cycles: int + output_dir: str + + +def _all_nodes(network): + return [network.controller] + network.masternodes + + +def _set_mocktime(network, mocktime): + """Set mocktime on all nodes.""" + network._mocktime = mocktime + for node in _all_nodes(network): + try: + node.rpc.call("setmocktime", mocktime) + except Exception: + pass + + +def _bump_mocktime(network, seconds=1): + """Advance mocktime and run mockscheduler on all nodes.""" + network._mocktime += seconds + for node in _all_nodes(network): + try: + node.rpc.call("setmocktime", network._mocktime) + node.rpc.call("mockscheduler", seconds) + except Exception: + pass + + +def _move_blocks(network, count): + """Bump mocktime, generate blocks, and wait for sync.""" + if count <= 0: + return + _bump_mocktime(network, 1) + network.generate_blocks(count) + network.wait_for_sync() + + +def _force_finish_mnsync(node): + """Force a node to finish mnsync (masternodes reject connections until synced).""" + for _ in range(20): + try: + status = node.rpc.call("mnsync", "status") + if status.get("IsSynced", False): + return + node.rpc.call("mnsync", "next") + time.sleep(0.5) + except Exception: + time.sleep(0.5) + + +def _wait_for_quorum_phase(network, quorum_hash, phase, expected_members=None, 
timeout=60): + """Wait for masternodes to reach a DKG phase.""" + if expected_members is None: + expected_members = len(network.masternodes) + start = time.time() + while time.time() - start < timeout: + member_count = 0 + for mn in network.masternodes: + try: + status = mn.rpc.call("quorum", "dkgstatus") + for s in status.get("session", []): + if s.get("llmqType") != "llmq_test": + continue + qs = s.get("status", {}) + if qs.get("quorumHash") == quorum_hash and qs.get("phase") == phase: + member_count += 1 + break + except Exception: + pass + if member_count >= expected_members: + return True + _bump_mocktime(network, 1) + time.sleep(0.3) + return False + + +def _wait_for_quorum_connections(network, quorum_hash, timeout=60): + """Wait for masternodes to establish quorum connections.""" + start = time.time() + while time.time() - start < timeout: + all_connected = True + for mn in network.masternodes: + try: + status = mn.rpc.call("quorum", "dkgstatus") + found_session = False + for s in status.get("session", []): + if s.get("llmqType") != "llmq_test": + continue + if s.get("status", {}).get("quorumHash") == quorum_hash: + found_session = True + break + if not found_session: + all_connected = False + break + except Exception: + all_connected = False + break + if all_connected: + return True + _bump_mocktime(network, 1) + time.sleep(0.3) + return False + + +def _wait_for_quorum_commitment(network, quorum_hash, timeout=30): + """Wait for minable commitments on all masternodes.""" + start = time.time() + while time.time() - start < timeout: + all_ready = True + for mn in network.masternodes: + try: + status = mn.rpc.call("quorum", "dkgstatus") + found = False + for c in status.get("minableCommitments", []): + if c.get("llmqType") == 100 and c.get("quorumHash") == quorum_hash: + if c.get("quorumPublicKey", "0" * 96) != "0" * 96: + found = True + break + if not found: + all_ready = False + break + except Exception: + all_ready = False + break + if all_ready: + return True + time.sleep(0.5) + return False + + +def _wait_for_quorum_list(network, quorum_hash, timeout=15): + """Wait for quorum to appear in the quorum list.""" + rpc = network.controller.rpc + start = time.time() + while time.time() - start < timeout: + try: + qlist = rpc.call("quorum", "list", 1) + if quorum_hash in qlist.get("llmq_test", []): + return True + except Exception: + pass + time.sleep(0.3) + return False + + +def phase_1_bootstrap(network): + """Bootstrap: create wallet, mine initial blocks, initialize mocktime.""" + print("\n" + "=" * 60) + print("Phase 1: Bootstrap") + print("=" * 60) + + rpc = network.controller.rpc + + # Initialize mocktime on controller (matches Dash Core test framework) + network._mocktime = TIME_GENESIS_BLOCK + rpc.call("setmocktime", network._mocktime) + + # Create the SPV test wallet + try: + rpc.call("createwallet", "wallet") + print(" Created 'wallet' wallet") + except RPCError as e: + if "already exists" in str(e).lower(): + rpc.call("loadwallet", "wallet") + print(" Loaded existing 'wallet' wallet") + else: + raise + + # Get a funding address for protx registration + network.fund_address = rpc.call("getnewaddress") + + # Mine blocks for maturity + activation (need coins to be spendable) + # Mine in batches with mocktime bumps (like Dash Core's cache setup) + for batch in range(5): + _bump_mocktime(network, 25 * 156) + rpc.call("generatetoaddress", 25, network.fund_address) + + height = rpc.call("getblockcount") + balance = rpc.call("getbalance") + print(f" Mined to height {height}, balance: 
{balance}") + + blockchain_info = rpc.call("getblockchaininfo") + print(f" Softforks: {list(blockchain_info.get('softforks', {}).keys())}") + + +def phase_2_register_masternodes(network): + """Register masternodes via protx register_fund.""" + print("\n" + "=" * 60) + print("Phase 2: Register masternodes") + print("=" * 60) + + rpc = network.controller.rpc + mn_ports = network.allocate_mn_ports() + + for i in range(NUM_MASTERNODES): + mn_name = f"mn{i + 1}" + service_addr = f"127.0.0.1:{mn_ports[i]}" + print(f"\n Registering {mn_name} (service: {service_addr})...") + + bls_result = rpc.call("bls", "generate") + bls_public = bls_result["public"] + bls_secret = bls_result["secret"] + + owner_addr = rpc.call("getnewaddress") + voting_addr = rpc.call("getnewaddress") + payout_addr = rpc.call("getnewaddress") + collateral_addr = rpc.call("getnewaddress") + + # register_fund: collateral and fees come from fund_address + pro_tx_hash = rpc.call( + "protx", "register_fund", + collateral_addr, + [service_addr], + owner_addr, + bls_public, + voting_addr, + 0, + payout_addr, + network.fund_address, + ) + + # Bury the protx (1 confirmation) + _bump_mocktime(network, 601) + rpc.call("generatetoaddress", 1, network.fund_address) + + mn_info = { + "index": i, + "name": mn_name, + "pro_tx_hash": pro_tx_hash, + "bls_public_key": bls_public, + "bls_private_key": bls_secret, + "owner_address": owner_addr, + "voting_address": voting_addr, + "payout_address": payout_addr, + } + network.masternode_info.append(mn_info) + print(f" proTxHash: {pro_tx_hash}") + + height = rpc.call("getblockcount") + print(f"\n All {NUM_MASTERNODES} masternodes registered (height: {height})") + + +def phase_3_start_masternodes(network): + """Copy datadirs, start MN nodes, force mnsync, then connect. + + Follows the exact ordering from Dash Core's test framework: + 1. Start nodes (with mocktime on command line) + 2. Set mocktime via RPC + 3. Force mnsync completion (masternodes reject connections until synced) + 4. 
Connect nodes to each other + """ + print("\n" + "=" * 60) + print("Phase 3: Start masternode nodes") + print("=" * 60) + + # Start all nodes (does not connect them yet) + network.start_masternode_nodes(network.controller.datadir) + + # Re-load the "wallet" wallet (lost during controller restart) + rpc = network.controller.rpc + try: + rpc.call("loadwallet", "wallet") + except RPCError as e: + if "already loaded" not in str(e).lower(): + raise + + # Set mocktime on all nodes via RPC (in addition to -mocktime= cmd arg) + _set_mocktime(network, network._mocktime) + + # Force mnsync on all nodes (must happen before connecting) + print(" Forcing mnsync completion on controller...") + _force_finish_mnsync(network.controller) + for mn in network.masternodes: + print(f" Forcing mnsync completion on {mn.name}...") + _force_finish_mnsync(mn) + print(" All nodes mnsync complete") + + # Now connect all nodes (mnsync must be done first) + print(" Connecting nodes...") + network.connect_all() + + # Mine 8 blocks for masternode maturity + _move_blocks(network, 8) + + # Verify masternode status + mn_list = rpc.call("masternode", "list") + enabled_count = sum(1 for v in mn_list.values() if "ENABLED" in str(v)) + print(f" Masternodes ENABLED: {enabled_count}/{NUM_MASTERNODES}") + + if enabled_count < NUM_MASTERNODES: + for _ in range(10): + _move_blocks(network, 4) + time.sleep(1) + mn_list = rpc.call("masternode", "list") + enabled_count = sum(1 for v in mn_list.values() if "ENABLED" in str(v)) + if enabled_count >= NUM_MASTERNODES: + break + print(f" Final ENABLED count: {enabled_count}/{NUM_MASTERNODES}") + + height = rpc.call("getblockcount") + print(f" Height after MN maturity: {height}") + + +def phase_4_enable_sporks(network): + """Enable sporks for DKG, InstantSend, ChainLocks.""" + print("\n" + "=" * 60) + print("Phase 4: Enable sporks") + print("=" * 60) + + rpc = network.controller.rpc + + rpc.call("sporkupdate", "SPORK_17_QUORUM_DKG_ENABLED", 0) + rpc.call("sporkupdate", "SPORK_21_QUORUM_ALL_CONNECTED", 0) + rpc.call("sporkupdate", "SPORK_2_INSTANTSEND_ENABLED", 0) + rpc.call("sporkupdate", "SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0) + rpc.call("sporkupdate", "SPORK_19_CHAINLOCKS_ENABLED", 0) + + # Wait for spork propagation + time.sleep(3) + _bump_mocktime(network, 1) + + sporks = rpc.call("spork", "show") + for name in ["SPORK_17_QUORUM_DKG_ENABLED", "SPORK_21_QUORUM_ALL_CONNECTED", + "SPORK_2_INSTANTSEND_ENABLED", "SPORK_19_CHAINLOCKS_ENABLED"]: + value = sporks.get(name, "unknown") + print(f" {name}: {value}") + + +def phase_5_mine_dkg_cycles(network, num_cycles): + """Mine DKG cycles following Dash Core's mine_quorum() pattern.""" + print("\n" + "=" * 60) + print(f"Phase 5: Mine {num_cycles} DKG cycles") + print("=" * 60) + + rpc = network.controller.rpc + completed_cycles = 0 + # LLMQ_TEST has 3 members out of our 4 MNs + expected_members = 3 + + for cycle in range(num_cycles): + height = rpc.call("getblockcount") + + # Move to next DKG cycle start + skip_count = DKG_INTERVAL - (height % DKG_INTERVAL) + if skip_count != 0 and skip_count != DKG_INTERVAL: + _bump_mocktime(network, 1) + network.generate_blocks(skip_count) + network.wait_for_sync() + + quorum_hash = rpc.call("getbestblockhash") + height = rpc.call("getblockcount") + print(f"\n Cycle {cycle + 1}/{num_cycles} at height {height}") + + # Phase 1: Init - wait for quorum connections + print(" Phase 1 (init)...", end="", flush=True) + if not _wait_for_quorum_phase(network, quorum_hash, 1, expected_members, timeout=60): + print(" 
timeout") + # Debug output + for mn in network.masternodes: + try: + status = mn.rpc.call("quorum", "dkgstatus") + sessions = status.get("session", []) + print(f" {mn.name}: time={status.get('time')}, sessions={len(sessions)}, " + f"tip={status.get('session', [{}])[0].get('status', {}).get('quorumHash', 'none')[:16] if sessions else 'none'}") + except Exception as e: + print(f" {mn.name}: error {e}") + _move_blocks(network, DKG_INTERVAL) + continue + _wait_for_quorum_connections(network, quorum_hash, timeout=60) + print(" ok") + _move_blocks(network, 2) + + # Phase 2: Contribute + print(" Phase 2 (contribute)...", end="", flush=True) + if not _wait_for_quorum_phase(network, quorum_hash, 2, expected_members, timeout=30): + print(" timeout") + _move_blocks(network, DKG_INTERVAL) + continue + print(" ok") + _move_blocks(network, 2) + + # Phase 3: Complain + print(" Phase 3 (complain)...", end="", flush=True) + if not _wait_for_quorum_phase(network, quorum_hash, 3, expected_members, timeout=30): + print(" timeout") + _move_blocks(network, DKG_INTERVAL) + continue + print(" ok") + _move_blocks(network, 2) + + # Phase 4: Justify + print(" Phase 4 (justify)...", end="", flush=True) + if not _wait_for_quorum_phase(network, quorum_hash, 4, expected_members, timeout=30): + print(" timeout") + _move_blocks(network, DKG_INTERVAL) + continue + print(" ok") + _move_blocks(network, 2) + + # Phase 5: Commit + print(" Phase 5 (commit)...", end="", flush=True) + if not _wait_for_quorum_phase(network, quorum_hash, 5, expected_members, timeout=30): + print(" timeout") + _move_blocks(network, DKG_INTERVAL) + continue + print(" ok") + _move_blocks(network, 2) + + # Phase 6: Mining + print(" Phase 6 (mining)...", end="", flush=True) + if not _wait_for_quorum_phase(network, quorum_hash, 6, expected_members, timeout=30): + print(" timeout") + _move_blocks(network, DKG_INTERVAL) + continue + print(" ok") + + # Wait for final commitment + print(" Waiting for commitment...", end="", flush=True) + if not _wait_for_quorum_commitment(network, quorum_hash, timeout=30): + print(" timeout") + _move_blocks(network, DKG_INTERVAL) + continue + print(" ok") + + # Mine the commitment block (getblocktemplate triggers CreateNewBlock) + _bump_mocktime(network, 1) + rpc.call("getblocktemplate") + _move_blocks(network, 1) + + # Verify quorum appeared in the list + if _wait_for_quorum_list(network, quorum_hash, timeout=15): + # Mine 8 blocks for quorum maturity + _move_blocks(network, 8) + completed_cycles += 1 + total = len(rpc.call("quorum", "list").get("llmq_test", [])) + print(f" Quorum formed (total: {total})") + else: + print(f" Quorum not in list") + + height = rpc.call("getblockcount") + quorum_list = rpc.call("quorum", "list") + print(f"\n Completed {completed_cycles}/{num_cycles} DKG cycles (height: {height})") + print(f" Quorums: llmq_test={len(quorum_list.get('llmq_test', []))}, " + f"llmq_test_dip0024={len(quorum_list.get('llmq_test_dip0024', []))}") + return completed_cycles + + +def phase_6_generate_test_transactions(network): + """Send transactions to the SPV test wallet.""" + print("\n" + "=" * 60) + print("Phase 6: Generate SPV test transactions") + print("=" * 60) + + rpc = network.controller.rpc + + addresses = [] + for _ in range(10): + addr = rpc.call("getnewaddress", wallet="wallet") + addresses.append(addr) + + amounts = [1.0, 5.0, 10.0, 0.5, 25.0, 0.1, 50.0, 2.5] + for i, amount in enumerate(amounts): + addr = addresses[i % len(addresses)] + rpc.call("sendtoaddress", addr, amount) + if (i + 1) % 3 == 0: + 
_move_blocks(network, 1) + + _move_blocks(network, 6) + + height = rpc.call("getblockcount") + print(f" Generated {len(amounts)} test transactions (height: {height})") + + +def phase_7_export(network, config, dkg_cycles_completed): + """Export all node data and metadata.""" + print("\n" + "=" * 60) + print("Phase 7: Export") + print("=" * 60) + + rpc = network.controller.rpc + chain_height = rpc.call("getblockcount") + + output_dir = Path(config.output_dir) / "regtest-mn-v0.0.1" + if output_dir.exists(): + print(f" Removing existing output: {output_dir}") + shutil.rmtree(output_dir) + output_dir.mkdir(parents=True) + + # Collect wallet stats while nodes are running + print(" Collecting wallet statistics...") + wallet_stats = collect_wallet_stats(rpc, "wallet") + wallets_dir = output_dir / "wallets" + wallets_dir.mkdir() + save_wallet_file(wallet_stats, wallets_dir / "wallet.json") + print(f" wallet.json: {len(wallet_stats['transactions'])} txs, balance: {wallet_stats['balance']:.8f}") + + # Stop all nodes cleanly + print(" Stopping all nodes...") + network.stop_all() + time.sleep(2) + + # Copy datadirs + print(" Copying controller datadir...") + controller_dest = output_dir / "controller" + shutil.copytree(network.controller.datadir / "regtest", controller_dest / "regtest") + + for i, mn in enumerate(network.masternodes): + mn_name = f"mn{i + 1}" + print(f" Copying {mn_name} datadir...") + mn_dest = output_dir / mn_name + shutil.copytree(mn.datadir / "regtest", mn_dest / "regtest") + + # Write network.json + network_metadata = { + "version": "0.0.1", + "chain_height": chain_height, + "dkg_cycles_completed": dkg_cycles_completed, + "dkg_interval": DKG_INTERVAL, + "controller": { + "datadir": "controller", + "wallet": "wallet", + }, + "masternodes": [ + { + "index": mn["index"], + "datadir": mn["name"], + "pro_tx_hash": mn["pro_tx_hash"], + "bls_private_key": mn["bls_private_key"], + "bls_public_key": mn["bls_public_key"], + "owner_address": mn["owner_address"], + "voting_address": mn["voting_address"], + "payout_address": mn["payout_address"], + } + for mn in network.masternode_info + ], + "spork_private_key": SPORK_PRIVATE_KEY, + "dashd_extra_args": DASHD_EXTRA_ARGS, + } + + with open(output_dir / "network.json", "w") as f: + json.dump(network_metadata, f, indent=2) + + total_size = sum(f.stat().st_size for f in output_dir.rglob("*") if f.is_file()) + print(f"\n Exported to {output_dir}") + print(f" Total size: {total_size / 1024 / 1024:.1f} MB") + print(f" Chain height: {chain_height}") + print(f" DKG cycles: {dkg_cycles_completed}") + + return output_dir + + +def main(): + parser = argparse.ArgumentParser(description="Generate masternode network test data") + parser.add_argument("--dashd-path", required=True, help="Path to dashd binary") + parser.add_argument("--dkg-cycles", type=int, default=8, help="Number of DKG cycles (default: 8)") + parser.add_argument("--output-dir", default=str(Path(__file__).parent / "data"), help="Output directory") + args = parser.parse_args() + + config = MasternodeConfig( + dashd_path=args.dashd_path, + dkg_cycles=args.dkg_cycles, + output_dir=args.output_dir, + ) + + dashd_bin = Path(config.dashd_path) + if not dashd_bin.exists(): + print(f"dashd not found: {dashd_bin}") + sys.exit(1) + + extra_args = list(DASHD_EXTRA_ARGS) + extra_args.append(f"-sporkkey={SPORK_PRIVATE_KEY}") + + network = MasternodeNetwork( + dashd_path=config.dashd_path, + num_masternodes=NUM_MASTERNODES, + base_extra_args=extra_args, + ) + + try: + # Set initial mocktime on network 
object before any node starts + network._mocktime = TIME_GENESIS_BLOCK + + network.start_controller(extra_args=[f"-mocktime={TIME_GENESIS_BLOCK}"]) + phase_1_bootstrap(network) + phase_2_register_masternodes(network) + phase_3_start_masternodes(network) + phase_4_enable_sporks(network) + dkg_cycles = phase_5_mine_dkg_cycles(network, config.dkg_cycles) + phase_6_generate_test_transactions(network) + output_dir = phase_7_export(network, config, dkg_cycles) + + print("\n" + "=" * 60) + print("Generation complete!") + print("=" * 60) + print(f"Output: {output_dir}") + + except KeyboardInterrupt: + print("\nInterrupted by user") + sys.exit(1) + except Exception as e: + print(f"\nGeneration failed: {e}") + import traceback + traceback.print_exc() + sys.exit(1) + finally: + network.cleanup() + + +if __name__ == "__main__": + main() diff --git a/generator/masternode_network.py b/generator/masternode_network.py new file mode 100644 index 0000000..8e994f7 --- /dev/null +++ b/generator/masternode_network.py @@ -0,0 +1,339 @@ +""" +Masternode Network Manager + +Manages multiple dashd instances for masternode network generation. +Handles node lifecycle, peer connections, and DKG cycle mining. +""" + +import shutil +import socket +import tempfile +import time +from pathlib import Path + +from .dashd_manager import DashdManager, dashd_preexec_fn +from .rpc_client import DashRPCClient + + +class MasternodeNode: + """A single dashd node in the masternode network.""" + + def __init__(self, name, dashd_path, datadir, rpc_port, p2p_port, extra_args=None): + self.name = name + self.dashd_path = dashd_path + self.datadir = Path(datadir) + self.rpc_port = rpc_port + self.p2p_port = p2p_port + self.extra_args = extra_args or [] + self.process = None + self.rpc = None + + def start(self): + """Start the dashd process.""" + import subprocess + + regtest_dir = self.datadir / "regtest" + regtest_dir.mkdir(parents=True, exist_ok=True) + + cmd = [ + self.dashd_path, + "-regtest", + f"-datadir={self.datadir}", + f"-port={self.p2p_port}", + f"-rpcport={self.rpc_port}", + "-server=1", + "-daemon=0", + "-fallbackfee=0.00001", + "-rpcbind=127.0.0.1", + "-rpcallowip=127.0.0.1", + "-listen=1", + "-txindex=0", + "-addressindex=0", + "-spentindex=0", + "-timestampindex=0", + ] + cmd.extend(self.extra_args) + + self.process = subprocess.Popen( + cmd, + stdout=subprocess.DEVNULL, + stderr=subprocess.PIPE, + cwd=str(self.datadir), + preexec_fn=dashd_preexec_fn, + ) + + # Derive dash-cli path + dashd_bin = Path(self.dashd_path) + if dashd_bin.is_absolute(): + dashcli_path = str(dashd_bin.parent / "dash-cli") + else: + dashcli_path = "dash-cli" + + self.rpc = DashRPCClient( + dashcli_path=dashcli_path, + datadir=str(self.datadir), + rpc_port=self.rpc_port, + ) + + # Wait for RPC to become ready + if not self._wait_for_ready(timeout=60): + self.stop() + raise RuntimeError(f"Node {self.name} failed to start within 60 seconds") + + print(f" {self.name} started (PID: {self.process.pid}, RPC: {self.rpc_port}, P2P: {self.p2p_port})") + + def _wait_for_ready(self, timeout=60): + """Wait for dashd to accept RPC calls.""" + start = time.time() + while time.time() - start < timeout: + if self.process.poll() is not None: + if self.process.stderr: + stderr = self.process.stderr.read().decode("utf-8", errors="replace").strip() + if stderr: + print(f" {self.name} exited with error: {stderr}") + return False + try: + self.rpc.call("getblockcount") + return True + except Exception: + time.sleep(0.5) + return False + + def stop(self): + """Stop the 
dashd process.""" + if self.process: + try: + self.process.terminate() + self.process.wait(timeout=15) + except Exception: + try: + self.process.kill() + self.process.wait() + except Exception: + pass + self.process = None + + +def find_free_port(start=19000, attempts=100): + """Find an available TCP port.""" + for port in range(start, start + attempts): + try: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + s.bind(("127.0.0.1", port)) + return port + except OSError: + continue + raise RuntimeError(f"No free port found in range {start}-{start + attempts - 1}") + + +class MasternodeNetwork: + """Manages a multi-node masternode network for test data generation. + + Topology: 1 controller + N masternode nodes, all connected. + """ + + def __init__(self, dashd_path, num_masternodes=4, base_extra_args=None): + self.dashd_path = dashd_path + self.num_masternodes = num_masternodes + self.base_extra_args = base_extra_args or [] + self.controller = None + self.masternodes = [] + self.temp_dirs = [] + self.masternode_info = [] # BLS keys, addresses, proTxHashes + self.mn_p2p_ports = [] # Pre-allocated P2P ports for MN registration + self.fund_address = None # Address holding mining rewards (set in bootstrap) + + def allocate_mn_ports(self): + """Pre-allocate P2P ports for masternodes (needed for protx registration).""" + base = 19950 + for i in range(self.num_masternodes): + p2p_port = find_free_port(base + i * 10) + self.mn_p2p_ports.append(p2p_port) + return self.mn_p2p_ports + + def start_controller(self, extra_args=None): + """Start the controller node from a fresh temp directory.""" + temp_dir = Path(tempfile.mkdtemp(prefix="dash-mn-controller-")) + self.temp_dirs.append(temp_dir) + + rpc_port = find_free_port(19900) + p2p_port = find_free_port(rpc_port + 1) + + all_args = list(self.base_extra_args) + if extra_args: + all_args.extend(extra_args) + # Block filter index for SPV testing + all_args.extend(["-blockfilterindex=1", "-peerblockfilters=1"]) + + self.controller = MasternodeNode( + name="controller", + dashd_path=self.dashd_path, + datadir=temp_dir, + rpc_port=rpc_port, + p2p_port=p2p_port, + extra_args=all_args, + ) + self.controller.start() + return self.controller + + def start_masternode_nodes(self, controller_datadir): + """Start masternode nodes from a copy of the controller's datadir. + + Each node gets a unique BLS private key. Connection and mnsync + must be handled by the caller after this method returns. + Must be called after masternodes have been registered on the controller. 
+ """ + print("\n Starting masternode nodes...") + + # Stop controller briefly to copy its datadir + controller_rpc_port = self.controller.rpc_port + controller_p2p_port = self.controller.p2p_port + controller_extra = list(self.controller.extra_args) + controller_dir = self.controller.datadir + + self.controller.stop() + time.sleep(2) + + # Restart controller (with current mocktime if available) + restart_args = list(controller_extra) + if hasattr(self, '_mocktime') and self._mocktime: + # Remove any old mocktime arg and add current one + restart_args = [a for a in restart_args if not a.startswith("-mocktime=")] + restart_args.append(f"-mocktime={self._mocktime}") + self.controller = MasternodeNode( + name="controller", + dashd_path=self.dashd_path, + datadir=controller_dir, + rpc_port=controller_rpc_port, + p2p_port=controller_p2p_port, + extra_args=restart_args, + ) + self.controller.start() + + for i, mn_info in enumerate(self.masternode_info): + mn_name = f"mn{i + 1}" + temp_dir = Path(tempfile.mkdtemp(prefix=f"dash-{mn_name}-")) + self.temp_dirs.append(temp_dir) + + # Copy controller's regtest data (blockchain, chainstate, evodb, llmq) + src = controller_dir / "regtest" + dst = temp_dir / "regtest" + shutil.copytree(src, dst) + # Remove stale network state from the copy + for stale_file in ["peers.dat", "banlist.json", "mempool.dat", ".lock"]: + stale_path = dst / stale_file + if stale_path.exists(): + stale_path.unlink() + + # Use pre-allocated P2P port if available, otherwise find a free one + if i < len(self.mn_p2p_ports): + p2p_port = self.mn_p2p_ports[i] + else: + p2p_port = find_free_port(controller_p2p_port + 10 + i * 10) + rpc_port = find_free_port(p2p_port + 1) + + mn_args = list(self.base_extra_args) + mn_args.extend([ + "-blockfilterindex=1", + "-peerblockfilters=1", + "-txindex=1", + f"-masternodeblsprivkey={mn_info['bls_private_key']}", + ]) + # Pass mocktime at startup so DKG scheduling works + if hasattr(self, '_mocktime') and self._mocktime: + mn_args.append(f"-mocktime={self._mocktime}") + + node = MasternodeNode( + name=mn_name, + dashd_path=self.dashd_path, + datadir=temp_dir, + rpc_port=rpc_port, + p2p_port=p2p_port, + extra_args=mn_args, + ) + node.start() + self.masternodes.append(node) + + def connect_all(self): + """Connect all nodes to each other following Dash Core's test framework. + + Disables masternode threads during connection to prevent interference + with the P2P handshake (matching DashTestFramework.connect_nodes). + Uses "onetry" mode as the Dash Core test framework does. 
+ """ + # Disable MN threads during connection (prevents handshake interference) + for mn in self.masternodes: + try: + mn.rpc.call("setmnthreadactive", False) + except Exception: + pass + + # Connect each MN to the controller + for mn in self.masternodes: + controller_addr = f"127.0.0.1:{self.controller.p2p_port}" + try: + mn.rpc.call("addnode", controller_addr, "onetry") + except Exception as e: + print(f" Warning: addnode {mn.name}->controller failed: {e}") + + # Also connect controller to each MN + for mn in self.masternodes: + mn_addr = f"127.0.0.1:{mn.p2p_port}" + try: + self.controller.rpc.call("addnode", mn_addr, "onetry") + except Exception as e: + print(f" Warning: addnode controller->{mn.name} failed: {e}") + + # Re-enable MN threads + for mn in self.masternodes: + try: + mn.rpc.call("setmnthreadactive", True) + except Exception: + pass + + # Wait for connections to establish + peer_count = 0 + for attempt in range(15): + time.sleep(2) + peer_count = len(self.controller.rpc.call("getpeerinfo")) + if peer_count >= len(self.masternodes): + break + print(f" Controller has {peer_count} peers connected") + + def stop_all(self): + """Stop all nodes.""" + for mn in self.masternodes: + mn.stop() + if self.controller: + self.controller.stop() + + def cleanup(self): + """Stop all nodes and remove temp directories.""" + self.stop_all() + for temp_dir in self.temp_dirs: + shutil.rmtree(temp_dir, ignore_errors=True) + self.temp_dirs.clear() + + def generate_blocks(self, count, address=None): + """Mine blocks on the controller node.""" + rpc = self.controller.rpc + if address is None: + address = self.fund_address or rpc.call("getnewaddress") + return rpc.call("generatetoaddress", count, address) + + def wait_for_sync(self, timeout=30): + """Wait for all nodes to reach the same block height as controller.""" + target = self.controller.rpc.call("getblockcount") + start = time.time() + while time.time() - start < timeout: + all_synced = True + for mn in self.masternodes: + height = mn.rpc.call("getblockcount") + if height < target: + all_synced = False + break + if all_synced: + return True + time.sleep(0.5) + return False From 362ae70111353c7033650c3444c9f05e93de4f94 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Fri, 13 Feb 2026 13:53:30 +0100 Subject: [PATCH 2/8] fix: use default LLMQ_TEST_DIP0024 for quorum rotation Remove the -llmqtestinstantsenddip0024=llmq_test_instantsend override that was switching the DIP0024 InstantSend quorum type from LLMQ_TEST_DIP0024 (useRotation=true) to LLMQ_TEST_INSTANTSEND (useRotation=false). This prevented quorum snapshots from being created during block processing, causing BuildQuorumRotationInfo to fail with "Cannot find quorum snapshot". Also use RPC stop for clean node shutdown to flush evoDB. 
--- generate_masternode.py | 1 - generator/masternode_network.py | 12 +++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/generate_masternode.py b/generate_masternode.py index 1a78e37..2e918f1 100644 --- a/generate_masternode.py +++ b/generate_masternode.py @@ -37,7 +37,6 @@ "-dip3params=2:2", "-testactivationheight=v20@100", "-testactivationheight=mn_rr@100", - "-llmqtestinstantsenddip0024=llmq_test_instantsend", ] diff --git a/generator/masternode_network.py b/generator/masternode_network.py index 8e994f7..9bccd04 100644 --- a/generator/masternode_network.py +++ b/generator/masternode_network.py @@ -100,8 +100,18 @@ def _wait_for_ready(self, timeout=60): return False def stop(self): - """Stop the dashd process.""" + """Stop the dashd process gracefully via RPC, falling back to SIGTERM.""" if self.process: + # Try RPC stop first for clean shutdown (flushes evoDB, quorum snapshots) + if self.rpc: + try: + self.rpc.call("stop") + self.process.wait(timeout=30) + self.process = None + return + except Exception: + pass + # Fallback to SIGTERM try: self.process.terminate() self.process.wait(timeout=15) From fc6907aa50332607e776143779649369e7d98274 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Fri, 17 Apr 2026 18:54:38 +1000 Subject: [PATCH 3/8] fix: generate real DIP-0024 rotating quorums in masternode chain MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rework `phase_5_mine_dkg_cycles` to follow Dash Core's `mine_cycle_quorum` flow from `test/functional/test_framework/test_framework.py`. The previous generator drove only a single `llmq_test` (type 100) session per cycle and never orchestrated the interleaved `llmq_test_dip0024` (type 103) rotating DKG, so every DIP-0024 commitment landed as null and the SPV consumer tests saw zeroed `signers`, `validMembers`, and `quorumPublicKey` for type 103. Each cycle now: - advances 3 full DKG intervals before the first run (`cycle_quorum_is_ready` warmup) so rotating quorums can form past `H+3C`, matching `feature_llmq_rotation.py` - walks phases 1-6 block-by-block, interleaving `q_0` and `q_1` for type 103 while type 100 piggybacks naturally on the shared `dkgInterval=24` - gates every phase on the expected DKG message counts (`receivedContributions`, `receivedComplaints`, `receivedJustifications`, `receivedPrematureCommitments`) so blocks never advance past a phase before real messages have been exchanged - mines a single commit block at `cycle+12` via `getblocktemplate + generate(1)`, matching `mine_cycle_quorum`'s terminal step - verifies all three real quorums via `quorum list` (no `count` arg, so the default returns both `q_0` and `q_1` for rotating types — the prior `count=1` dropped `q_1` from the response even when it was successfully mined) - mines 8 signing-window maturity blocks Also seeds direct masternode↔masternode `addnode` connections in `MasternodeNetwork.connect_all` so DKG phase 2 sees real contributions instead of racing against the quorum manager's lazy connection build, and switches node shutdown to RPC `stop` with a SIGTERM fallback so evoDB and quorum snapshots are flushed cleanly. 
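For orientation, the per-cycle block schedule this yields (offsets relative to
the cycle boundary block whose hash becomes q_0; a summary of the
`_run_single_dkg_cycle` flow below, not additional behavior):

    +0        q_0 defined; type 100 and type 103 q_0 sessions enter phase 1 (init)
    +1        q_1 defined; the type 103 q_1 session enters phase 1
    +2/+3     phase 2 (contribute) on q_0 then q_1, gated on receivedContributions
    +4/+5     phase 3 (complain), gated on zero receivedComplaints
    +6/+7     phase 4 (justify), gated on zero receivedJustifications
    +8/+9     phase 5 (commit), gated on receivedPrematureCommitments
    +10/+11   phase 6 (finalize)
    +12       commit block mined via getblocktemplate + generate(1)
    +13..+20  8 signing-window maturity blocks (SIGN_HEIGHT_OFFSET)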
--- generate_masternode.py | 543 ++++++++++++++++++++++---------- generator/masternode_network.py | 34 +- 2 files changed, 409 insertions(+), 168 deletions(-) mode change 100644 => 100755 generate_masternode.py diff --git a/generate_masternode.py b/generate_masternode.py old mode 100644 new mode 100755 index 2e918f1..02aa081 --- a/generate_masternode.py +++ b/generate_masternode.py @@ -31,6 +31,8 @@ TIME_GENESIS_BLOCK = 1417713337 DKG_INTERVAL = 24 NUM_MASTERNODES = 4 +LLMQ_TEST_SIZE = 3 # llmq_test (type 100) - 3 members out of 4 MNs +LLMQ_TEST_DIP0024_SIZE = 4 # llmq_test_dip0024 (type 103) - all 4 MNs, minSize=4 SPORK_PRIVATE_KEY = "cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK" DASHD_EXTRA_ARGS = [ @@ -94,10 +96,25 @@ def _force_finish_mnsync(node): time.sleep(0.5) -def _wait_for_quorum_phase(network, quorum_hash, phase, expected_members=None, timeout=60): - """Wait for masternodes to reach a DKG phase.""" - if expected_members is None: - expected_members = len(network.masternodes) +def _wait_for_quorum_phase( + network, + llmq_type_name, + quorum_hash, + phase, + expected_members, + check_received_messages=None, + check_received_messages_count=0, + timeout=60, +): + """Wait for masternodes to reach a DKG phase with optional message count gating. + + Mirrors Dash Core test_framework.wait_for_quorum_phase: when + `check_received_messages` is set, a masternode is only counted once its + session for (llmq_type_name, quorum_hash) reports at least + `check_received_messages_count` for that field. Without this gate, phase + transitions advance before contributions/premature commitments have been + exchanged, producing null DKG commitments. + """ start = time.time() while time.time() - start < timeout: member_count = 0 @@ -105,12 +122,17 @@ def _wait_for_quorum_phase(network, quorum_hash, phase, expected_members=None, t try: status = mn.rpc.call("quorum", "dkgstatus") for s in status.get("session", []): - if s.get("llmqType") != "llmq_test": + if s.get("llmqType") != llmq_type_name: continue qs = s.get("status", {}) - if qs.get("quorumHash") == quorum_hash and qs.get("phase") == phase: + if qs.get("quorumHash") != quorum_hash: + continue + if qs.get("phase") == phase and ( + check_received_messages is None + or qs.get(check_received_messages, 0) >= check_received_messages_count + ): member_count += 1 - break + break except Exception: pass if member_count >= expected_members: @@ -120,68 +142,70 @@ def _wait_for_quorum_phase(network, quorum_hash, phase, expected_members=None, t return False -def _wait_for_quorum_connections(network, quorum_hash, timeout=60): - """Wait for masternodes to establish quorum connections.""" - start = time.time() - while time.time() - start < timeout: - all_connected = True - for mn in network.masternodes: - try: - status = mn.rpc.call("quorum", "dkgstatus") - found_session = False - for s in status.get("session", []): - if s.get("llmqType") != "llmq_test": - continue - if s.get("status", {}).get("quorumHash") == quorum_hash: - found_session = True - break - if not found_session: - all_connected = False - break - except Exception: - all_connected = False - break - if all_connected: - return True - _bump_mocktime(network, 1) - time.sleep(0.3) - return False - +def _wait_for_quorum_connections( + network, llmq_type_name, quorum_hash, expected_members, expected_connections, timeout=60 +): + """Wait for actual TCP connections to be established for the quorum. 
-def _wait_for_quorum_commitment(network, quorum_hash, timeout=30): - """Wait for minable commitments on all masternodes.""" + Requires `expected_members` masternodes to each report at least + `expected_connections` peers in the connected state in their + `quorumConnections` entry for (llmq_type_name, quorum_hash). + """ start = time.time() while time.time() - start < timeout: - all_ready = True + ready_members = 0 for mn in network.masternodes: try: status = mn.rpc.call("quorum", "dkgstatus") - found = False - for c in status.get("minableCommitments", []): - if c.get("llmqType") == 100 and c.get("quorumHash") == quorum_hash: - if c.get("quorumPublicKey", "0" * 96) != "0" * 96: - found = True - break - if not found: - all_ready = False - break + sessions = status.get("session", []) + has_session = any( + s.get("llmqType") == llmq_type_name and s.get("status", {}).get("quorumHash") == quorum_hash + for s in sessions + ) + if not has_session: + continue + + group = next( + ( + g + for g in status.get("quorumConnections", []) + if g.get("llmqType") == llmq_type_name and g.get("quorumHash") == quorum_hash + ), + None, + ) + if not group: + continue + + peers = group.get("quorumConnections", []) + connected = sum(1 for p in peers if p.get("connected") is True) + if connected >= expected_connections: + ready_members += 1 except Exception: - all_ready = False - break - if all_ready: + pass + if ready_members >= expected_members: return True + _bump_mocktime(network, 1) time.sleep(0.5) return False -def _wait_for_quorum_list(network, quorum_hash, timeout=15): - """Wait for quorum to appear in the quorum list.""" +def _wait_for_quorum_list(network, llmq_type_name, quorum_hashes, timeout=15): + """Wait for every hash in `quorum_hashes` to appear in `quorum list` for the type. + + Calls `quorum list` without a count argument so dashd returns up to + `signingActiveQuorumCount` quorums per type (2 for type 103, 2 for type + 100). Passing `count=1` would only return one entry, which hides the + second quorum of a rotating DIP-0024 cycle (q_1) even when it was + successfully mined. 
+ """ rpc = network.controller.rpc + hashes = list(quorum_hashes) start = time.time() while time.time() - start < timeout: try: - qlist = rpc.call("quorum", "list", 1) - if quorum_hash in qlist.get("llmq_test", []): + qlist = rpc.call("quorum", "list") + listed = qlist.get(llmq_type_name, []) + if all(h in listed for h in hashes): return True except Exception: pass @@ -217,7 +241,7 @@ def phase_1_bootstrap(network): # Mine blocks for maturity + activation (need coins to be spendable) # Mine in batches with mocktime bumps (like Dash Core's cache setup) - for batch in range(5): + for _ in range(5): _bump_mocktime(network, 25 * 156) rpc.call("generatetoaddress", 25, network.fund_address) @@ -254,7 +278,8 @@ def phase_2_register_masternodes(network): # register_fund: collateral and fees come from fund_address pro_tx_hash = rpc.call( - "protx", "register_fund", + "protx", + "register_fund", collateral_addr, [service_addr], owner_addr, @@ -366,128 +391,329 @@ def phase_4_enable_sporks(network): _bump_mocktime(network, 1) sporks = rpc.call("spork", "show") - for name in ["SPORK_17_QUORUM_DKG_ENABLED", "SPORK_21_QUORUM_ALL_CONNECTED", - "SPORK_2_INSTANTSEND_ENABLED", "SPORK_19_CHAINLOCKS_ENABLED"]: + for name in [ + "SPORK_17_QUORUM_DKG_ENABLED", + "SPORK_21_QUORUM_ALL_CONNECTED", + "SPORK_2_INSTANTSEND_ENABLED", + "SPORK_19_CHAINLOCKS_ENABLED", + ]: value = sporks.get(name, "unknown") print(f" {name}: {value}") -def phase_5_mine_dkg_cycles(network, num_cycles): - """Mine DKG cycles following Dash Core's mine_quorum() pattern.""" - print("\n" + "=" * 60) - print(f"Phase 5: Mine {num_cycles} DKG cycles") - print("=" * 60) +def _phase_checks_for(phase): + """Return the (field, count_for_type100, count_for_type103) message gate per phase. + + Follows Dash Core's mine_quorum / mine_cycle_quorum expectations: + - phase 2 (contribute): receivedContributions == size + - phase 3 (complain): receivedComplaints == 0 + - phase 4 (justify): receivedJustifications == 0 + - phase 5 (commit): receivedPrematureCommitments == size + - phases 1/6: no message gate + """ + if phase == 2: + return "receivedContributions", LLMQ_TEST_SIZE, LLMQ_TEST_DIP0024_SIZE + if phase == 3: + return "receivedComplaints", 0, 0 + if phase == 4: + return "receivedJustifications", 0, 0 + if phase == 5: + return "receivedPrematureCommitments", LLMQ_TEST_SIZE, LLMQ_TEST_DIP0024_SIZE + return None, 0, 0 + + +def _dump_dkg_status(network, llmq_type_name, quorum_hash): + """Print per-masternode DKG session state and commitment state for diagnosis.""" + type_num = {"llmq_test": 100, "llmq_test_dip0024": 103}.get(llmq_type_name) + # Check if the quorum has already been mined - a non-null commitment that lands + # in a block is removed from `minableCommitments`, so "no entry" can mean either + # "not yet constructed" or "already committed to chain". Look at `quorum list` + # on the controller to distinguish. + try: + qlist = network.controller.rpc.call("quorum", "list", 10) + listed_for_type = qlist.get(llmq_type_name, []) + if quorum_hash in listed_for_type: + print(f" controller quorum list: {llmq_type_name} {quorum_hash[:16]} IS LISTED") + else: + print(f" controller quorum list ({llmq_type_name}): {[h[:16] for h in listed_for_type]}") + except Exception as e: + print(f" controller quorum list error: {e}") + # Also inspect the latest block to see if it contains a non-null commitment. 
+ try: + best_hash = network.controller.rpc.call("getbestblockhash") + block = network.controller.rpc.call("getblock", best_hash, 2) + height = block.get("height") + for tx in block.get("tx", []): + if tx.get("type") == 6 or "qc" in tx or "qcTx" in tx: + print(f" tip block {height} has special tx type={tx.get('type')} txid={tx.get('txid', '')[:16]}") + except Exception as e: + print(f" getblock tip error: {e}") + for mn in network.masternodes: + try: + status = mn.rpc.call("quorum", "dkgstatus") + except Exception as e: + print(f" {mn.name}: dkgstatus error: {e}") + continue + session = next( + ( + s + for s in status.get("session", []) + if s.get("llmqType") == llmq_type_name and s.get("status", {}).get("quorumHash") == quorum_hash + ), + None, + ) + if session is None: + print(f" {mn.name}: no {llmq_type_name} session for {quorum_hash[:16]}") + else: + qs = session.get("status", {}) + print( + f" {mn.name}: phase={qs.get('phase')} " + f"sent=c:{qs.get('sentContributions')},co:{qs.get('sentComplaint')}," + f"j:{qs.get('sentJustification')},pc:{qs.get('sentPrematureCommitment')} " + f"aborted={qs.get('aborted')} bad={qs.get('badMembers')} " + f"recv=c:{qs.get('receivedContributions', 0)},co:{qs.get('receivedComplaints', 0)}," + f"j:{qs.get('receivedJustifications', 0)},pc:{qs.get('receivedPrematureCommitments', 0)}" + ) + all_commits = status.get("minableCommitments", []) + print(f" minableCommitments total={len(all_commits)}") + for commit in all_commits: + pk = commit.get("quorumPublicKey", "?") + mark = ( + " <-- expected" + if ( + commit.get("quorumHash") == quorum_hash and (type_num is None or commit.get("llmqType") == type_num) + ) + else "" + ) + print( + f" type={commit.get('llmqType')} idx={commit.get('quorumIndex')} " + f"qh={commit.get('quorumHash', '')[:16]} " + f"signers={commit.get('signersCount')}/{commit.get('validMembersCount')} " + f"pkHead={pk[:16]}{mark}" + ) + conn_group = next( + ( + g + for g in status.get("quorumConnections", []) + if g.get("llmqType") == llmq_type_name and g.get("quorumHash") == quorum_hash + ), + None, + ) + if conn_group is not None: + peers = conn_group.get("quorumConnections", []) + connected = sum(1 for p in peers if p.get("connected") is True) + print(f" quorumConnections: {connected}/{len(peers)} connected") + + +class DKGCycleError(RuntimeError): + """Raised when a DKG cycle step fails to complete in time.""" + + +def _require(cond, message, network=None, llmq_type_name=None, quorum_hash=None): + if cond: + return + if network is not None and quorum_hash is not None: + print(f"\n Diagnostic for {llmq_type_name} {quorum_hash[:16]}:") + _dump_dkg_status(network, llmq_type_name or "llmq_test", quorum_hash) + raise DKGCycleError(message) + +def _run_single_dkg_cycle(network, cycle_idx, num_cycles, cycle_quorum_is_ready): + """Mine one DKG cycle producing real type-100 AND rotating type-103 quorums. + + Aligns to the next DKG boundary, then walks blocks one at a time so the two + DIP-0024 rotating sessions (q_0 at cycle start, q_1 one block later) can + be interleaved in phase checks alongside the single llmq_test session. + All phase transitions are gated on the expected DKG message counts so the + chain never advances past a phase before real messages have been exchanged. + + On the first call (`cycle_quorum_is_ready=False`), mines 3 extra DKG cycles + as required by DIP-0024. 
Per feature_llmq_rotation.py and the `extra_blocks` + branch in test_framework.mine_cycle_quorum, the first three "quarters" after + v20/mn_rr activation are built without a DKG session, so the chain must + advance past H+3C before the first rotating quorum can form. + + Raises DKGCycleError on any timeout or missing quorum. + """ rpc = network.controller.rpc - completed_cycles = 0 - # LLMQ_TEST has 3 members out of our 4 MNs - expected_members = 3 - for cycle in range(num_cycles): - height = rpc.call("getblockcount") - - # Move to next DKG cycle start - skip_count = DKG_INTERVAL - (height % DKG_INTERVAL) - if skip_count != 0 and skip_count != DKG_INTERVAL: - _bump_mocktime(network, 1) - network.generate_blocks(skip_count) - network.wait_for_sync() - - quorum_hash = rpc.call("getbestblockhash") - height = rpc.call("getblockcount") - print(f"\n Cycle {cycle + 1}/{num_cycles} at height {height}") - - # Phase 1: Init - wait for quorum connections - print(" Phase 1 (init)...", end="", flush=True) - if not _wait_for_quorum_phase(network, quorum_hash, 1, expected_members, timeout=60): - print(" timeout") - # Debug output - for mn in network.masternodes: - try: - status = mn.rpc.call("quorum", "dkgstatus") - sessions = status.get("session", []) - print(f" {mn.name}: time={status.get('time')}, sessions={len(sessions)}, " - f"tip={status.get('session', [{}])[0].get('status', {}).get('quorumHash', 'none')[:16] if sessions else 'none'}") - except Exception as e: - print(f" {mn.name}: error {e}") - _move_blocks(network, DKG_INTERVAL) - continue - _wait_for_quorum_connections(network, quorum_hash, timeout=60) + # Align to the next cycle boundary, with 3-cycle warmup on the first call. + height = rpc.call("getblockcount") + skip_count = DKG_INTERVAL - (height % DKG_INTERVAL) + if skip_count == DKG_INTERVAL: + skip_count = 0 + warmup_blocks = 0 if cycle_quorum_is_ready else DKG_INTERVAL * 3 + total_move = warmup_blocks + skip_count + if total_move > 0: + if warmup_blocks > 0: + print(f" DIP-0024 warmup: mining {total_move} blocks before first cycle...") + _move_blocks(network, total_move) + + q_0 = rpc.call("getbestblockhash") + height = rpc.call("getblockcount") + print(f"\n Cycle {cycle_idx + 1}/{num_cycles} at height {height} q_0={q_0[:16]}...") + + # Phase 1 (init) on q_0 for both types, plus connections + print(" Phase 1 (init) q_0...", end="", flush=True) + _require( + _wait_for_quorum_phase(network, "llmq_test", q_0, 1, LLMQ_TEST_SIZE, timeout=60), + "phase 1 timeout (llmq_test q_0)", + network, + "llmq_test", + q_0, + ) + _require( + _wait_for_quorum_phase(network, "llmq_test_dip0024", q_0, 1, LLMQ_TEST_DIP0024_SIZE, timeout=60), + "phase 1 timeout (llmq_test_dip0024 q_0)", + network, + "llmq_test_dip0024", + q_0, + ) + _require( + _wait_for_quorum_connections(network, "llmq_test", q_0, LLMQ_TEST_SIZE, LLMQ_TEST_SIZE - 1, timeout=60), + "quorum connection timeout (llmq_test q_0)", + network, + "llmq_test", + q_0, + ) + _require( + _wait_for_quorum_connections( + network, "llmq_test_dip0024", q_0, LLMQ_TEST_DIP0024_SIZE, LLMQ_TEST_DIP0024_SIZE - 1, timeout=60 + ), + "quorum connection timeout (llmq_test_dip0024 q_0)", + network, + "llmq_test_dip0024", + q_0, + ) + print(" ok") + + # Advance 1 block -> q_1 (the rotating pair's second quorum) enters phase 1 + _move_blocks(network, 1) + q_1 = rpc.call("getbestblockhash") + print(f" Phase 1 (init) q_1={q_1[:16]}...", end="", flush=True) + _require( + _wait_for_quorum_phase(network, "llmq_test_dip0024", q_1, 1, LLMQ_TEST_DIP0024_SIZE, timeout=60), + 
"phase 1 timeout (llmq_test_dip0024 q_1)", + network, + "llmq_test_dip0024", + q_1, + ) + _require( + _wait_for_quorum_connections( + network, "llmq_test_dip0024", q_1, LLMQ_TEST_DIP0024_SIZE, LLMQ_TEST_DIP0024_SIZE - 1, timeout=60 + ), + "quorum connection timeout (llmq_test_dip0024 q_1)", + network, + "llmq_test_dip0024", + q_1, + ) + print(" ok") + + # Walk phases 2-6 block-by-block, alternating q_0 and q_1 checks. + # At each even block of the cycle, q_0 enters the next phase (together + # with the llmq_test session). At each odd block, q_1 enters it. + for phase in range(2, 7): + field, count_test, count_dip0024 = _phase_checks_for(phase) + _move_blocks(network, 1) # enter phase on q_0 + type 100 + + phase_name = {2: "contribute", 3: "complain", 4: "justify", 5: "commit", 6: "finalize"}[phase] + print(f" Phase {phase} ({phase_name}) q_0...", end="", flush=True) + _require( + _wait_for_quorum_phase(network, "llmq_test", q_0, phase, LLMQ_TEST_SIZE, field, count_test, timeout=45), + f"phase {phase} timeout (llmq_test q_0)", + network, + "llmq_test", + q_0, + ) + _require( + _wait_for_quorum_phase( + network, "llmq_test_dip0024", q_0, phase, LLMQ_TEST_DIP0024_SIZE, field, count_dip0024, timeout=45 + ), + f"phase {phase} timeout (llmq_test_dip0024 q_0)", + network, + "llmq_test_dip0024", + q_0, + ) + print(" ok", end="", flush=True) + + _move_blocks(network, 1) # enter phase on q_1 + print(" q_1...", end="", flush=True) + _require( + _wait_for_quorum_phase( + network, "llmq_test_dip0024", q_1, phase, LLMQ_TEST_DIP0024_SIZE, field, count_dip0024, timeout=45 + ), + f"phase {phase} timeout (llmq_test_dip0024 q_1)", + network, + "llmq_test_dip0024", + q_1, + ) print(" ok") - _move_blocks(network, 2) - # Phase 2: Contribute - print(" Phase 2 (contribute)...", end="", flush=True) - if not _wait_for_quorum_phase(network, quorum_hash, 2, expected_members, timeout=30): - print(" timeout") - _move_blocks(network, DKG_INTERVAL) - continue - print(" ok") - _move_blocks(network, 2) + # Mine the commit block. At cycle+12 the controller creates a block that + # includes the real finalCommitment txs for type 100 (window [cycle+10, + # cycle+18]) and for both type 103 rotating quorums (window [cycle+12, + # cycle+20]). Mirrors the final mining step in test_framework's + # mine_cycle_quorum (getblocktemplate + generate(1)). + _bump_mocktime(network, 1) + rpc.call("getblocktemplate") + _move_blocks(network, 1) + + # Confirm all three real quorums were recorded by the chain. `quorum list` + # reflects commitments stored in evoDB by ProcessCommitment, which only + # writes non-null commitments. If any quorum is missing, its commitment + # was null in the mined block. + _require( + _wait_for_quorum_list(network, "llmq_test", [q_0], timeout=15), + "llmq_test q_0 missing from quorum list after commit block", + network, + "llmq_test", + q_0, + ) + _require( + _wait_for_quorum_list(network, "llmq_test_dip0024", [q_0, q_1], timeout=15), + "llmq_test_dip0024 q_0/q_1 missing from quorum list after commit block", + network, + "llmq_test_dip0024", + q_0, + ) - # Phase 3: Complain - print(" Phase 3 (complain)...", end="", flush=True) - if not _wait_for_quorum_phase(network, quorum_hash, 3, expected_members, timeout=30): - print(" timeout") - _move_blocks(network, DKG_INTERVAL) - continue - print(" ok") - _move_blocks(network, 2) + # Mine 8 blocks (SIGN_HEIGHT_OFFSET) for signing-window maturity, matching + # the tail of test_framework.mine_cycle_quorum. 
+ _move_blocks(network, 8) - # Phase 4: Justify - print(" Phase 4 (justify)...", end="", flush=True) - if not _wait_for_quorum_phase(network, quorum_hash, 4, expected_members, timeout=30): - print(" timeout") - _move_blocks(network, DKG_INTERVAL) - continue - print(" ok") - _move_blocks(network, 2) + qlist = rpc.call("quorum", "list") + print( + f" Cycle complete: llmq_test={len(qlist.get('llmq_test', []))}, " + f"llmq_test_dip0024={len(qlist.get('llmq_test_dip0024', []))}" + ) - # Phase 5: Commit - print(" Phase 5 (commit)...", end="", flush=True) - if not _wait_for_quorum_phase(network, quorum_hash, 5, expected_members, timeout=30): - print(" timeout") - _move_blocks(network, DKG_INTERVAL) - continue - print(" ok") - _move_blocks(network, 2) - # Phase 6: Mining - print(" Phase 6 (mining)...", end="", flush=True) - if not _wait_for_quorum_phase(network, quorum_hash, 6, expected_members, timeout=30): - print(" timeout") - _move_blocks(network, DKG_INTERVAL) - continue - print(" ok") +def phase_5_mine_dkg_cycles(network, num_cycles): + """Mine `num_cycles` combined DKG cycles (type 100 + rotating type 103). - # Wait for final commitment - print(" Waiting for commitment...", end="", flush=True) - if not _wait_for_quorum_commitment(network, quorum_hash, timeout=30): - print(" timeout") - _move_blocks(network, DKG_INTERVAL) - continue - print(" ok") + Each cycle is gated on real DKG message exchange, so every produced + commitment has signers>0, validMembers>0, and a non-zero quorumPublicKey. + """ + print("\n" + "=" * 60) + print(f"Phase 5: Mine {num_cycles} DKG cycles") + print("=" * 60) - # Mine the commitment block (getblocktemplate triggers CreateNewBlock) - _bump_mocktime(network, 1) - rpc.call("getblocktemplate") - _move_blocks(network, 1) - - # Verify quorum appeared in the list - if _wait_for_quorum_list(network, quorum_hash, timeout=15): - # Mine 8 blocks for quorum maturity - _move_blocks(network, 8) - completed_cycles += 1 - total = len(rpc.call("quorum", "list").get("llmq_test", [])) - print(f" Quorum formed (total: {total})") - else: - print(f" Quorum not in list") + completed_cycles = 0 + cycle_quorum_is_ready = False + for cycle in range(num_cycles): + _run_single_dkg_cycle(network, cycle, num_cycles, cycle_quorum_is_ready) + # Warmup is applied on the first call; subsequent cycles skip it. 
+ cycle_quorum_is_ready = True + completed_cycles += 1 + rpc = network.controller.rpc height = rpc.call("getblockcount") quorum_list = rpc.call("quorum", "list") print(f"\n Completed {completed_cycles}/{num_cycles} DKG cycles (height: {height})") - print(f" Quorums: llmq_test={len(quorum_list.get('llmq_test', []))}, " - f"llmq_test_dip0024={len(quorum_list.get('llmq_test_dip0024', []))}") + print( + f" Quorums: llmq_test={len(quorum_list.get('llmq_test', []))}, " + f"llmq_test_dip0024={len(quorum_list.get('llmq_test_dip0024', []))}" + ) return completed_cycles @@ -646,6 +872,7 @@ def main(): except Exception as e: print(f"\nGeneration failed: {e}") import traceback + traceback.print_exc() sys.exit(1) finally: diff --git a/generator/masternode_network.py b/generator/masternode_network.py index 9bccd04..d333e5a 100644 --- a/generator/masternode_network.py +++ b/generator/masternode_network.py @@ -11,7 +11,7 @@ import time from pathlib import Path -from .dashd_manager import DashdManager, dashd_preexec_fn +from .dashd_manager import dashd_preexec_fn from .rpc_client import DashRPCClient @@ -207,7 +207,7 @@ def start_masternode_nodes(self, controller_datadir): # Restart controller (with current mocktime if available) restart_args = list(controller_extra) - if hasattr(self, '_mocktime') and self._mocktime: + if hasattr(self, "_mocktime") and self._mocktime: # Remove any old mocktime arg and add current one restart_args = [a for a in restart_args if not a.startswith("-mocktime=")] restart_args.append(f"-mocktime={self._mocktime}") @@ -244,14 +244,16 @@ def start_masternode_nodes(self, controller_datadir): rpc_port = find_free_port(p2p_port + 1) mn_args = list(self.base_extra_args) - mn_args.extend([ - "-blockfilterindex=1", - "-peerblockfilters=1", - "-txindex=1", - f"-masternodeblsprivkey={mn_info['bls_private_key']}", - ]) + mn_args.extend( + [ + "-blockfilterindex=1", + "-peerblockfilters=1", + "-txindex=1", + f"-masternodeblsprivkey={mn_info['bls_private_key']}", + ] + ) # Pass mocktime at startup so DKG scheduling works - if hasattr(self, '_mocktime') and self._mocktime: + if hasattr(self, "_mocktime") and self._mocktime: mn_args.append(f"-mocktime={self._mocktime}") node = MasternodeNode( @@ -295,6 +297,18 @@ def connect_all(self): except Exception as e: print(f" Warning: addnode controller->{mn.name} failed: {e}") + # Direct MN<->MN connections so DKG message exchange does not have to + # wait for the quorum manager to build them lazily. Without these, + # DIP-0024 DKGs (4 members, minSize=4) can advance past phase 2 before + # contributions have propagated, producing null commitments. + for i, mn_a in enumerate(self.masternodes): + for mn_b in self.masternodes[i + 1 :]: + target = f"127.0.0.1:{mn_b.p2p_port}" + try: + mn_a.rpc.call("addnode", target, "onetry") + except Exception as e: + print(f" Warning: addnode {mn_a.name}->{mn_b.name} failed: {e}") + # Re-enable MN threads for mn in self.masternodes: try: @@ -304,7 +318,7 @@ def connect_all(self): # Wait for connections to establish peer_count = 0 - for attempt in range(15): + for _ in range(15): time.sleep(2) peer_count = len(self.controller.rpc.call("getpeerinfo")) if peer_count >= len(self.masternodes): From 1f18558797f23048504bba889ad79ff18691678d Mon Sep 17 00:00:00 2001 From: xdustinface Date: Fri, 17 Apr 2026 19:06:18 +1000 Subject: [PATCH 4/8] refactor: clean up masternode generator script Consolidate scattered lifecycle helpers and remove dead plumbing uncovered while fixing the DIP-0024 cycle generator. 
- Move mocktime control onto `MasternodeNetwork` as `set_mocktime` /
  `bump_mocktime` / `move_blocks`, plus `all_nodes()`. The previous free
  functions poked at `network._mocktime` from outside the class, and the
  `hasattr(self, "_mocktime")` guards in `start_masternode_nodes` were
  paranoid dead checks since `_mocktime` is always set before any node
  starts.
- Move `force_finish_mnsync` and mocktime application onto `MasternodeNode`.
- Hoist `import subprocess` to the module top (per the project's
  import-at-top rule).
- Drop the unreachable `find_free_port(controller_p2p_port + ...)` fallback
  in `start_masternode_nodes`: `allocate_mn_ports()` always runs first, so
  `mn_p2p_ports` is always populated. Replace with a direct `assert`.
- Replace the `MasternodeConfig` dataclass with direct use of argparse
  `args`. It was a three-field indirection used exactly once.
- Remove the `completed_cycles` counter and the `dkg_cycles` pass-through
  from `phase_5_mine_dkg_cycles` to `phase_7_export`. Since
  `_run_single_dkg_cycle` raises on failure, the counter always equalled
  `num_cycles`.
- Make `wait_for_quorum_phase`, `wait_for_quorum_connections`, and
  `wait_for_quorum_list` raise `DKGCycleError` with a diagnostic dump on
  timeout. This eliminates ~12 call sites of 6-line `_require(...)`
  boilerplate and pushes the context into the helper that has it.
- Extract `_find_session`, `_find_connection_group`, `_wait_for_dkg_phase`,
  and the `PHASE_GATES` / `PHASE_NAMES` / `LLMQ_TYPE_NUM` tables to collapse
  duplicated dkgstatus navigation and the per-phase q_0/q_1 interleaving.
- Extract a small `try_addnode` closure in `MasternodeNetwork.connect_all`
  to replace the four copy-pasted `try/except` addnode blocks.

Net effect: `generate_masternode.py` shrinks from 884 to ~690 lines, while
`masternode_network.py` grows from 363 to ~400 (the extra methods are only
partly offset by the removed boilerplate).

Verified end-to-end with `--dkg-cycles 2`: real `llmq_test` and
`llmq_test_dip0024` quorums formed in every cycle.
---
 generate_masternode.py          | 706 ++++++++++++--------------
 generator/masternode_network.py | 126 ++++--
 2 files changed, 353 insertions(+), 479 deletions(-)

diff --git a/generate_masternode.py b/generate_masternode.py
index 02aa081..c2b86fe 100755
--- a/generate_masternode.py
+++ b/generate_masternode.py
@@ -18,7 +18,6 @@
 import shutil
 import sys
 import time
-from dataclasses import dataclass
 from pathlib import Path
 
 sys.path.insert(0, str(Path(__file__).parent))
@@ -41,62 +40,112 @@
     "-testactivationheight=mn_rr@100",
 ]
 
+# Maps llmq_type_name -> numeric llmq type (matches Dash Core consensus enum).
+LLMQ_TYPE_NUM = {"llmq_test": 100, "llmq_test_dip0024": 103}
 
-@dataclass
-class MasternodeConfig:
-    dashd_path: str
-    dkg_cycles: int
-    output_dir: str
+# Message-count gates per DKG phase. Phase 2/5 require a message from every
+# member; phases 3/4 expect zero in a healthy cycle (no complaints/justifications).
+# Phases 1/6 have no message gate. Mirrors Dash Core's mine_quorum /
+# mine_cycle_quorum expectations.
+PHASE_GATES = { + 2: ("receivedContributions", LLMQ_TEST_SIZE, LLMQ_TEST_DIP0024_SIZE), + 3: ("receivedComplaints", 0, 0), + 4: ("receivedJustifications", 0, 0), + 5: ("receivedPrematureCommitments", LLMQ_TEST_SIZE, LLMQ_TEST_DIP0024_SIZE), +} +PHASE_NAMES = {1: "init", 2: "contribute", 3: "complain", 4: "justify", 5: "commit", 6: "finalize"} -def _all_nodes(network): - return [network.controller] + network.masternodes +class DKGCycleError(RuntimeError): + """Raised when a DKG cycle step fails to complete in time.""" -def _set_mocktime(network, mocktime): - """Set mocktime on all nodes.""" - network._mocktime = mocktime - for node in _all_nodes(network): - try: - node.rpc.call("setmocktime", mocktime) - except Exception: - pass +def _find_session(status, llmq_type_name, quorum_hash): + """Return the dkgstatus session entry for `(llmq_type_name, quorum_hash)`, or None.""" + for s in status.get("session", []): + if s.get("llmqType") != llmq_type_name: + continue + if s.get("status", {}).get("quorumHash") != quorum_hash: + continue + return s + return None -def _bump_mocktime(network, seconds=1): - """Advance mocktime and run mockscheduler on all nodes.""" - network._mocktime += seconds - for node in _all_nodes(network): - try: - node.rpc.call("setmocktime", network._mocktime) - node.rpc.call("mockscheduler", seconds) - except Exception: - pass +def _find_connection_group(status, llmq_type_name, quorum_hash): + """Return the quorumConnections entry for `(llmq_type_name, quorum_hash)`, or None.""" + for g in status.get("quorumConnections", []): + if g.get("llmqType") == llmq_type_name and g.get("quorumHash") == quorum_hash: + return g + return None -def _move_blocks(network, count): - """Bump mocktime, generate blocks, and wait for sync.""" - if count <= 0: - return - _bump_mocktime(network, 1) - network.generate_blocks(count) - network.wait_for_sync() +def _dump_dkg_status(network, llmq_type_name, quorum_hash): + """Print per-masternode DKG session and commitment state for failure diagnosis.""" + type_num = LLMQ_TYPE_NUM.get(llmq_type_name) + try: + qlist = network.controller.rpc.call("quorum", "list", 10) + listed = qlist.get(llmq_type_name, []) + if quorum_hash in listed: + print(f" controller quorum list: {llmq_type_name} {quorum_hash[:16]} IS LISTED") + else: + print(f" controller quorum list ({llmq_type_name}): {[h[:16] for h in listed]}") + except Exception as e: + print(f" controller quorum list error: {e}") + try: + best_hash = network.controller.rpc.call("getbestblockhash") + block = network.controller.rpc.call("getblock", best_hash, 2) + height = block.get("height") + for tx in block.get("tx", []): + if tx.get("type") == 6: + print(f" tip block {height} qc tx txid={tx.get('txid', '')[:16]}") + except Exception as e: + print(f" getblock tip error: {e}") -def _force_finish_mnsync(node): - """Force a node to finish mnsync (masternodes reject connections until synced).""" - for _ in range(20): + for mn in network.masternodes: try: - status = node.rpc.call("mnsync", "status") - if status.get("IsSynced", False): - return - node.rpc.call("mnsync", "next") - time.sleep(0.5) - except Exception: - time.sleep(0.5) + status = mn.rpc.call("quorum", "dkgstatus") + except Exception as e: + print(f" {mn.name}: dkgstatus error: {e}") + continue + session = _find_session(status, llmq_type_name, quorum_hash) + if session is None: + print(f" {mn.name}: no {llmq_type_name} session for {quorum_hash[:16]}") + else: + qs = session.get("status", {}) + print( + f" {mn.name}: phase={qs.get('phase')} " + 
f"sent=c:{qs.get('sentContributions')},co:{qs.get('sentComplaint')}," + f"j:{qs.get('sentJustification')},pc:{qs.get('sentPrematureCommitment')} " + f"aborted={qs.get('aborted')} bad={qs.get('badMembers')} " + f"recv=c:{qs.get('receivedContributions', 0)},co:{qs.get('receivedComplaints', 0)}," + f"j:{qs.get('receivedJustifications', 0)},pc:{qs.get('receivedPrematureCommitments', 0)}" + ) + commits = status.get("minableCommitments", []) + print(f" minableCommitments total={len(commits)}") + for commit in commits: + pk = commit.get("quorumPublicKey", "?") + match = commit.get("quorumHash") == quorum_hash and (type_num is None or commit.get("llmqType") == type_num) + print( + f" type={commit.get('llmqType')} idx={commit.get('quorumIndex')} " + f"qh={commit.get('quorumHash', '')[:16]} " + f"signers={commit.get('signersCount')}/{commit.get('validMembersCount')} " + f"pkHead={pk[:16]}{' <-- expected' if match else ''}" + ) + conn_group = _find_connection_group(status, llmq_type_name, quorum_hash) + if conn_group is not None: + peers = conn_group.get("quorumConnections", []) + connected = sum(1 for p in peers if p.get("connected") is True) + print(f" quorumConnections: {connected}/{len(peers)} connected") -def _wait_for_quorum_phase( +def _raise_with_diagnostic(network, message, llmq_type_name, quorum_hash): + print(f"\n Diagnostic for {llmq_type_name} {quorum_hash[:16]}:") + _dump_dkg_status(network, llmq_type_name, quorum_hash) + raise DKGCycleError(message) + + +def wait_for_quorum_phase( network, llmq_type_name, quorum_hash, @@ -106,14 +155,16 @@ def _wait_for_quorum_phase( check_received_messages_count=0, timeout=60, ): - """Wait for masternodes to reach a DKG phase with optional message count gating. + """Wait for `expected_members` masternodes to reach a DKG phase. - Mirrors Dash Core test_framework.wait_for_quorum_phase: when - `check_received_messages` is set, a masternode is only counted once its - session for (llmq_type_name, quorum_hash) reports at least + When `check_received_messages` is set, a masternode is only counted once + its session for (llmq_type_name, quorum_hash) reports at least `check_received_messages_count` for that field. Without this gate, phase - transitions advance before contributions/premature commitments have been - exchanged, producing null DKG commitments. + transitions advance before the DKG messages have been exchanged, + producing null commitments. + + Mirrors Dash Core test_framework.wait_for_quorum_phase. Raises + DKGCycleError on timeout. 
""" start = time.time() while time.time() - start < timeout: @@ -121,82 +172,74 @@ def _wait_for_quorum_phase( for mn in network.masternodes: try: status = mn.rpc.call("quorum", "dkgstatus") - for s in status.get("session", []): - if s.get("llmqType") != llmq_type_name: - continue - qs = s.get("status", {}) - if qs.get("quorumHash") != quorum_hash: - continue - if qs.get("phase") == phase and ( - check_received_messages is None - or qs.get(check_received_messages, 0) >= check_received_messages_count - ): - member_count += 1 - break except Exception: - pass + continue + session = _find_session(status, llmq_type_name, quorum_hash) + if session is None: + continue + qs = session.get("status", {}) + if qs.get("phase") != phase: + continue + if ( + check_received_messages is not None + and qs.get(check_received_messages, 0) < check_received_messages_count + ): + continue + member_count += 1 if member_count >= expected_members: - return True - _bump_mocktime(network, 1) + return + network.bump_mocktime(1) time.sleep(0.3) - return False + _raise_with_diagnostic( + network, + f"phase {phase} timeout ({llmq_type_name} {quorum_hash[:16]})", + llmq_type_name, + quorum_hash, + ) -def _wait_for_quorum_connections( - network, llmq_type_name, quorum_hash, expected_members, expected_connections, timeout=60 -): - """Wait for actual TCP connections to be established for the quorum. +def wait_for_quorum_connections(network, llmq_type_name, quorum_hash, expected_members, timeout=60): + """Wait until `expected_members` masternodes report the DKG peer mesh is up. - Requires `expected_members` masternodes to each report at least - `expected_connections` peers in the connected state in their - `quorumConnections` entry for (llmq_type_name, quorum_hash). + With SPORK_21_QUORUM_ALL_CONNECTED active each member expects `size - 1` + connected peers. Raises DKGCycleError on timeout. """ + expected_connections = expected_members - 1 start = time.time() while time.time() - start < timeout: - ready_members = 0 + ready = 0 for mn in network.masternodes: try: status = mn.rpc.call("quorum", "dkgstatus") - sessions = status.get("session", []) - has_session = any( - s.get("llmqType") == llmq_type_name and s.get("status", {}).get("quorumHash") == quorum_hash - for s in sessions - ) - if not has_session: - continue - - group = next( - ( - g - for g in status.get("quorumConnections", []) - if g.get("llmqType") == llmq_type_name and g.get("quorumHash") == quorum_hash - ), - None, - ) - if not group: - continue - - peers = group.get("quorumConnections", []) - connected = sum(1 for p in peers if p.get("connected") is True) - if connected >= expected_connections: - ready_members += 1 except Exception: - pass - if ready_members >= expected_members: - return True - _bump_mocktime(network, 1) + continue + if _find_session(status, llmq_type_name, quorum_hash) is None: + continue + group = _find_connection_group(status, llmq_type_name, quorum_hash) + if group is None: + continue + peers = group.get("quorumConnections", []) + if sum(1 for p in peers if p.get("connected") is True) >= expected_connections: + ready += 1 + if ready >= expected_members: + return + network.bump_mocktime(1) time.sleep(0.5) - return False + _raise_with_diagnostic( + network, + f"quorum connection timeout ({llmq_type_name} {quorum_hash[:16]})", + llmq_type_name, + quorum_hash, + ) -def _wait_for_quorum_list(network, llmq_type_name, quorum_hashes, timeout=15): - """Wait for every hash in `quorum_hashes` to appear in `quorum list` for the type. 
+def wait_for_quorum_list(network, llmq_type_name, quorum_hashes, timeout=15): + """Wait until every hash in `quorum_hashes` appears in `quorum list` for the type. - Calls `quorum list` without a count argument so dashd returns up to - `signingActiveQuorumCount` quorums per type (2 for type 103, 2 for type - 100). Passing `count=1` would only return one entry, which hides the - second quorum of a rotating DIP-0024 cycle (q_1) even when it was - successfully mined. + `quorum list` is called without a count argument so dashd returns up to + `signingActiveQuorumCount` quorums per type (2 for rotating types). + Passing `count=1` would hide q_1 of a DIP-0024 cycle. Raises + DKGCycleError on timeout. """ rpc = network.controller.rpc hashes = list(quorum_hashes) @@ -206,11 +249,16 @@ def _wait_for_quorum_list(network, llmq_type_name, quorum_hashes, timeout=15): qlist = rpc.call("quorum", "list") listed = qlist.get(llmq_type_name, []) if all(h in listed for h in hashes): - return True + return except Exception: pass time.sleep(0.3) - return False + _raise_with_diagnostic( + network, + f"{llmq_type_name} {[h[:16] for h in hashes]} missing from quorum list", + llmq_type_name, + hashes[0], + ) def phase_1_bootstrap(network): @@ -220,12 +268,8 @@ def phase_1_bootstrap(network): print("=" * 60) rpc = network.controller.rpc + network.set_mocktime(TIME_GENESIS_BLOCK) - # Initialize mocktime on controller (matches Dash Core test framework) - network._mocktime = TIME_GENESIS_BLOCK - rpc.call("setmocktime", network._mocktime) - - # Create the SPV test wallet try: rpc.call("createwallet", "wallet") print(" Created 'wallet' wallet") @@ -236,19 +280,18 @@ def phase_1_bootstrap(network): else: raise - # Get a funding address for protx registration + # Funding address for mining rewards + later protx registration fees. network.fund_address = rpc.call("getnewaddress") - # Mine blocks for maturity + activation (need coins to be spendable) - # Mine in batches with mocktime bumps (like Dash Core's cache setup) + # Mine in batches with mocktime bumps (like Dash Core's cache setup) to + # accumulate spendable coins past the coinbase maturity window. 
for _ in range(5): - _bump_mocktime(network, 25 * 156) + network.bump_mocktime(25 * 156) rpc.call("generatetoaddress", 25, network.fund_address) height = rpc.call("getblockcount") balance = rpc.call("getbalance") print(f" Mined to height {height}, balance: {balance}") - blockchain_info = rpc.call("getblockchaininfo") print(f" Softforks: {list(blockchain_info.get('softforks', {}).keys())}") @@ -267,10 +310,7 @@ def phase_2_register_masternodes(network): service_addr = f"127.0.0.1:{mn_ports[i]}" print(f"\n Registering {mn_name} (service: {service_addr})...") - bls_result = rpc.call("bls", "generate") - bls_public = bls_result["public"] - bls_secret = bls_result["secret"] - + bls = rpc.call("bls", "generate") owner_addr = rpc.call("getnewaddress") voting_addr = rpc.call("getnewaddress") payout_addr = rpc.call("getnewaddress") @@ -283,7 +323,7 @@ def phase_2_register_masternodes(network): collateral_addr, [service_addr], owner_addr, - bls_public, + bls["public"], voting_addr, 0, payout_addr, @@ -291,20 +331,21 @@ def phase_2_register_masternodes(network): ) # Bury the protx (1 confirmation) - _bump_mocktime(network, 601) + network.bump_mocktime(601) rpc.call("generatetoaddress", 1, network.fund_address) - mn_info = { - "index": i, - "name": mn_name, - "pro_tx_hash": pro_tx_hash, - "bls_public_key": bls_public, - "bls_private_key": bls_secret, - "owner_address": owner_addr, - "voting_address": voting_addr, - "payout_address": payout_addr, - } - network.masternode_info.append(mn_info) + network.masternode_info.append( + { + "index": i, + "name": mn_name, + "pro_tx_hash": pro_tx_hash, + "bls_public_key": bls["public"], + "bls_private_key": bls["secret"], + "owner_address": owner_addr, + "voting_address": voting_addr, + "payout_address": payout_addr, + } + ) print(f" proTxHash: {pro_tx_hash}") height = rpc.call("getblockcount") @@ -324,10 +365,9 @@ def phase_3_start_masternodes(network): print("Phase 3: Start masternode nodes") print("=" * 60) - # Start all nodes (does not connect them yet) - network.start_masternode_nodes(network.controller.datadir) + network.start_masternode_nodes() - # Re-load the "wallet" wallet (lost during controller restart) + # Re-load the "wallet" wallet (lost during controller restart). rpc = network.controller.rpc try: rpc.call("loadwallet", "wallet") @@ -335,32 +375,30 @@ def phase_3_start_masternodes(network): if "already loaded" not in str(e).lower(): raise - # Set mocktime on all nodes via RPC (in addition to -mocktime= cmd arg) - _set_mocktime(network, network._mocktime) + # Re-apply mocktime via RPC on every node (the cmdline `-mocktime=` only + # seeds it; RPC `setmocktime` is what the DKG scheduler consults). 
+ network.set_mocktime(network.mocktime) - # Force mnsync on all nodes (must happen before connecting) print(" Forcing mnsync completion on controller...") - _force_finish_mnsync(network.controller) + network.controller.force_finish_mnsync() for mn in network.masternodes: print(f" Forcing mnsync completion on {mn.name}...") - _force_finish_mnsync(mn) + mn.force_finish_mnsync() print(" All nodes mnsync complete") - # Now connect all nodes (mnsync must be done first) print(" Connecting nodes...") network.connect_all() # Mine 8 blocks for masternode maturity - _move_blocks(network, 8) + network.move_blocks(8) - # Verify masternode status mn_list = rpc.call("masternode", "list") enabled_count = sum(1 for v in mn_list.values() if "ENABLED" in str(v)) print(f" Masternodes ENABLED: {enabled_count}/{NUM_MASTERNODES}") if enabled_count < NUM_MASTERNODES: for _ in range(10): - _move_blocks(network, 4) + network.move_blocks(4) time.sleep(1) mn_list = rpc.call("masternode", "list") enabled_count = sum(1 for v in mn_list.values() if "ENABLED" in str(v)) @@ -379,143 +417,51 @@ def phase_4_enable_sporks(network): print("=" * 60) rpc = network.controller.rpc - - rpc.call("sporkupdate", "SPORK_17_QUORUM_DKG_ENABLED", 0) - rpc.call("sporkupdate", "SPORK_21_QUORUM_ALL_CONNECTED", 0) - rpc.call("sporkupdate", "SPORK_2_INSTANTSEND_ENABLED", 0) - rpc.call("sporkupdate", "SPORK_3_INSTANTSEND_BLOCK_FILTERING", 0) - rpc.call("sporkupdate", "SPORK_19_CHAINLOCKS_ENABLED", 0) - - # Wait for spork propagation - time.sleep(3) - _bump_mocktime(network, 1) - - sporks = rpc.call("spork", "show") - for name in [ + sporks_to_enable = [ "SPORK_17_QUORUM_DKG_ENABLED", "SPORK_21_QUORUM_ALL_CONNECTED", "SPORK_2_INSTANTSEND_ENABLED", + "SPORK_3_INSTANTSEND_BLOCK_FILTERING", "SPORK_19_CHAINLOCKS_ENABLED", - ]: - value = sporks.get(name, "unknown") - print(f" {name}: {value}") - - -def _phase_checks_for(phase): - """Return the (field, count_for_type100, count_for_type103) message gate per phase. - - Follows Dash Core's mine_quorum / mine_cycle_quorum expectations: - - phase 2 (contribute): receivedContributions == size - - phase 3 (complain): receivedComplaints == 0 - - phase 4 (justify): receivedJustifications == 0 - - phase 5 (commit): receivedPrematureCommitments == size - - phases 1/6: no message gate - """ - if phase == 2: - return "receivedContributions", LLMQ_TEST_SIZE, LLMQ_TEST_DIP0024_SIZE - if phase == 3: - return "receivedComplaints", 0, 0 - if phase == 4: - return "receivedJustifications", 0, 0 - if phase == 5: - return "receivedPrematureCommitments", LLMQ_TEST_SIZE, LLMQ_TEST_DIP0024_SIZE - return None, 0, 0 + ] + for name in sporks_to_enable: + rpc.call("sporkupdate", name, 0) + # Wait for spork propagation + time.sleep(3) + network.bump_mocktime(1) -def _dump_dkg_status(network, llmq_type_name, quorum_hash): - """Print per-masternode DKG session state and commitment state for diagnosis.""" - type_num = {"llmq_test": 100, "llmq_test_dip0024": 103}.get(llmq_type_name) - # Check if the quorum has already been mined - a non-null commitment that lands - # in a block is removed from `minableCommitments`, so "no entry" can mean either - # "not yet constructed" or "already committed to chain". Look at `quorum list` - # on the controller to distinguish. 
- try: - qlist = network.controller.rpc.call("quorum", "list", 10) - listed_for_type = qlist.get(llmq_type_name, []) - if quorum_hash in listed_for_type: - print(f" controller quorum list: {llmq_type_name} {quorum_hash[:16]} IS LISTED") - else: - print(f" controller quorum list ({llmq_type_name}): {[h[:16] for h in listed_for_type]}") - except Exception as e: - print(f" controller quorum list error: {e}") - # Also inspect the latest block to see if it contains a non-null commitment. - try: - best_hash = network.controller.rpc.call("getbestblockhash") - block = network.controller.rpc.call("getblock", best_hash, 2) - height = block.get("height") - for tx in block.get("tx", []): - if tx.get("type") == 6 or "qc" in tx or "qcTx" in tx: - print(f" tip block {height} has special tx type={tx.get('type')} txid={tx.get('txid', '')[:16]}") - except Exception as e: - print(f" getblock tip error: {e}") - for mn in network.masternodes: - try: - status = mn.rpc.call("quorum", "dkgstatus") - except Exception as e: - print(f" {mn.name}: dkgstatus error: {e}") - continue - session = next( - ( - s - for s in status.get("session", []) - if s.get("llmqType") == llmq_type_name and s.get("status", {}).get("quorumHash") == quorum_hash - ), - None, - ) - if session is None: - print(f" {mn.name}: no {llmq_type_name} session for {quorum_hash[:16]}") - else: - qs = session.get("status", {}) - print( - f" {mn.name}: phase={qs.get('phase')} " - f"sent=c:{qs.get('sentContributions')},co:{qs.get('sentComplaint')}," - f"j:{qs.get('sentJustification')},pc:{qs.get('sentPrematureCommitment')} " - f"aborted={qs.get('aborted')} bad={qs.get('badMembers')} " - f"recv=c:{qs.get('receivedContributions', 0)},co:{qs.get('receivedComplaints', 0)}," - f"j:{qs.get('receivedJustifications', 0)},pc:{qs.get('receivedPrematureCommitments', 0)}" - ) - all_commits = status.get("minableCommitments", []) - print(f" minableCommitments total={len(all_commits)}") - for commit in all_commits: - pk = commit.get("quorumPublicKey", "?") - mark = ( - " <-- expected" - if ( - commit.get("quorumHash") == quorum_hash and (type_num is None or commit.get("llmqType") == type_num) - ) - else "" - ) - print( - f" type={commit.get('llmqType')} idx={commit.get('quorumIndex')} " - f"qh={commit.get('quorumHash', '')[:16]} " - f"signers={commit.get('signersCount')}/{commit.get('validMembersCount')} " - f"pkHead={pk[:16]}{mark}" - ) - conn_group = next( - ( - g - for g in status.get("quorumConnections", []) - if g.get("llmqType") == llmq_type_name and g.get("quorumHash") == quorum_hash - ), - None, - ) - if conn_group is not None: - peers = conn_group.get("quorumConnections", []) - connected = sum(1 for p in peers if p.get("connected") is True) - print(f" quorumConnections: {connected}/{len(peers)} connected") + sporks = rpc.call("spork", "show") + for name in sporks_to_enable: + print(f" {name}: {sporks.get(name, 'unknown')}") -class DKGCycleError(RuntimeError): - """Raised when a DKG cycle step fails to complete in time.""" +def _wait_for_dkg_phase(network, q_0, q_1, phase): + """Drive one DKG phase transition on both type-100 and type-103 rotating sessions. + At this point the chain is at `cycle+2*(phase-1)-1` for phases >= 2 — i.e. + one block before q_0 enters `phase`. Mines two blocks total (one to enter + phase on q_0, one to enter phase on q_1) with gating on the expected DKG + message count so the chain never advances past a phase before real + messages have been exchanged. + """ + # Phase 6 (finalize) has no message gate; only phases 2-5 do. 
+ field, count_test, count_dip0024 = PHASE_GATES.get(phase, (None, 0, 0)) + network.move_blocks(1) # enter phase on q_0 (and on the type-100 session) + + print(f" Phase {phase} ({PHASE_NAMES[phase]}) q_0...", end="", flush=True) + wait_for_quorum_phase(network, "llmq_test", q_0, phase, LLMQ_TEST_SIZE, field, count_test, timeout=45) + wait_for_quorum_phase( + network, "llmq_test_dip0024", q_0, phase, LLMQ_TEST_DIP0024_SIZE, field, count_dip0024, timeout=45 + ) + print(" ok", end="", flush=True) -def _require(cond, message, network=None, llmq_type_name=None, quorum_hash=None): - if cond: - return - if network is not None and quorum_hash is not None: - print(f"\n Diagnostic for {llmq_type_name} {quorum_hash[:16]}:") - _dump_dkg_status(network, llmq_type_name or "llmq_test", quorum_hash) - raise DKGCycleError(message) + network.move_blocks(1) # enter phase on q_1 (rotating only) + print(" q_1...", end="", flush=True) + wait_for_quorum_phase( + network, "llmq_test_dip0024", q_1, phase, LLMQ_TEST_DIP0024_SIZE, field, count_dip0024, timeout=45 + ) + print(" ok") def _run_single_dkg_cycle(network, cycle_idx, num_cycles, cycle_quorum_is_ready): @@ -537,7 +483,8 @@ def _run_single_dkg_cycle(network, cycle_idx, num_cycles, cycle_quorum_is_ready) """ rpc = network.controller.rpc - # Align to the next cycle boundary, with 3-cycle warmup on the first call. + # Align to the next cycle boundary (staying put if already on one), adding + # the DIP-0024 3-cycle warmup on the first call. height = rpc.call("getblockcount") skip_count = DKG_INTERVAL - (height % DKG_INTERVAL) if skip_count == DKG_INTERVAL: @@ -547,139 +494,52 @@ def _run_single_dkg_cycle(network, cycle_idx, num_cycles, cycle_quorum_is_ready) if total_move > 0: if warmup_blocks > 0: print(f" DIP-0024 warmup: mining {total_move} blocks before first cycle...") - _move_blocks(network, total_move) + network.move_blocks(total_move) q_0 = rpc.call("getbestblockhash") height = rpc.call("getblockcount") print(f"\n Cycle {cycle_idx + 1}/{num_cycles} at height {height} q_0={q_0[:16]}...") - # Phase 1 (init) on q_0 for both types, plus connections + # Phase 1 (init) + peer mesh for q_0 on both llmq_test and llmq_test_dip0024. 
print(" Phase 1 (init) q_0...", end="", flush=True) - _require( - _wait_for_quorum_phase(network, "llmq_test", q_0, 1, LLMQ_TEST_SIZE, timeout=60), - "phase 1 timeout (llmq_test q_0)", - network, - "llmq_test", - q_0, - ) - _require( - _wait_for_quorum_phase(network, "llmq_test_dip0024", q_0, 1, LLMQ_TEST_DIP0024_SIZE, timeout=60), - "phase 1 timeout (llmq_test_dip0024 q_0)", - network, - "llmq_test_dip0024", - q_0, - ) - _require( - _wait_for_quorum_connections(network, "llmq_test", q_0, LLMQ_TEST_SIZE, LLMQ_TEST_SIZE - 1, timeout=60), - "quorum connection timeout (llmq_test q_0)", - network, - "llmq_test", - q_0, - ) - _require( - _wait_for_quorum_connections( - network, "llmq_test_dip0024", q_0, LLMQ_TEST_DIP0024_SIZE, LLMQ_TEST_DIP0024_SIZE - 1, timeout=60 - ), - "quorum connection timeout (llmq_test_dip0024 q_0)", - network, - "llmq_test_dip0024", - q_0, - ) + wait_for_quorum_phase(network, "llmq_test", q_0, 1, LLMQ_TEST_SIZE, timeout=60) + wait_for_quorum_phase(network, "llmq_test_dip0024", q_0, 1, LLMQ_TEST_DIP0024_SIZE, timeout=60) + wait_for_quorum_connections(network, "llmq_test", q_0, LLMQ_TEST_SIZE, timeout=60) + wait_for_quorum_connections(network, "llmq_test_dip0024", q_0, LLMQ_TEST_DIP0024_SIZE, timeout=60) print(" ok") - # Advance 1 block -> q_1 (the rotating pair's second quorum) enters phase 1 - _move_blocks(network, 1) + # Advance 1 block -> q_1 (the rotating pair's second quorum) enters phase 1. + network.move_blocks(1) q_1 = rpc.call("getbestblockhash") print(f" Phase 1 (init) q_1={q_1[:16]}...", end="", flush=True) - _require( - _wait_for_quorum_phase(network, "llmq_test_dip0024", q_1, 1, LLMQ_TEST_DIP0024_SIZE, timeout=60), - "phase 1 timeout (llmq_test_dip0024 q_1)", - network, - "llmq_test_dip0024", - q_1, - ) - _require( - _wait_for_quorum_connections( - network, "llmq_test_dip0024", q_1, LLMQ_TEST_DIP0024_SIZE, LLMQ_TEST_DIP0024_SIZE - 1, timeout=60 - ), - "quorum connection timeout (llmq_test_dip0024 q_1)", - network, - "llmq_test_dip0024", - q_1, - ) + wait_for_quorum_phase(network, "llmq_test_dip0024", q_1, 1, LLMQ_TEST_DIP0024_SIZE, timeout=60) + wait_for_quorum_connections(network, "llmq_test_dip0024", q_1, LLMQ_TEST_DIP0024_SIZE, timeout=60) print(" ok") - # Walk phases 2-6 block-by-block, alternating q_0 and q_1 checks. - # At each even block of the cycle, q_0 enters the next phase (together - # with the llmq_test session). At each odd block, q_1 enters it. + # Phases 2-6 block-by-block. Each iteration mines 2 blocks: one to enter + # the phase on q_0 (and the type-100 session, which shares even offsets), + # and one to enter the phase on q_1. 
for phase in range(2, 7): - field, count_test, count_dip0024 = _phase_checks_for(phase) - _move_blocks(network, 1) # enter phase on q_0 + type 100 - - phase_name = {2: "contribute", 3: "complain", 4: "justify", 5: "commit", 6: "finalize"}[phase] - print(f" Phase {phase} ({phase_name}) q_0...", end="", flush=True) - _require( - _wait_for_quorum_phase(network, "llmq_test", q_0, phase, LLMQ_TEST_SIZE, field, count_test, timeout=45), - f"phase {phase} timeout (llmq_test q_0)", - network, - "llmq_test", - q_0, - ) - _require( - _wait_for_quorum_phase( - network, "llmq_test_dip0024", q_0, phase, LLMQ_TEST_DIP0024_SIZE, field, count_dip0024, timeout=45 - ), - f"phase {phase} timeout (llmq_test_dip0024 q_0)", - network, - "llmq_test_dip0024", - q_0, - ) - print(" ok", end="", flush=True) - - _move_blocks(network, 1) # enter phase on q_1 - print(" q_1...", end="", flush=True) - _require( - _wait_for_quorum_phase( - network, "llmq_test_dip0024", q_1, phase, LLMQ_TEST_DIP0024_SIZE, field, count_dip0024, timeout=45 - ), - f"phase {phase} timeout (llmq_test_dip0024 q_1)", - network, - "llmq_test_dip0024", - q_1, - ) - print(" ok") + _wait_for_dkg_phase(network, q_0, q_1, phase) # Mine the commit block. At cycle+12 the controller creates a block that # includes the real finalCommitment txs for type 100 (window [cycle+10, # cycle+18]) and for both type 103 rotating quorums (window [cycle+12, # cycle+20]). Mirrors the final mining step in test_framework's # mine_cycle_quorum (getblocktemplate + generate(1)). - _bump_mocktime(network, 1) + network.bump_mocktime(1) rpc.call("getblocktemplate") - _move_blocks(network, 1) - - # Confirm all three real quorums were recorded by the chain. `quorum list` - # reflects commitments stored in evoDB by ProcessCommitment, which only - # writes non-null commitments. If any quorum is missing, its commitment - # was null in the mined block. - _require( - _wait_for_quorum_list(network, "llmq_test", [q_0], timeout=15), - "llmq_test q_0 missing from quorum list after commit block", - network, - "llmq_test", - q_0, - ) - _require( - _wait_for_quorum_list(network, "llmq_test_dip0024", [q_0, q_1], timeout=15), - "llmq_test_dip0024 q_0/q_1 missing from quorum list after commit block", - network, - "llmq_test_dip0024", - q_0, - ) + network.move_blocks(1) + + # `quorum list` reflects commitments stored in evoDB by ProcessCommitment, + # which only writes non-null commitments. If a hash is missing, the + # corresponding commitment was null in the mined block. + wait_for_quorum_list(network, "llmq_test", [q_0]) + wait_for_quorum_list(network, "llmq_test_dip0024", [q_0, q_1]) # Mine 8 blocks (SIGN_HEIGHT_OFFSET) for signing-window maturity, matching # the tail of test_framework.mine_cycle_quorum. - _move_blocks(network, 8) + network.move_blocks(8) qlist = rpc.call("quorum", "list") print( @@ -698,23 +558,20 @@ def phase_5_mine_dkg_cycles(network, num_cycles): print(f"Phase 5: Mine {num_cycles} DKG cycles") print("=" * 60) - completed_cycles = 0 cycle_quorum_is_ready = False for cycle in range(num_cycles): _run_single_dkg_cycle(network, cycle, num_cycles, cycle_quorum_is_ready) # Warmup is applied on the first call; subsequent cycles skip it. 
cycle_quorum_is_ready = True - completed_cycles += 1 rpc = network.controller.rpc height = rpc.call("getblockcount") - quorum_list = rpc.call("quorum", "list") - print(f"\n Completed {completed_cycles}/{num_cycles} DKG cycles (height: {height})") + qlist = rpc.call("quorum", "list") + print(f"\n Completed {num_cycles}/{num_cycles} DKG cycles (height: {height})") print( - f" Quorums: llmq_test={len(quorum_list.get('llmq_test', []))}, " - f"llmq_test_dip0024={len(quorum_list.get('llmq_test_dip0024', []))}" + f" Quorums: llmq_test={len(qlist.get('llmq_test', []))}, " + f"llmq_test_dip0024={len(qlist.get('llmq_test_dip0024', []))}" ) - return completed_cycles def phase_6_generate_test_transactions(network): @@ -724,26 +581,20 @@ def phase_6_generate_test_transactions(network): print("=" * 60) rpc = network.controller.rpc - - addresses = [] - for _ in range(10): - addr = rpc.call("getnewaddress", wallet="wallet") - addresses.append(addr) - + addresses = [rpc.call("getnewaddress", wallet="wallet") for _ in range(10)] amounts = [1.0, 5.0, 10.0, 0.5, 25.0, 0.1, 50.0, 2.5] for i, amount in enumerate(amounts): - addr = addresses[i % len(addresses)] - rpc.call("sendtoaddress", addr, amount) + rpc.call("sendtoaddress", addresses[i % len(addresses)], amount) if (i + 1) % 3 == 0: - _move_blocks(network, 1) + network.move_blocks(1) - _move_blocks(network, 6) + network.move_blocks(6) height = rpc.call("getblockcount") print(f" Generated {len(amounts)} test transactions (height: {height})") -def phase_7_export(network, config, dkg_cycles_completed): +def phase_7_export(network, output_base_dir, dkg_cycles): """Export all node data and metadata.""" print("\n" + "=" * 60) print("Phase 7: Export") @@ -752,7 +603,7 @@ def phase_7_export(network, config, dkg_cycles_completed): rpc = network.controller.rpc chain_height = rpc.call("getblockcount") - output_dir = Path(config.output_dir) / "regtest-mn-v0.0.1" + output_dir = Path(output_base_dir) / "regtest-mn-v0.0.1" if output_dir.exists(): print(f" Removing existing output: {output_dir}") shutil.rmtree(output_dir) @@ -766,32 +617,24 @@ def phase_7_export(network, config, dkg_cycles_completed): save_wallet_file(wallet_stats, wallets_dir / "wallet.json") print(f" wallet.json: {len(wallet_stats['transactions'])} txs, balance: {wallet_stats['balance']:.8f}") - # Stop all nodes cleanly print(" Stopping all nodes...") network.stop_all() time.sleep(2) - # Copy datadirs print(" Copying controller datadir...") - controller_dest = output_dir / "controller" - shutil.copytree(network.controller.datadir / "regtest", controller_dest / "regtest") + shutil.copytree(network.controller.datadir / "regtest", output_dir / "controller" / "regtest") for i, mn in enumerate(network.masternodes): mn_name = f"mn{i + 1}" print(f" Copying {mn_name} datadir...") - mn_dest = output_dir / mn_name - shutil.copytree(mn.datadir / "regtest", mn_dest / "regtest") + shutil.copytree(mn.datadir / "regtest", output_dir / mn_name / "regtest") - # Write network.json network_metadata = { "version": "0.0.1", "chain_height": chain_height, - "dkg_cycles_completed": dkg_cycles_completed, + "dkg_cycles_completed": dkg_cycles, "dkg_interval": DKG_INTERVAL, - "controller": { - "datadir": "controller", - "wallet": "wallet", - }, + "controller": {"datadir": "controller", "wallet": "wallet"}, "masternodes": [ { "index": mn["index"], @@ -816,7 +659,7 @@ def phase_7_export(network, config, dkg_cycles_completed): print(f"\n Exported to {output_dir}") print(f" Total size: {total_size / 1024 / 1024:.1f} MB") print(f" 
Chain height: {chain_height}") - print(f" DKG cycles: {dkg_cycles_completed}") + print(f" DKG cycles: {dkg_cycles}") return output_dir @@ -828,38 +671,31 @@ def main(): parser.add_argument("--output-dir", default=str(Path(__file__).parent / "data"), help="Output directory") args = parser.parse_args() - config = MasternodeConfig( - dashd_path=args.dashd_path, - dkg_cycles=args.dkg_cycles, - output_dir=args.output_dir, - ) - - dashd_bin = Path(config.dashd_path) + dashd_bin = Path(args.dashd_path) if not dashd_bin.exists(): print(f"dashd not found: {dashd_bin}") sys.exit(1) - extra_args = list(DASHD_EXTRA_ARGS) - extra_args.append(f"-sporkkey={SPORK_PRIVATE_KEY}") - + extra_args = [*DASHD_EXTRA_ARGS, f"-sporkkey={SPORK_PRIVATE_KEY}"] network = MasternodeNetwork( - dashd_path=config.dashd_path, + dashd_path=args.dashd_path, num_masternodes=NUM_MASTERNODES, base_extra_args=extra_args, ) try: - # Set initial mocktime on network object before any node starts - network._mocktime = TIME_GENESIS_BLOCK - + # Seed mocktime before any node starts so `-mocktime=` matches the + # tracked value when the controller first launches. + network.mocktime = TIME_GENESIS_BLOCK network.start_controller(extra_args=[f"-mocktime={TIME_GENESIS_BLOCK}"]) + phase_1_bootstrap(network) phase_2_register_masternodes(network) phase_3_start_masternodes(network) phase_4_enable_sporks(network) - dkg_cycles = phase_5_mine_dkg_cycles(network, config.dkg_cycles) + phase_5_mine_dkg_cycles(network, args.dkg_cycles) phase_6_generate_test_transactions(network) - output_dir = phase_7_export(network, config, dkg_cycles) + output_dir = phase_7_export(network, args.output_dir, args.dkg_cycles) print("\n" + "=" * 60) print("Generation complete!") diff --git a/generator/masternode_network.py b/generator/masternode_network.py index d333e5a..cbb4d0c 100644 --- a/generator/masternode_network.py +++ b/generator/masternode_network.py @@ -7,6 +7,7 @@ import shutil import socket +import subprocess import tempfile import time from pathlib import Path @@ -30,8 +31,6 @@ def __init__(self, name, dashd_path, datadir, rpc_port, p2p_port, extra_args=Non def start(self): """Start the dashd process.""" - import subprocess - regtest_dir = self.datadir / "regtest" regtest_dir.mkdir(parents=True, exist_ok=True) @@ -123,6 +122,37 @@ def stop(self): pass self.process = None + def force_finish_mnsync(self, attempts=20, poll=0.5): + """Force mnsync completion. + + Masternodes reject connections until they have finished mnsync, so + this must be driven explicitly after start-up before peer connections + are issued. + """ + for _ in range(attempts): + try: + status = self.rpc.call("mnsync", "status") + if status.get("IsSynced", False): + return + self.rpc.call("mnsync", "next") + time.sleep(poll) + except Exception: + time.sleep(poll) + + def set_mocktime(self, mocktime, seconds=None): + """Set mocktime on this node and optionally tick the scheduler. + + When `seconds` is given, also runs `mockscheduler` so scheduled tasks + (DKG session processing, etc.) fire at the advanced time. + """ + try: + self.rpc.call("setmocktime", mocktime) + if seconds is not None: + self.rpc.call("mockscheduler", seconds) + except Exception: + # Nodes may be briefly unresponsive during DKG work; tolerate. 
+ pass + def find_free_port(start=19000, attempts=100): """Find an available TCP port.""" @@ -153,6 +183,31 @@ def __init__(self, dashd_path, num_masternodes=4, base_extra_args=None): self.masternode_info = [] # BLS keys, addresses, proTxHashes self.mn_p2p_ports = [] # Pre-allocated P2P ports for MN registration self.fund_address = None # Address holding mining rewards (set in bootstrap) + self.mocktime = 0 # Shared mock time, advanced via set/bump_mocktime + + def all_nodes(self): + """Return [controller, *masternodes], skipping any that are not started.""" + return [n for n in ([self.controller] + self.masternodes) if n is not None] + + def set_mocktime(self, mocktime): + """Set mocktime on all running nodes and update the tracked value.""" + self.mocktime = mocktime + for node in self.all_nodes(): + node.set_mocktime(mocktime) + + def bump_mocktime(self, seconds=1): + """Advance mocktime by `seconds` on all running nodes and tick the scheduler.""" + self.mocktime += seconds + for node in self.all_nodes(): + node.set_mocktime(self.mocktime, seconds=seconds) + + def move_blocks(self, count): + """Bump mocktime, mine `count` blocks on the controller, then sync masternodes.""" + if count <= 0: + return + self.bump_mocktime(1) + self.generate_blocks(count) + self.wait_for_sync() def allocate_mn_ports(self): """Pre-allocate P2P ports for masternodes (needed for protx registration).""" @@ -187,7 +242,7 @@ def start_controller(self, extra_args=None): self.controller.start() return self.controller - def start_masternode_nodes(self, controller_datadir): + def start_masternode_nodes(self): """Start masternode nodes from a copy of the controller's datadir. Each node gets a unique BLS private key. Connection and mnsync @@ -195,6 +250,7 @@ def start_masternode_nodes(self, controller_datadir): Must be called after masternodes have been registered on the controller. """ print("\n Starting masternode nodes...") + assert self.mn_p2p_ports, "allocate_mn_ports() must be called before start_masternode_nodes()" # Stop controller briefly to copy its datadir controller_rpc_port = self.controller.rpc_port @@ -205,12 +261,9 @@ def start_masternode_nodes(self, controller_datadir): self.controller.stop() time.sleep(2) - # Restart controller (with current mocktime if available) - restart_args = list(controller_extra) - if hasattr(self, "_mocktime") and self._mocktime: - # Remove any old mocktime arg and add current one - restart_args = [a for a in restart_args if not a.startswith("-mocktime=")] - restart_args.append(f"-mocktime={self._mocktime}") + # Restart controller with current mocktime baked into the command line. 
+ restart_args = [a for a in controller_extra if not a.startswith("-mocktime=")] + restart_args.append(f"-mocktime={self.mocktime}") self.controller = MasternodeNode( name="controller", dashd_path=self.dashd_path, @@ -236,11 +289,7 @@ def start_masternode_nodes(self, controller_datadir): if stale_path.exists(): stale_path.unlink() - # Use pre-allocated P2P port if available, otherwise find a free one - if i < len(self.mn_p2p_ports): - p2p_port = self.mn_p2p_ports[i] - else: - p2p_port = find_free_port(controller_p2p_port + 10 + i * 10) + p2p_port = self.mn_p2p_ports[i] rpc_port = find_free_port(p2p_port + 1) mn_args = list(self.base_extra_args) @@ -250,11 +299,9 @@ def start_masternode_nodes(self, controller_datadir): "-peerblockfilters=1", "-txindex=1", f"-masternodeblsprivkey={mn_info['bls_private_key']}", + f"-mocktime={self.mocktime}", ] ) - # Pass mocktime at startup so DKG scheduling works - if hasattr(self, "_mocktime") and self._mocktime: - mn_args.append(f"-mocktime={self._mocktime}") node = MasternodeNode( name=mn_name, @@ -268,12 +315,20 @@ def start_masternode_nodes(self, controller_datadir): self.masternodes.append(node) def connect_all(self): - """Connect all nodes to each other following Dash Core's test framework. + """Establish the full controller↔MN and MN↔MN peer mesh. - Disables masternode threads during connection to prevent interference - with the P2P handshake (matching DashTestFramework.connect_nodes). - Uses "onetry" mode as the Dash Core test framework does. + Following Dash Core's test framework, masternode threads are disabled + during connection to prevent interference with the P2P handshake, and + the direct MN↔MN links ensure DKG contributions propagate without + waiting for the quorum manager to build them lazily. """ + + def try_addnode(from_node, target_addr, label): + try: + from_node.rpc.call("addnode", target_addr, "onetry") + except Exception as e: + print(f" Warning: addnode {label} failed: {e}") + # Disable MN threads during connection (prevents handshake interference) for mn in self.masternodes: try: @@ -281,33 +336,16 @@ def connect_all(self): except Exception: pass - # Connect each MN to the controller + controller_addr = f"127.0.0.1:{self.controller.p2p_port}" for mn in self.masternodes: - controller_addr = f"127.0.0.1:{self.controller.p2p_port}" - try: - mn.rpc.call("addnode", controller_addr, "onetry") - except Exception as e: - print(f" Warning: addnode {mn.name}->controller failed: {e}") + try_addnode(mn, controller_addr, f"{mn.name}->controller") + try_addnode(self.controller, f"127.0.0.1:{mn.p2p_port}", f"controller->{mn.name}") - # Also connect controller to each MN - for mn in self.masternodes: - mn_addr = f"127.0.0.1:{mn.p2p_port}" - try: - self.controller.rpc.call("addnode", mn_addr, "onetry") - except Exception as e: - print(f" Warning: addnode controller->{mn.name} failed: {e}") - - # Direct MN<->MN connections so DKG message exchange does not have to - # wait for the quorum manager to build them lazily. Without these, - # DIP-0024 DKGs (4 members, minSize=4) can advance past phase 2 before - # contributions have propagated, producing null commitments. + # Direct MN<->MN links: DIP-0024 quorums (minSize=4) need contributions + # from every member, so seeding the mesh avoids phase-2 starvation. 
for i, mn_a in enumerate(self.masternodes): for mn_b in self.masternodes[i + 1 :]: - target = f"127.0.0.1:{mn_b.p2p_port}" - try: - mn_a.rpc.call("addnode", target, "onetry") - except Exception as e: - print(f" Warning: addnode {mn_a.name}->{mn_b.name} failed: {e}") + try_addnode(mn_a, f"127.0.0.1:{mn_b.p2p_port}", f"{mn_a.name}->{mn_b.name}") # Re-enable MN threads for mn in self.masternodes: From e955e7f69ceff6fb3f5b5cf86af0500efb201bc7 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Sat, 18 Apr 2026 01:08:45 +1000 Subject: [PATCH 5/8] feat: extend chain past next DKG window before export Add `phase_6b_extend_to_quiet_tip` that stops the masternodes and mines controller-only blocks past the next cycle's mining window (+8 maturity) before export. For the default 8-cycle run this lands the exported tip at height 460, matching the formula `current_cycle_start + DKG_INTERVAL + dkgMiningWindowEnd + SIGN_HEIGHT_OFFSET`. Previously, phase 6 left the chain tip inside a DKG cycle that had started but not reached its mining window (tip 412, inside cycle 408's phase 3). Test harnesses bringing the masternodes back online at that tip would miss phases 1-3 of the in-progress cycle, fail to contribute, and produce null commitments when their mine_dkg_cycle extended past the mining window. That left a future QRInfo diff with no ChainLock sigs for the rotation cycle, tripping the rust-dashcore QRInfo pre-check "Missing rotation ChainLock signatures in QRInfo: sigm0" in `test_instantsend_islock_arrives_before_tx`. Stopping the masternodes before extending guarantees the intermediate cycles settle as null commitments (no MN to broadcast `qfcommit`, so the controller's local `minableCommitmentsByQuorum` is empty at mining time and null commits land deterministically). By the exported tip the mining window has closed, so null commits are already written to evoDB and the cycle is no longer "in progress". When the test harness later restarts the masternodes, it sees a settled chain and can drive a fresh DKG for the next cycle boundary via `mine_dkg_cycle` without racing a half-lived DKG. Extract `DKG_MINING_WINDOW_END` and `SIGN_HEIGHT_OFFSET` as module constants and use them in phase 5's maturity mine (replacing the inline `8`) so the window arithmetic lives in one place. --- generate_masternode.py | 68 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 65 insertions(+), 3 deletions(-) diff --git a/generate_masternode.py b/generate_masternode.py index c2b86fe..266d09c 100755 --- a/generate_masternode.py +++ b/generate_masternode.py @@ -32,6 +32,8 @@ NUM_MASTERNODES = 4 LLMQ_TEST_SIZE = 3 # llmq_test (type 100) - 3 members out of 4 MNs LLMQ_TEST_DIP0024_SIZE = 4 # llmq_test_dip0024 (type 103) - all 4 MNs, minSize=4 +DKG_MINING_WINDOW_END = 20 # matches llmq_test / llmq_test_dip0024 dkgMiningWindowEnd +SIGN_HEIGHT_OFFSET = 8 # maturity blocks after commit so quorum is signing-eligible SPORK_PRIVATE_KEY = "cP4EKFyJsHT39LDqgdcB43Y3YXjNyjb5Fuas1GQSeAtjnZWmZEQK" DASHD_EXTRA_ARGS = [ @@ -537,9 +539,8 @@ def _run_single_dkg_cycle(network, cycle_idx, num_cycles, cycle_quorum_is_ready) wait_for_quorum_list(network, "llmq_test", [q_0]) wait_for_quorum_list(network, "llmq_test_dip0024", [q_0, q_1]) - # Mine 8 blocks (SIGN_HEIGHT_OFFSET) for signing-window maturity, matching - # the tail of test_framework.mine_cycle_quorum. - network.move_blocks(8) + # SIGN_HEIGHT_OFFSET maturity blocks, matching test_framework.mine_cycle_quorum. 
+ network.move_blocks(SIGN_HEIGHT_OFFSET) qlist = rpc.call("quorum", "list") print( @@ -594,6 +595,66 @@ def phase_6_generate_test_transactions(network): print(f" Generated {len(amounts)} test transactions (height: {height})") +def phase_6b_extend_to_quiet_tip(network): + """Stop masternodes and extend the chain past the next DKG mining window. + + After phase 5 (orchestrated DKG cycles) and phase 6 (test transactions), + the tip typically sits inside a DKG cycle whose phase 1 already started + but whose mining window has not opened yet (e.g. block 412, inside cycle + 408's phase 3 for the default 8-cycle run). Exporting at that tip is + pathological for test harnesses that bring the MNs back online later: + the MNs miss the early phases of the in-progress cycle and cannot + contribute, so when the test mines further blocks the cycle's mining + window fills with null commitments. Those null commits then leave a + future QRInfo range without ChainLock sigs for the rotation, tripping + the rust-dashcore QRInfo pre-check ("Missing rotation ChainLock + signatures in QRInfo"). + + The fix: stop the masternodes and mine controller-only blocks past the + next cycle's mining window + maturity. With no MN to broadcast qfcommit, + the controller's `minableCommitmentsByQuorum` is empty at the window, so + null commitments are mined deterministically and the cycle is SETTLED in + the chain. When the test harness later restarts MNs at the exported tip + and drives `mine_dkg_cycle`, it aligns to the next boundary and runs a + fresh DKG from phase 1 — no partial cycle to race against. + + Target tip formula matches the convention in the task brief: + `cycle_start_of_current_cycle + DKG_INTERVAL + dkgMiningWindowEnd + + SIGN_HEIGHT_OFFSET`. For the default run (current tip ~412, current + cycle start 408) this lands at 460. + """ + print("\n" + "=" * 60) + print("Phase 6b: Extend chain to quiet tip") + print("=" * 60) + + rpc = network.controller.rpc + current = rpc.call("getblockcount") + current_cycle_start = current - (current % DKG_INTERVAL) + target = current_cycle_start + DKG_INTERVAL + DKG_MINING_WINDOW_END + SIGN_HEIGHT_OFFSET + if current >= target: + print(f" Already at tip {current}, no extension needed") + return + + print(" Stopping masternodes (controller-only mining from here)...") + for mn in network.masternodes: + mn.stop() + time.sleep(2) + + # Mine on the controller in batches, advancing mocktime in step so block + # timestamps stay strictly increasing. Matches the cache-setup pattern + # in phase_1_bootstrap. 
+ remaining = target - current + print(f" Mining {remaining} blocks on controller (tip {current} -> {target})...") + while remaining > 0: + batch = min(remaining, 25) + network.bump_mocktime(batch * 156) + rpc.call("generatetoaddress", batch, network.fund_address) + remaining -= batch + + final_tip = rpc.call("getblockcount") + print(f" Final tip: {final_tip}") + + def phase_7_export(network, output_base_dir, dkg_cycles): """Export all node data and metadata.""" print("\n" + "=" * 60) @@ -695,6 +756,7 @@ def main(): phase_4_enable_sporks(network) phase_5_mine_dkg_cycles(network, args.dkg_cycles) phase_6_generate_test_transactions(network) + phase_6b_extend_to_quiet_tip(network) output_dir = phase_7_export(network, args.output_dir, args.dkg_cycles) print("\n" + "=" * 60) From 4aaa40da621aa3de3193656faaa55927832bff33 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Sat, 18 Apr 2026 08:48:12 +1000 Subject: [PATCH 6/8] fix: land exported tip in DKG Idle gap with masternodes running MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous `phase_6b_extend_to_quiet_tip` (added in 4ec740f) shut down masternodes and mined controller-only blocks past the next cycle's mining window to "pre-settle" in-progress DKGs as null commits. That inverts the actual requirement: null rotating commits in the exported chain are precisely what trips rust-dashcore's QRInfo pre-check. The concrete failure mode, traced against Dash Core and rust-dashcore: - When the SPV's QRInfo diff covers a rotating cycle whose commitment is null, Dash Core's `BuildQuorumChainlockInfo` walks `new_quorums` and calls `qman.GetQuorum(type, quorumHash)`. Failed DKG commits have no quorum object, so that call returns null and the diff arrives with the null cycle's slot empty in `quorumsCLSigs`. - rust-dashcore's `apply_diff(mn_list_diff_h)` then cannot pull `sigm0` / `sigm1` / ... for that rotating slot, so `maybe_sigmN = None` and `feed_qr_info` rejects the QRInfo with `Missing rotation ChainLock signatures in QRInfo: sigmN`. The correct pattern, matching upstream's `mine_quorum` + `move_blocks` convention: - Keep all masternodes running for the whole chain-gen. - After the last orchestrated DKG cycle (phase 5 leaves tip at `cycle_N + dkgMiningWindowEnd` = `cycle_N + 20`), send the SPV test transactions and mine just enough blocks to confirm them AND to land the final tip in the DKG Idle gap at `cycle_N + 23`. - `cycle_N + 23` is the last Idle-phase block before the next cycle's phase 1 at `cycle_N + 24`: no DKG is mid-flight at the exported tip, every rotating commit on-chain is real, and a subsequent QRInfo's work block at `tip - 8 = 399` (for N=384) already sees the latest cycle's commit at `cycle_N + 12 = 396`. Changes: - Remove `phase_6b_extend_to_quiet_tip` entirely and drop its call from `main`. - Rework `phase_6_generate_test_transactions` to send all SPV transactions into the mempool, then mine exactly `DKG_INTERVAL - 1 - DKG_MINING_WINDOW_END` = 3 blocks — lands tip at `cycle_N + 23` and confirms every transaction with ≥1 confirmation. Add an assertion that the entry tip is exactly `cycle_N + DKG_MINING_WINDOW_END` and that the final tip's cycle offset lands in the Idle gap `(DKG_MINING_WINDOW_END, DKG_INTERVAL)`. Verified end-to-end: tip=407 (407 % 24 = 23 ∈ [21, 23]), 8 real `llmq_test` quorums and 16 real `llmq_test_dip0024` rotating quorums (all with non-zero `quorumPublicKey`), zero null commits anywhere in the chain. 
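For reference, the landing math in standalone form (a sketch, not part of
the diff; constants mirror the script's module constants):

    DKG_INTERVAL = 24           # blocks per DKG cycle
    DKG_MINING_WINDOW_END = 20  # last commit-mining offset within a cycle

    def blocks_to_idle_tip(tip):
        """Blocks still to mine so the tip lands at cycle_N + (DKG_INTERVAL - 1)."""
        offset = tip % DKG_INTERVAL
        assert offset == DKG_MINING_WINDOW_END, "phase 5 exits at cycle_N + 20"
        return DKG_INTERVAL - 1 - DKG_MINING_WINDOW_END  # = 3

    # Default 8-cycle run: phase 5 exits at tip 404 (cycle_N = 384, offset 20).
    # blocks_to_idle_tip(404) == 3, so the tip lands at 407, and 407 % 24 == 23
    # is the last Idle block before phase 1 of cycle 408.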
--- generate_masternode.py | 113 +++++++++++++++++------------------------ 1 file changed, 46 insertions(+), 67 deletions(-) diff --git a/generate_masternode.py b/generate_masternode.py index 266d09c..851fce8 100755 --- a/generate_masternode.py +++ b/generate_masternode.py @@ -576,7 +576,34 @@ def phase_5_mine_dkg_cycles(network, num_cycles): def phase_6_generate_test_transactions(network): - """Send transactions to the SPV test wallet.""" + """Send transactions to the SPV test wallet and land tip in the DKG Idle gap. + + At entry the tip is `cycle_N + 20` (just past the last orchestrated + cycle's mining window, with masternodes still running). We send the SPV + test transactions into the mempool and then mine exactly enough blocks + to confirm them AND to land the final tip at `cycle_N + 23`, which is + the last Idle-phase block before the next cycle's phase 1 at + `cycle_{N+1} + 0`. + + Why this range: a tip at `cycle_N + X` for X in [21, 23] sits in the + DKG Idle window (the state machine moves to Idle at `cycle_N + 12` and + stays there until the next cycle's phase 1). Crucially: + + - No DKG phase is mid-flight at the exported tip, so when a test harness + restarts the masternodes they do not inherit a partially-lived DKG + (which would null-commit under them when later blocks reached the + mining window). + - The most recent rotating commit is already in evoDB (mined at + `cycle_N + 12`), so a subsequent QRInfo whose work block is `tip - 8` + can reach it — at tip 407 the work block is 399, well past the + commit at 396 for cycle_start=384. + - The tip is still BEFORE the next cycle's phase 1 at `cycle_N + 24`, + so `mine_dkg_cycle` invoked from the test harness aligns to that + boundary and drives the next DKG from phase 1 with MNs online. + + Masternodes stay running throughout so every rotating commitment in + the chain is real (non-zero `signers`, non-zero `quorumPublicKey`). + """ print("\n" + "=" * 60) print("Phase 6: Generate SPV test transactions") print("=" * 60) @@ -586,73 +613,26 @@ def phase_6_generate_test_transactions(network): amounts = [1.0, 5.0, 10.0, 0.5, 25.0, 0.1, 50.0, 2.5] for i, amount in enumerate(amounts): rpc.call("sendtoaddress", addresses[i % len(addresses)], amount) - if (i + 1) % 3 == 0: - network.move_blocks(1) - - network.move_blocks(6) + # Mine enough blocks to confirm all transactions and place the tip at + # `cycle_N + (DKG_INTERVAL - 1)` — the last Idle-phase block before the + # next cycle's phase 1. Phase 5 leaves tip at `cycle_N + DKG_MINING_WINDOW_END`, + # so we need `DKG_INTERVAL - 1 - DKG_MINING_WINDOW_END` additional blocks. height = rpc.call("getblockcount") - print(f" Generated {len(amounts)} test transactions (height: {height})") - - -def phase_6b_extend_to_quiet_tip(network): - """Stop masternodes and extend the chain past the next DKG mining window. - - After phase 5 (orchestrated DKG cycles) and phase 6 (test transactions), - the tip typically sits inside a DKG cycle whose phase 1 already started - but whose mining window has not opened yet (e.g. block 412, inside cycle - 408's phase 3 for the default 8-cycle run). Exporting at that tip is - pathological for test harnesses that bring the MNs back online later: - the MNs miss the early phases of the in-progress cycle and cannot - contribute, so when the test mines further blocks the cycle's mining - window fills with null commitments. 
-    Those null commits then leave a future QRInfo range without ChainLock
-    sigs for the rotation, tripping the rust-dashcore QRInfo pre-check
-    ("Missing rotation ChainLock signatures in QRInfo").
-
-    The fix: stop the masternodes and mine controller-only blocks past the
-    next cycle's mining window + maturity. With no MN to broadcast qfcommit,
-    the controller's `minableCommitmentsByQuorum` is empty at the window, so
-    null commitments are mined deterministically and the cycle is SETTLED in
-    the chain. When the test harness later restarts MNs at the exported tip
-    and drives `mine_dkg_cycle`, it aligns to the next boundary and runs a
-    fresh DKG from phase 1 — no partial cycle to race against.
-
-    Target tip formula matches the convention in the task brief:
-    `cycle_start_of_current_cycle + DKG_INTERVAL + dkgMiningWindowEnd +
-    SIGN_HEIGHT_OFFSET`. For the default run (current tip ~412, current
-    cycle start 408) this lands at 460.
-    """
-    print("\n" + "=" * 60)
-    print("Phase 6b: Extend chain to quiet tip")
-    print("=" * 60)
-
-    rpc = network.controller.rpc
-    current = rpc.call("getblockcount")
-    current_cycle_start = current - (current % DKG_INTERVAL)
-    target = current_cycle_start + DKG_INTERVAL + DKG_MINING_WINDOW_END + SIGN_HEIGHT_OFFSET
-    if current >= target:
-        print(f" Already at tip {current}, no extension needed")
-        return
-
-    print(" Stopping masternodes (controller-only mining from here)...")
-    for mn in network.masternodes:
-        mn.stop()
-    time.sleep(2)
-
-    # Mine on the controller in batches, advancing mocktime in step so block
-    # timestamps stay strictly increasing. Matches the cache-setup pattern
-    # in phase_1_bootstrap.
-    remaining = target - current
-    print(f" Mining {remaining} blocks on controller (tip {current} -> {target})...")
-    while remaining > 0:
-        batch = min(remaining, 25)
-        network.bump_mocktime(batch * 156)
-        rpc.call("generatetoaddress", batch, network.fund_address)
-        remaining -= batch
-
-    final_tip = rpc.call("getblockcount")
-    print(f" Final tip: {final_tip}")
+    cycle_offset = height % DKG_INTERVAL
+    assert cycle_offset == DKG_MINING_WINDOW_END, (
+        f"Expected phase 5 to leave tip at cycle_offset={DKG_MINING_WINDOW_END}, got {cycle_offset} (tip {height})"
+    )
+    blocks_to_mine = DKG_INTERVAL - 1 - DKG_MINING_WINDOW_END
+    network.move_blocks(blocks_to_mine)
+
+    final_height = rpc.call("getblockcount")
+    final_offset = final_height % DKG_INTERVAL
+    assert DKG_MINING_WINDOW_END < final_offset < DKG_INTERVAL, (
+        f"Final tip {final_height} at cycle_offset={final_offset} is not in the "
+        f"DKG Idle gap ({DKG_MINING_WINDOW_END + 1}..{DKG_INTERVAL - 1})"
+    )
+    print(f" Generated {len(amounts)} test transactions (tip: {final_height}, cycle_offset: {final_offset})")
 
 
 def phase_7_export(network, output_base_dir, dkg_cycles):
@@ -756,7 +736,6 @@ def main():
     phase_4_enable_sporks(network)
     phase_5_mine_dkg_cycles(network, args.dkg_cycles)
     phase_6_generate_test_transactions(network)
-    phase_6b_extend_to_quiet_tip(network)
     output_dir = phase_7_export(network, args.output_dir, args.dkg_cycles)
 
     print("\n" + "=" * 60)

From 88ab7b6b5499911f77ee8dcbbb59f5db143aae9c Mon Sep 17 00:00:00 2001
From: xdustinface
Date: Sat, 18 Apr 2026 23:26:15 +1000
Subject: [PATCH 7/8] fix: center exit tip in the DKG Idle gap for
 test-harness margin
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Previously the exit tip landed at `cycle_N + 23` — the last block of
the Idle gap, flush against the next cycle's phase 1 at `cycle_N + 24`.
Any test harness that mines a single block during its setup before
querying the chain (a common pattern for advancing mocktime or
triggering an `UpdatedBlockTip` refresh) would cross into block
`cycle_N + 24` and kick off a DKG phase 1 that the just-started
masternodes can't participate in, leading to a null commitment at the
next mining window.

Shift the target one block earlier to `cycle_N + (DKG_INTERVAL - 2)` =
`cycle_N + 22` — the center of the 3-block Idle gap `[cycle_N + 21,
cycle_N + 23]`. This leaves a one-block margin on each side, so
harnesses mining up to one extra block stay inside the gap. For the
default 8-cycle run this lands at 406 (last orchestrated cycle at 384,
tip at `384 + 22 = 406`, `406 % 24 = 22`).

Verified via a fresh dashd against the exported datadir:

- getblockcount: 406, `406 % 24 = 22`
- quorum list: 8 `llmq_test` + 16 `llmq_test_dip0024` rotating quorums,
  all with non-zero `quorumPublicKey`
- No null commits anywhere: `quorum list` only surfaces commits that
  ProcessCommitment wrote to evoDB, which only happens for non-null
  commits
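
As a minimal sketch of the margin property (assumes an `rpc` handle like
the generator's; the commented values are for the default 8-cycle run):

    DKG_INTERVAL = 24
    DKG_MINING_WINDOW_END = 20
    tip = rpc.call("getblockcount")   # 406
    offset = tip % DKG_INTERVAL       # 22, center of the Idle gap [21, 23]
    # One block of margin on each side: a harness may mine one setup block
    # (offset 23) and still sit short of the next cycle's phase 1 at
    # offset 0 of cycle_{N+1}.
    assert DKG_MINING_WINDOW_END < offset < DKG_INTERVAL - 1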
---
 generate_masternode.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/generate_masternode.py b/generate_masternode.py
index 851fce8..9720944 100755
--- a/generate_masternode.py
+++ b/generate_masternode.py
@@ -614,16 +614,22 @@ def phase_6_generate_test_transactions(network):
     for i, amount in enumerate(amounts):
         rpc.call("sendtoaddress", addresses[i % len(addresses)], amount)
 
-    # Mine enough blocks to confirm all transactions and place the tip at
-    # `cycle_N + (DKG_INTERVAL - 1)` — the last Idle-phase block before the
-    # next cycle's phase 1. Phase 5 leaves tip at `cycle_N + DKG_MINING_WINDOW_END`,
-    # so we need `DKG_INTERVAL - 1 - DKG_MINING_WINDOW_END` additional blocks.
+    # Mine enough blocks to confirm all transactions and land the tip at the
+    # CENTER of the DKG Idle gap at `cycle_N + (DKG_INTERVAL - 2)` (= 22 for
+    # default params). The gap is the 3-block window `[cycle_N + 21,
+    # cycle_N + 23]`: past the last cycle's mining window and before the next
+    # cycle's phase 1 at `cycle_N + 24`. Centering the tip leaves one block
+    # of margin on each side so test harnesses that mine a block before
+    # checking don't accidentally cross `cycle_N + 24` and start a DKG that
+    # the just-brought-online masternodes can't participate in.
+    # Phase 5 leaves tip at `cycle_N + DKG_MINING_WINDOW_END`, so we need
+    # `DKG_INTERVAL - 2 - DKG_MINING_WINDOW_END` additional blocks.
     height = rpc.call("getblockcount")
     cycle_offset = height % DKG_INTERVAL
     assert cycle_offset == DKG_MINING_WINDOW_END, (
         f"Expected phase 5 to leave tip at cycle_offset={DKG_MINING_WINDOW_END}, got {cycle_offset} (tip {height})"
     )
-    blocks_to_mine = DKG_INTERVAL - 1 - DKG_MINING_WINDOW_END
+    blocks_to_mine = DKG_INTERVAL - 2 - DKG_MINING_WINDOW_END
     network.move_blocks(blocks_to_mine)
 
     final_height = rpc.call("getblockcount")

From 2e90f274c610d8ed4e611e68c373104e14d685b5 Mon Sep 17 00:00:00 2001
From: xdustinface
Date: Wed, 6 May 2026 23:16:39 +1000
Subject: [PATCH 8/8] chore: drop version suffix from masternode export
 directory name
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The exported directory is now `data/regtest-mn/` instead of
`data/regtest-mn-v0.0.1/`. The release tag carries the version, so
embedding it in the directory name was redundant and tied the path to a
specific version that had to change every release.
Matches the convention used by `generate.py` outputs (`regtest-15000/`
etc.) — block count or scenario name in the directory, no version tag.
---
 generate_masternode.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/generate_masternode.py b/generate_masternode.py
index 9720944..53c8188 100755
--- a/generate_masternode.py
+++ b/generate_masternode.py
@@ -650,7 +650,7 @@ def phase_7_export(network, output_base_dir, dkg_cycles):
     rpc = network.controller.rpc
     chain_height = rpc.call("getblockcount")
 
-    output_dir = Path(output_base_dir) / "regtest-mn-v0.0.1"
+    output_dir = Path(output_base_dir) / "regtest-mn"
     if output_dir.exists():
         print(f" Removing existing output: {output_dir}")
         shutil.rmtree(output_dir)