From 5fb2b997adf36aad4a992c9aab22dc30b5c65481 Mon Sep 17 00:00:00 2001 From: Federico Mastellone Date: Mon, 2 Mar 2026 23:08:27 +0000 Subject: [PATCH 1/6] bench | tx-generator strictness analysis --- .../tx-generator/src/Cardano/Benchmarking/Wallet.hs | 13 +++++++------ bench/tx-generator/src/Cardano/TxGenerator/Fund.hs | 2 +- .../src/Cardano/TxGenerator/Internal/Fifo.hs | 5 +++-- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/bench/tx-generator/src/Cardano/Benchmarking/Wallet.hs b/bench/tx-generator/src/Cardano/Benchmarking/Wallet.hs index bf5739208ef..8ba0c542372 100644 --- a/bench/tx-generator/src/Cardano/Benchmarking/Wallet.hs +++ b/bench/tx-generator/src/Cardano/Benchmarking/Wallet.hs @@ -18,6 +18,7 @@ import Cardano.TxGenerator.Tx import Cardano.TxGenerator.Types import Cardano.TxGenerator.UTxO +import Data.List (foldl') import Prelude import Control.Concurrent.MVar @@ -64,13 +65,13 @@ askWalletRef r f = do -- | This does an insertion into the `MVar` contents. walletRefInsertFund :: WalletRef -> Fund -> IO () -walletRefInsertFund ref fund = modifyMVar_ ref $ \w -> return $ FundQueue.insertFund w fund +walletRefInsertFund ref fund = modifyMVar_ ref $ \w -> return $! FundQueue.insertFund w fund -- | 'mkWalletFundStoreList' hides its second argument in -- 'FundToStoreList'. This is not used anywhere. mkWalletFundStoreList :: WalletRef -> FundToStoreList IO mkWalletFundStoreList walletRef funds = modifyMVar_ walletRef - $ \wallet -> return (foldl FundQueue.insertFund wallet funds) + $ \wallet -> return $! foldl' FundQueue.insertFund wallet funds -- | 'mkWalletFundStore' hides its second argument in 'FundToStore'. -- This is only ever called in tandem with 'createAndStore' in @@ -79,16 +80,16 @@ mkWalletFundStoreList walletRef funds = modifyMVar_ walletRef -- 'WalletRef' 'MVar' by side effect. 
mkWalletFundStore :: WalletRef -> FundToStore IO mkWalletFundStore walletRef fund = modifyMVar_ walletRef - $ \wallet -> return $ FundQueue.insertFund wallet fund + $ \wallet -> return $! FundQueue.insertFund wallet fund -- | 'walletSource' is only ever used in -- 'Cardano.Benchmarking.Script.Core.evalGenerator' to pass -- to 'Cardano.TxGenerator.Tx.sourceToStoreTransaction' and -- its associated functions. walletSource :: WalletRef -> Int -> FundSource IO -walletSource ref munch = modifyMVar ref $ \fifo -> return $ case removeFunds munch fifo of - Nothing -> (fifo, Left $ TxGenError "WalletSource: out of funds") - Just (newFifo, funds) -> (newFifo, Right funds) +walletSource ref munch = modifyMVar ref $ \fifo -> case removeFunds munch fifo of + Nothing -> return (fifo, Left $ TxGenError "WalletSource: out of funds") + Just (newFifo, funds) -> return (newFifo, Right funds) -- | Just a preview of the wallet's funds; wallet remains unmodified. walletPreview :: WalletRef -> Int -> IO [Fund] diff --git a/bench/tx-generator/src/Cardano/TxGenerator/Fund.hs b/bench/tx-generator/src/Cardano/TxGenerator/Fund.hs index a2235ac3b5a..d1d6443e132 100644 --- a/bench/tx-generator/src/Cardano/TxGenerator/Fund.hs +++ b/bench/tx-generator/src/Cardano/TxGenerator/Fund.hs @@ -36,7 +36,7 @@ import Data.Function (on) -- use of lenses. 
data FundInEra era = FundInEra { _fundTxIn :: !TxIn - , _fundWitness :: Witness WitCtxTxIn era + , _fundWitness :: !(Witness WitCtxTxIn era) , _fundVal :: !(TxOutValue era) , _fundSigningKey :: !(Maybe (SigningKey PaymentKey)) } diff --git a/bench/tx-generator/src/Cardano/TxGenerator/Internal/Fifo.hs b/bench/tx-generator/src/Cardano/TxGenerator/Internal/Fifo.hs index eaa0b9f27df..a04ceec7cb0 100644 --- a/bench/tx-generator/src/Cardano/TxGenerator/Internal/Fifo.hs +++ b/bench/tx-generator/src/Cardano/TxGenerator/Internal/Fifo.hs @@ -1,4 +1,3 @@ -{-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} {-| Module : Cardano.TxGenerator.Internal.Fifo Description : FIFO/queue data structure. @@ -48,7 +47,9 @@ remove :: Fifo a -> Maybe (Fifo a, a) remove fifo = case fifo of Fifo [] [] -> Nothing Fifo (h:t) y -> Just (Fifo t y, h) - Fifo [] y -> let ~(h:t) = reverse y in Just (Fifo t [], h) + Fifo [] y -> case reverse y of + (h:t) -> Just (Fifo t [], h) + [] -> Nothing -- | Dequeueing /n/ items just iterates calling remove within the -- `Maybe` monad. Removing n from a Fifo of length k when k < n is From 0b0927dd906b26ec3dfcf2dccb3aa283173f7f0b Mon Sep 17 00:00:00 2001 From: Federico Mastellone Date: Tue, 3 Mar 2026 00:39:23 +0000 Subject: [PATCH 2/6] Increase mempool capacity\nCopy MempoolCapacityBytesOverride from https://github.com/input-output-hk/ouroboros-leios/blob/ebc1f7c76b34e3d4f1b28485b720ed324633a6b3/demo/proto-devnet/config/config.yaml#L9 --- nix/workbench/service/nodes.nix | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nix/workbench/service/nodes.nix b/nix/workbench/service/nodes.nix index 27a4ddcfcf2..78e1678d23c 100644 --- a/nix/workbench/service/nodes.nix +++ b/nix/workbench/service/nodes.nix @@ -111,6 +111,9 @@ let ChainSyncIdleTimeout = 0; PeerSharing = false; + # Lower bound, 2 * maxEBClosure (12.5MB) + MempoolCapacityBytesOverride = 25000000; + ## defaults taken from: ouroboros-network/src/Ouroboros/Network/Diffusion/Configuration.hs ## NB. 
the following inequality must hold: known >= established >= active >= 0 SyncTargetNumberOfActivePeers = max 15 valency; # set to same value as TargetNumberOfActivePeers From 2daacd0c3c94d06c6709ce7b2e4817a97d2da4ce Mon Sep 17 00:00:00 2001 From: Federico Mastellone Date: Wed, 4 Mar 2026 12:15:35 +0000 Subject: [PATCH 3/6] Revert "Increase mempool capacity\nCopy MempoolCapacityBytesOverride from https://github.com/input-output-hk/ouroboros-leios/blob/ebc1f7c76b34e3d4f1b28485b720ed324633a6b3/demo/proto-devnet/config/config.yaml#L9" This reverts commit 47acc37e0fd3da5981725122a33b5fff26387eef. --- nix/workbench/service/nodes.nix | 3 --- 1 file changed, 3 deletions(-) diff --git a/nix/workbench/service/nodes.nix b/nix/workbench/service/nodes.nix index 78e1678d23c..27a4ddcfcf2 100644 --- a/nix/workbench/service/nodes.nix +++ b/nix/workbench/service/nodes.nix @@ -111,9 +111,6 @@ let ChainSyncIdleTimeout = 0; PeerSharing = false; - # Lower bound, 2 * maxEBClosure (12.5MB) - MempoolCapacityBytesOverride = 25000000; - ## defaults taken from: ouroboros-network/src/Ouroboros/Network/Diffusion/Configuration.hs ## NB. the following inequality must hold: known >= established >= active >= 0 SyncTargetNumberOfActivePeers = max 15 valency; # set to same value as TargetNumberOfActivePeers From 64fc5695bc90291b00df73052f6f8b3e4cb9bd0f Mon Sep 17 00:00:00 2001 From: Federico Mastellone Date: Wed, 4 Mar 2026 04:20:54 +0000 Subject: [PATCH 4/6] WIP: workbench hacks, make it work! 
--- .../Benchmarking/Profile/Builtin/Empty.hs | 2 +- .../Benchmarking/Profile/Vocabulary.hs | 2 +- nix/workbench/backend/nomad.nix | 6 + nix/workbench/service/generator.nix | 120 +++++++++++++++++- 4 files changed, 126 insertions(+), 4 deletions(-) diff --git a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Builtin/Empty.hs b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Builtin/Empty.hs index 33040869fcf..36bb32c2d3d 100644 --- a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Builtin/Empty.hs +++ b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Builtin/Empty.hs @@ -63,7 +63,7 @@ fastDuration = ciTestDuration :: Types.Profile -> Types.Profile ciTestDuration = - V.timescaleCompressed . P.shutdownOnBlock 8 + V.timescaleCompressed . P.shutdownOnBlock 180 -- TODO: dummy "generator.epochs" ignored in favor of "--shutdown-on". -- Create a "time.epochs" or "time.blocks" or similar, IDK! -- This applies to all profiles! diff --git a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs index f9aa0f2af83..c69d4f5ceec 100644 --- a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs +++ b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs @@ -116,7 +116,7 @@ genesisVariantVoltaire = genesisVariantLatest -- Defined in the "genesis" property and it's for the tx-generator. fundsDefault :: Types.Profile -> Types.Profile -fundsDefault = P.poolBalance 1000000000000000 . P.funds 10000000000000 . P.utxoKeys 1 +fundsDefault = P.poolBalance 1000000000000000 . P.funds 10000000000000 . P.utxoKeys (6*500*3) -- Some profiles have a higher `funds_balance` in `Genesis`. Needed? Fix it? 
fundsDouble :: Types.Profile -> Types.Profile diff --git a/nix/workbench/backend/nomad.nix b/nix/workbench/backend/nomad.nix index 9b272b86b29..38111f5abb5 100644 --- a/nix/workbench/backend/nomad.nix +++ b/nix/workbench/backend/nomad.nix @@ -176,6 +176,12 @@ let # Avoid nix cache misses on every commit because of `set-git-rev`. flake-output = "cardanoNodePackages.tx-generator.passthru.noGitRev"; }; + tx-centrifuge = rec { + # Local reference only used if not "cloud". + nix-store-path = haskellProject.exes.tx-centrifuge; + flake-reference = "github:intersectmbo/cardano-node"; + flake-output = "cardanoNodePackages.tx-centrifuge"; + }; } ; diff --git a/nix/workbench/service/generator.nix b/nix/workbench/service/generator.nix index 2f4a294edc3..fe8e0d346a8 100644 --- a/nix/workbench/service/generator.nix +++ b/nix/workbench/service/generator.nix @@ -147,6 +147,121 @@ let let serviceConfig = generatorServiceConfig nodeSpecs; service = generatorServiceConfigService serviceConfig; + genesisFunds = + (let + # create-testnet-data --testnet-magic 42 --total-supply 2010000000000000 --utxo-keys 100 --genesis-keys 0 --delegated-supply 2000000000000000 --pools 2 --stake-delegators 2 --drep-keys 0 --stuffed-utxo 000000 + # Ends with 90000000000 each utxo-key. + # value = (profile.genesis.funds_balance - profile.genesis.shelley.protocolParams.poolDeposit * profile.composition.n_pools) / profile.genesis.utxo_keys; + value = (profile.derived.supply_total - profile.derived.supply_delegated) * 9 / (profile.genesis.utxo_keys * 10); + in +__toJSON + (builtins.genList + (i: + { signing_key = "../genesis/utxo-keys/utxo${toString (i+1)}.skey"; # Key index is not zero based =) + inherit value; + } + ) + profile.genesis.utxo_keys + ) + ) + ; + txCentrifugeConfig = + { # pull-fiction parameters. 
+ ########################## + initial_inputs = + { type = "genesis_utxo_keys"; + params = + { network_magic = profile.genesis.network_magic; + signing_keys_file = "./funds.json"; + } + ; + } + ; + builder = + { type = "value"; + params = + { inputs_per_tx = 2; + outputs_per_tx = 2; + fee = 1000000; + optimistic_recycle = false; + } + ; + } + ; + rate_limit = + { scope = "shared"; + type = "token_bucket"; + params = { tps = 15; }; + } + ; + max_batch_size = 500; + on_exhaustion = "error"; + # One node per-workload. + workloads = + builtins.listToAttrs + (builtins.genList + (i: + { name = "node-${toString i}"; + value = + { targets = + { "${toString i}" = + { addr = "127.0.0.1"; + port = (30000 + i); + } + ; + } + ; + } + ; + } + ) + profile.composition.n_pool_hosts + ) + ; + # tx-centrifuge parameters. + ########################### + nodeConfig = "../${runningNode}/config.json"; + protocolParametersFile = "/tmp/protocol-parameters-queried.json"; + # Tracing parameters. + ##################### + TraceOptions = + { "" = + { backends = [ "Stdout MachineFormat" ]; + detail = "DNormal"; + severity = "Debug"; + }; + # ouroboros-network traces. + "KeepAlive" = { severity="Silence";}; + "KeepAlive.Receive.KeepAliveResponse" = { severity="Silence";}; + "KeepAlive.Send.KeepAlive" = { severity="Silence";}; + "TxSubmission2" = { severity="Silence";}; + "TxSubmission2.Receive" = { severity="Silence";}; + "TxSubmission2.Receive.MsgInit" = { severity="Silence";}; + "TxSubmission2.Receive.RequestTxIds" = { severity="Silence";}; + "TxSubmission2.Receive.RequestTxs" = { severity="Silence";}; + "TxSubmission2.Receive.Done" = { severity="Silence";}; + "TxSubmission2.Send" = { severity="Silence";}; + "TxSubmission2.Send.MsgInit" = { severity="Silence";}; + "TxSubmission2.Send.ReplyTxIds" = { severity="Silence";}; + "TxSubmission2.Send.ReplyTxs" = { severity="Silence";}; + "TxSubmission2.Send.Done" = { severity="Silence";}; + # tx-centrifuge traces. 
+ "TxCentrifuge.Builder.NewTx" = { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.Builder.Recycle" = { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.TxSubmission.RequestTxIds" = + { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.TxSubmission.ReplyTxIds" = + { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.TxSubmission.RequestTxs" = + { severity="Debug";detail="DDetailed";}; + "TxCentrifuge.TxSubmission.ReplyTxs" = + { severity="Debug";detail="DDetailed";}; + }; + TurnOnLogMetrics = false; + TurnOnLogging = true; + TraceOptionNodeName = "leios-generator"; + } + ; in { start = '' @@ -189,11 +304,12 @@ let # Extra workloads end ####################### ############################################# - ${service.script} + echo ${__toJSON genesisFunds} > ./funds.json + ${haskellProject.exes.tx-centrifuge}/bin/tx-centrifuge run-script.json '' ; - config = (service.decideRunScript service); + config = txCentrifugeConfig; # Not present on every profile. # Don't create a derivation to a file containing "null" !!! 
From b62bc437adc05a98cd8d82ee18a79f6ccf861887 Mon Sep 17 00:00:00 2001 From: Federico Mastellone Date: Tue, 3 Mar 2026 21:03:08 +0000 Subject: [PATCH 5/6] bench | tx-centrifuge: Leios tx-generator --- bench/tx-centrifuge/LICENSE | 177 ++++ bench/tx-centrifuge/NOTICE | 14 + bench/tx-centrifuge/README.md | 235 +++++ bench/tx-centrifuge/app/Main.hs | 529 ++++++++++++ bench/tx-centrifuge/bench/Bench.hs | 52 ++ .../data/config-multi-group.json | 817 ++++++++++++++++++ .../data/config-per-target-0_2.json | 229 +++++ .../data/config-per-target-200.json | 229 +++++ .../data/config-per-target-2k.json | 229 +++++ .../tx-centrifuge/data/config-shared-10.json | 229 +++++ .../data/config-shared-100k.json | 229 +++++ .../data/protocol-parameters.ci-test.json | 461 ++++++++++ .../Cardano/Benchmarking/PullFiction/Clock.hs | 69 ++ .../Benchmarking/PullFiction/Config/Raw.hs | 282 ++++++ .../PullFiction/Config/Runtime.hs | 423 +++++++++ .../PullFiction/Config/Validated.hs | 375 ++++++++ .../PullFiction/Internal/RateLimiter.hs | 247 ++++++ .../PullFiction/WorkloadRunner.hs | 293 +++++++ .../Benchmarking/TxCentrifuge/Client.hs | 334 +++++++ .../Benchmarking/TxCentrifuge/Connection.hs | 349 ++++++++ .../Cardano/Benchmarking/TxCentrifuge/Fund.hs | 182 ++++ .../Benchmarking/TxCentrifuge/Tracing.hs | 492 +++++++++++ .../TxCentrifuge/Tracing/Orphans.hs | 406 +++++++++ .../Cardano/Benchmarking/TxCentrifuge/Tx.hs | 168 ++++ .../test/lib/Test/PullFiction/Harness.hs | 533 ++++++++++++ bench/tx-centrifuge/test/pull-fiction/Main.hs | 25 + .../Test/PullFiction/GeneratorTest.hs | 127 +++ .../Test/PullFiction/PipelineTest.hs | 59 ++ .../tx-centrifuge/test/tx-centrifuge/Main.hs | 17 + .../tx-centrifuge/Test/TxCentrifuge/TxTest.hs | 223 +++++ bench/tx-centrifuge/tx-centrifuge.cabal | 228 +++++ cabal.project | 1 + 32 files changed, 8263 insertions(+) create mode 100644 bench/tx-centrifuge/LICENSE create mode 100644 bench/tx-centrifuge/NOTICE create mode 100644 bench/tx-centrifuge/README.md create mode 
100644 bench/tx-centrifuge/app/Main.hs create mode 100644 bench/tx-centrifuge/bench/Bench.hs create mode 100644 bench/tx-centrifuge/data/config-multi-group.json create mode 100644 bench/tx-centrifuge/data/config-per-target-0_2.json create mode 100644 bench/tx-centrifuge/data/config-per-target-200.json create mode 100644 bench/tx-centrifuge/data/config-per-target-2k.json create mode 100644 bench/tx-centrifuge/data/config-shared-10.json create mode 100644 bench/tx-centrifuge/data/config-shared-100k.json create mode 100644 bench/tx-centrifuge/data/protocol-parameters.ci-test.json create mode 100644 bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Clock.hs create mode 100644 bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Raw.hs create mode 100644 bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Runtime.hs create mode 100644 bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Validated.hs create mode 100644 bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Internal/RateLimiter.hs create mode 100644 bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/WorkloadRunner.hs create mode 100644 bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Client.hs create mode 100644 bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Connection.hs create mode 100644 bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Fund.hs create mode 100644 bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing.hs create mode 100644 bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing/Orphans.hs create mode 100644 bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tx.hs create mode 100644 bench/tx-centrifuge/test/lib/Test/PullFiction/Harness.hs create mode 100644 bench/tx-centrifuge/test/pull-fiction/Main.hs create mode 100644 
bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/GeneratorTest.hs create mode 100644 bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/PipelineTest.hs create mode 100644 bench/tx-centrifuge/test/tx-centrifuge/Main.hs create mode 100644 bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs create mode 100644 bench/tx-centrifuge/tx-centrifuge.cabal diff --git a/bench/tx-centrifuge/LICENSE b/bench/tx-centrifuge/LICENSE new file mode 100644 index 00000000000..f433b1a53f5 --- /dev/null +++ b/bench/tx-centrifuge/LICENSE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/bench/tx-centrifuge/NOTICE b/bench/tx-centrifuge/NOTICE new file mode 100644 index 00000000000..df6a765c219 --- /dev/null +++ b/bench/tx-centrifuge/NOTICE @@ -0,0 +1,14 @@ +Copyright 2019-2023 Input Output Global Inc (IOG), 2023-2026 Intersect. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + diff --git a/bench/tx-centrifuge/README.md b/bench/tx-centrifuge/README.md new file mode 100644 index 00000000000..21ed693eca4 --- /dev/null +++ b/bench/tx-centrifuge/README.md @@ -0,0 +1,235 @@ +# Tx Centrifuge & Pull-Fiction + +`tx-centrifuge` is a high-performance load generator for Cardano, built on top of the protocol-agnostic **Pull-Fiction** library. + +Unlike traditional load generators that "push" data at a fixed rate, this system is designed for **pull-based protocols**. It does not generate load by itself; instead, it acts as a **policer** that reacts to requests from downstream consumers, admitting or delaying them to enforce a configured rate ceiling. + +### Minimal Configuration Example + +A basic configuration defines how to load initial resources, how to build payloads, the desired rate, and where to send the results: + +```json +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 42, + "signing_keys_file": "funds.json" + } + }, + "builder": { "type": "value", "params": { "fee": 1000000 } }, + "rate_limit": { "type": "token_bucket", "params": { "tps": 10 } }, + "workloads": { + "group-A": { + "targets": { + "node-1": { "addr": "127.0.0.1", "port": 30000 }, + "node-2": { "addr": "127.0.0.1", "port": 30001 } + } + } + }, + "nodeConfig": "config.json", + "protocolParametersFile": "pp.json" +} +``` + +## Core Concepts: The Pull-Fiction Engine + +The underlying `pull-fiction` library implements a reactive rate-limiting strategy. It only produces data when a consumer asks for it, and only as fast as the rate limiter allows. 
+ +### Reactive Rate Limiting +- **Downstream Driven**: Load is only dispensed in response to an explicit pull from a target. If the target doesn't ask, the engine stays idle. +- **Ceiling Enforcement**: The rate limiter enforces a tokens-per-second (TPS) ceiling. Even if a consumer pulls aggressively, the engine ensures the dispensed items never exceed the configured limit. +- **Fairness**: Token slots are claimed in a single atomic STM transaction, providing FIFO-fair scheduling across multiple workers sharing the same limiter. + +### Workloads and Targets +The configuration is organized into a hierarchy that defines the concurrency model: + +- **Target**: A single network endpoint (e.g., a Cardano node). Each target has a dedicated **Worker thread** that manages the network connection and handles requests. +- **Workload**: A logical grouping of targets. + - All targets within a workload share the same **Builder thread** and the same **Payload Queue**. + - **Transaction Profiles**: Each workload can define its own `builder` configuration. This allows you to generate different "profiles" of transactions (e.g., different sizes, complexities, or fees) for different groups of nodes. + - **Isolation**: By using multiple workloads, you can isolate different groups of targets. For example, one workload could simulate high-volume "small" transactions for one group of nodes, while another generates "heavy" transactions for another. + +### Pipeline Architecture +The engine operates as a decoupled production pipeline using generic `input` and `payload` types: +1. **Initial Inputs**: Starting resources (of type `input`) are partitioned across workloads. +2. **Input Queue (Unbounded)**: Holds available `input` items. +3. **Builder (One per Workload)**: A dedicated thread that pulls `input`s, produces a `payload`, and pairs it with any `[input]`s to be recycled. It pushes the `(payload, [input])` pair to the payload queue. +4. 
**Payload Queue (Bounded)**: The sole source of **backpressure**. The builder blocks here if consumers are slower than the production rate. +5. **Workers (One per Target)**: Threads that manage the consumer connection. They pull from the payload queue via a rate-limited fetcher. + +### Resource Recycling +To enable indefinite-duration runs with finite resources, inputs must be returned to the `Input Queue`. There are two main patterns: + +1. **Optimistic Recycling (Builder-level)**: The builder immediately returns resources to the `Input Queue` as soon as the payload is constructed, before it even enters the payload queue. This is the highest-throughput mode but assumes the payload will be successfully processed downstream. +2. **Standard Recycling (On-Fetch)**: The builder pairs the payload with the resources to be **recycled** as a `(payload, [input])` tuple in the payload queue. When a worker **fetches** this tuple from the payload queue (triggered by a downstream request), the library returns those resources to the `Input Queue` in a separate STM transaction before handing the payload to the worker. Note: recycling happens on fetch, not on downstream acknowledgement — if the worker is killed between fetch and delivery, those inputs are lost. + +## Configuration + +### Initial Inputs (`initial_inputs`) +The generator requires a set of initial UTxOs, configured in the `initial_inputs` section of the main configuration file. + +- **`type`**: The input loader variant (e.g., `"genesis_utxo_keys"`). +- **`params`**: +- - **`network_magic`**: Required for deriving UTxO references from keys (e.g., `42` for testnet). +- - **`signing_keys_file`**: Path to a JSON file (e.g., `funds.json`) containing the actual fund data. + +#### `funds.json` entry types +The file contains an array of fund objects. There are two distinct types: + +1. **Genesis Funds** (Key-only): Identified only by their signing key. The `TxIn` is derived automatically. 
+ ```json + { "signing_key": "genesis.skey", "value": 1500000000000 } + ``` +2. **Payment Funds** (Explicit UTxO): Requires a specific transaction reference. + ```json + { "signing_key": "payment.skey", "value": 1000000, "tx_in": "df6...#0" } + ``` + +**Design Note**: The `funds.json` format is designed to be compatible with the output of `cardano-cli conway create-testnet-data --utxo-keys`. This allows you to immediately use an arbitrary large set of Shelley genesis keys created during testnet bootstrapping as the initial fund pool for the generator, without needing to manually create UTxOs once the network is live. + +### Rate Limiting (`rate_limit`) +The `rate_limit` field can be set at the **top level** or at the **workload level** (but not both — setting it at both levels is a validation error). If omitted entirely, targets run **unlimited** (no rate ceiling). + +The `scope` determines the granularity of the TPS ceiling. Available scopes depend on where the rate limit is defined: + +**Top-level scopes:** +- **`shared`** (default): A single rate limiter shared by all targets across all workloads. The configured TPS is the aggregate ceiling. +- **`per_workload`**: Each workload gets its own independent rate limiter at the full configured TPS (shared by its targets). +- **`per_target`**: Every target gets its own independent rate limiter at the full configured TPS. E.g., 10 TPS with 50 targets = 500 TPS aggregate. + +**Workload-level scopes:** +- **`shared`** (default): One rate limiter shared by all targets in the workload. The configured TPS is the aggregate ceiling for the workload. +- **`per_target`**: Every target in the workload gets its own independent rate limiter at the full configured TPS. + +### Cascading Defaults + +Most configuration fields can be set at multiple levels. The most specific value wins: + +- **`builder`**: workload > top-level. Setting it at **both** levels is a validation error. At least one must be set (no default). 
+- **`rate_limit`**: workload > top-level > **unlimited**. Setting it at **both** levels is a validation error. +- **`max_batch_size`**: target > workload > top-level > **1**. +- **`on_exhaustion`**: target > workload > top-level > **`block`**. + +Workload and target names must be non-empty and must not contain `.` or start with `@` (reserved for internal rate-limiter cache keys). + +### Batching and Flow Control +- **`max_batch_size`**: Limits the number of items (e.g., transactions) the generator will announce to a target in a single protocol request. + - This acts as a safety cap: even if a target's protocol allows for 500 items, a `max_batch_size` of 100 ensures the generator doesn't commit too much capacity to a single connection at once. + - This helps distribute the available "payload queue" more evenly across multiple targets and prevents a single aggressive node from starving others. +- **`on_exhaustion`**: + - `block`: The worker thread waits until the builder produces a new payload. + - `error`: The generator fails immediately if the builder cannot keep up with the requested TPS. + +## Cardano Implementation (`tx-centrifuge`) + +### Value Builder Parameters +These parameters define the **transaction profile** for a workload: +- `inputs_per_tx` / `outputs_per_tx`: Controls the transaction structure (size and complexity). +- `fee`: Fixed Lovelace fee per transaction. +- `optimistic_recycle`: + - `true`: Output UTxOs are recycled immediately by the builder, before the transaction enters the payload queue. + - `false`: Output UTxOs are recycled when a worker fetches the transaction from the payload queue (before it is delivered to the node). + +## Usage + +```bash +tx-centrifuge config.json +``` + +## Detailed Examples + +### 1. High-Throughput (Optimistic Recycling) +Optimized for maximum TPS using simple 1-in/1-out transactions. 
+ +**`config.json` snippet:** +```json +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 42, + "signing_keys_file": "funds.1.json" + } + }, + "builder": { + "type": "value", + "params": { + "inputs_per_tx": 1, + "outputs_per_tx": 1, + "fee": 1000000, + "optimistic_recycle": true + } + }, + "rate_limit": { + "type": "token_bucket", + "scope": "shared", + "params": { "tps": 1000 } + }, + "workloads": { + "simulation": { + "targets": { + "node-0": { "addr": "127.0.0.1", "port": 30000 } + } + } + }, + "nodeConfig": "config.json", + "protocolParametersFile": "pp.json" +} +``` + +**`funds.1.json` snippet:** +```json +[ + {"signing_key": "utxo1.skey", "value": 1500000000000}, + {"signing_key": "utxo2.skey", "value": 1500000000000} +] +``` + +### 2. Large Transactions (Target-Specific Limits) +Uses complex transactions with independent rate limits for each target connection. + +**`config.json` snippet:** +```json +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 42, + "signing_keys_file": "funds.2.json" + } + }, + "builder": { + "type": "value", + "params": { + "inputs_per_tx": 5, + "outputs_per_tx": 5, + "fee": 2000000, + "optimistic_recycle": false + } + }, + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { "tps": 5 } + }, + "max_batch_size": 50, + "on_exhaustion": "block", + "workloads": { + "heavy-load": { + "targets": { + "edge-node": { "addr": "192.168.1.10", "port": 30001 } + } + } + } +} +``` + +**`funds.2.json` snippet:** +```json +[ + {"signing_key": "utxo1.skey", "value": 1000000000}, + {"signing_key": "utxo2.skey", "value": 1000000000}, + {"signing_key": "utxo3.skey", "value": 1000000000} +] +``` diff --git a/bench/tx-centrifuge/app/Main.hs b/bench/tx-centrifuge/app/Main.hs new file mode 100644 index 00000000000..b3ceb12ce96 --- /dev/null +++ b/bench/tx-centrifuge/app/Main.hs @@ -0,0 +1,529 @@ +{-# LANGUAGE GADTs #-} +{-# LANGUAGE ImportQualifiedPost 
#-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Main (main) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Concurrent (myThreadId) +import Control.Exception (finally) +import Control.Monad (forever, replicateM, when) +import Data.Bifunctor (first) +import Data.List.NonEmpty qualified as NE +import Data.Maybe (fromMaybe) +import Data.Monoid (Last(..)) +import GHC.Conc (labelThread) +import Numeric.Natural (Natural) +import System.Environment (getArgs) +import System.Exit (die) +import System.IO (hPutStrLn, stderr) +import Text.Printf (printf) +----------- +-- aeson -- +----------- +import Data.Aeson ((.:), (.:?)) +import Data.Aeson qualified as Aeson +import Data.Aeson.Types qualified as Aeson.Types +----------- +-- async -- +----------- +import Control.Concurrent.Async qualified as Async +---------------- +-- bytestring -- +---------------- +import Data.ByteString.Char8 qualified as BS8 +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +------------------------- +-- cardano-ledger-core -- +------------------------- +import Cardano.Ledger.Coin qualified as L +------------------ +-- cardano-node -- +------------------ +import Cardano.Node.Configuration.POM + ( parseNodeConfigurationFP + , makeNodeConfiguration + , defaultPartialNodeConfiguration + , PartialNodeConfiguration(..) + , NodeConfiguration + , ncProtocolConfig + ) +import Cardano.Node.Handlers.Shutdown (ShutdownConfig(..)) +import Cardano.Node.Protocol.Cardano (mkSomeConsensusProtocolCardano) +import Cardano.Node.Protocol.Types (SomeConsensusProtocol(..)) +import Cardano.Node.Types + ( ConfigYamlFilePath(..) + , NodeProtocolConfiguration(..) + , ProtocolFilepaths(..) 
+ ) +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +------------- +-- network -- +------------- +import Network.Socket qualified as Socket +-------------------------- +-- ouroboros-consensus -- +-------------------------- +import Ouroboros.Consensus.Block.Abstract (CodecConfig) +import Ouroboros.Consensus.Config (configBlock, configCodec) +import Ouroboros.Consensus.Config.SupportsNode (getNetworkMagic) +import Ouroboros.Consensus.Node.ProtocolInfo (ProtocolInfo(..)) +--------------------------------- +-- ouroboros-network-framework -- +--------------------------------- +import Ouroboros.Network.IOManager (withIOManager) +--------- +-- stm -- +--------- +import Control.Concurrent.STM qualified as STM +------------------ +-- transformers -- +------------------ +import Control.Monad.Trans.Except (runExceptT) +------------------ +-- tx-generator -- +------------------ +import Cardano.TxGenerator.ProtocolParameters qualified as PP +------------------ +-- pull-fiction -- +------------------ +import Cardano.Benchmarking.PullFiction.Config.Raw qualified as Raw +import Cardano.Benchmarking.PullFiction.Config.Runtime qualified as Runtime +import Cardano.Benchmarking.PullFiction.Config.Validated qualified as Validated +import Cardano.Benchmarking.PullFiction.WorkloadRunner (runWorkload) +--------------------- +-- tx-centrifuge -- +--------------------- +import Cardano.Benchmarking.TxCentrifuge.Client (mkClient) +import Cardano.Benchmarking.TxCentrifuge.Connection + (CardanoBlock, connect) +import Cardano.Benchmarking.TxCentrifuge.Fund qualified as Fund +import Cardano.Benchmarking.TxCentrifuge.Tracing qualified as Tracing +import Cardano.Benchmarking.TxCentrifuge.Tx qualified as Tx + +-------------------------------------------------------------------------------- + +main :: IO () +main = do + + -- Config. + ---------- + + (runtime, codecConfig, networkId, networkMagic, ledgerPP, tracers) <- loadConfig + + -- Launch. 
+ ---------- + + -- Start tx builders Asyncs. + -- A tx builder sits between the fund queue and the tx queue: + -- it pulls unspent funds, signs a transaction, and enqueues the + -- (tx, outputFunds) pair for a worker to submit. + -- The fund queue is unbounded (TQueue); the tx queue is bounded (TBQueue) + -- and provides backpressure. + let mkBuilder runtimeBuilder builderIndex = do + vb <- interpretBuilder (Runtime.parsedBuilder runtimeBuilder) + -- Create a "dEtERmiNisTic" signing key and its derived address. + -- All tx outputs go to this address; recycled funds carry this key. + let (signingKey, signingAddr) = createSigningKeyAndAddress networkId builderIndex + let builderName = Runtime.builderName runtimeBuilder + builderPipe = Runtime.builderPipe runtimeBuilder + fundQueue = Runtime.pipeInputQueue builderPipe + txQueue = Runtime.pipePayloadQueue builderPipe + builderLoop = forever $ do + inputFunds <- STM.atomically $ + replicateM + (fromIntegral (inputsPerTx vb)) + (STM.readTQueue fundQueue) + case Tx.buildTx ledgerPP signingAddr signingKey + inputFunds (outputsPerTx vb) (L.Coin (fee vb)) of + Left err -> die $ "Tx.buildTx: " ++ err + Right ans@(tx, outputFunds) -> do + Tracing.traceWith + (Tracing.trBuilder tracers) + (Tracing.mkBuilderNewTx builderName tx inputFunds outputFunds) + case optimisticRecycle vb of + False -> do + STM.atomically $ STM.writeTBQueue txQueue ans + True -> do + STM.atomically $ STM.writeTBQueue txQueue (tx,[]) + STM.atomically $ Runtime.pipeRecycle builderPipe outputFunds + Tracing.traceWith + (Tracing.trBuilder tracers) + (Tracing.mkBuilderRecycle builderName outputFunds) + async <- Async.async $ do + -- Always label the threads. + tid <- myThreadId + labelThread tid (Runtime.builderName runtimeBuilder) + builderLoop + -- Return linked async and with a labeled thread. 
+ Async.link async + pure async + + -- IOManager (from ouroboros-network-framework, re-exported from + -- Win32-network) is a platform abstraction for asynchronous I/O: + -- + -- * On Windows it wraps an I/O Completion Port (IOCP) and spawns a + -- dedicated OS thread to dequeue completion packets. Sockets are + -- associated with this IOCP via 'associateWithIOManager'. + -- + -- * On POSIX (Linux, macOS) it is a complete no-op — the type is + -- @newtype IOManager = IOManager (forall hole. hole -> IO ())@ and + -- 'withIOManager' simply passes a dummy value to the callback. GHC's + -- built-in RTS I/O manager (epoll / kqueue) handles all socket + -- multiplexing transparently. + -- + -- The Win32-network documentation states that only one IOManager should run + -- at a time, so we cannot create one per target for isolation. Target + -- isolation comes from each target having its own Async thread and TCP + -- socket, not from the IOManager. + -- + -- Everything must live inside 'withIOManager' because on Windows the IOCP + -- handle is closed when 'withIOManager' returns, silently cancelling all + -- pending socket I/O. The 'finally cancelAll' block must therefore be + -- inside too — putting it outside would try to clean up asyncs whose network + -- I/O is already dead. + -- + -- Builders don't use ioManager (they only do STM queue operations and + -- 'buildTx'), but they are spawned inside so that the 'finally cancelAll' + -- cleanup covers them. Spawning them outside would leak asyncs if any + -- setup code between here and 'cancelAll' throws. + withIOManager $ \ioManager -> do + -- Start builders provinding a numeric index (zero based). + builders <- mapM + (uncurry mkBuilder) + (zip + (Runtime.builders runtime) + [0..] + ) + -- From 'String' (address) and 'Int' (port) to 'AddrInfo'. 
+ let resolveAddr ip port = do + let hints = Socket.defaultHints + { Socket.addrSocketType = Socket.Stream + , Socket.addrFamily = Socket.AF_INET + } + addrs <- Socket.getAddrInfo + (Just hints) + (Just ip) + (Just (show port)) + case addrs of + [] -> die $ + "Cannot resolve target: " ++ ip + ++ ":" ++ show port + (a:_) -> pure a + -- The 'TargetWorker' callback, called once per 'Target'. + let targetWorker target fetchTx tryFetchTx = do + addrInfo <- resolveAddr + (Runtime.targetAddr target) + (Runtime.targetPort target) + result <- connect ioManager codecConfig networkMagic tracers addrInfo $ + mkClient + (Tracing.trTxSubmission tracers) + (Runtime.targetName target) + (Runtime.maxBatchSize target) + fetchTx tryFetchTx + case result of + Left err -> die $ Runtime.targetName target ++ ": " ++ err + Right () -> pure () + -- For each 'Workload'. + workers <- concat <$> mapM + (\workload -> runWorkload workload targetWorker) + (Map.elems $ Runtime.workloads runtime) + -- runWorkload returns unlinked asyncs; link them here so failures + -- propagate to the main thread immediately. + mapM_ Async.link workers + -- All asyncs (builders and workers) are linked to the main thread and run + -- forever. ANY completion — whether by exception or normal return — is + -- fatal: either the pipeline starved ('QueueStarved'), a connection + -- dropped, or a builder failed. + -- + -- 'waitAnyCatch' returns as soon as the first async finishes (without + -- re-throwing, so we keep control). 'finally cancelAll' then cancels every + -- remaining async before the program exits. + -- + -- 'Async.link' is still needed: if the main thread is blocked in + -- 'waitAnyCatch' waiting on async A but async B dies, 'link' delivers the + -- exception asynchronously, unblocking 'waitAnyCatch' immediately instead + -- of waiting for A to finish first. 
+ let allAsyncs = builders ++ workers + cancelAll = mapM_ Async.cancel allAsyncs + (_, result) <- flip finally cancelAll $ + Async.waitAnyCatch allAsyncs + case result of + Left ex -> + die $ show ex + Right () -> + die "async terminated unexpectedly" + +-------------------------------------------------------------------------------- +-- Initial funds +-------------------------------------------------------------------------------- + +-- | How to load initial funds for the generator. +-- +-- This type is node-specific (it references signing keys and network magic), +-- so it lives here rather than in the @pull-fiction@ sub-library. The raw JSON +-- config stores this as an opaque 'Aeson.Value'; @Main@ parses it into this ADT +-- and loads funds before passing them to 'Validated.validate'. +data InitialFunds + = GenesisUTxOKeys + !Natural -- ^ Network magic. + !FilePath -- ^ Path to signing keys file. + +instance Aeson.FromJSON InitialFunds where + parseJSON = Aeson.withObject "InitialFunds" $ \o -> do + ty <- o .: "type" :: Aeson.Types.Parser String + case ty of + "genesis_utxo_keys" -> do + p <- o .: "params" + GenesisUTxOKeys <$> p .: "network_magic" <*> p .: "signing_keys_file" + _ -> fail $ "InitialFunds: unknown type " ++ show ty + ++ ", expected \"genesis_utxo_keys\"" + +-------------------------------------------------------------------------------- +-- Builder interpretation +-------------------------------------------------------------------------------- + +-- | Interpreted "value" builder configuration with defaults applied. +data ValueBuilder = ValueBuilder + { inputsPerTx :: !Natural + , outputsPerTx :: !Natural + , fee :: !Integer + , optimisticRecycle :: !Bool + } + +-- | Interpret a 'Raw.Builder' (opaque type + params) into a concrete +-- 'ValueBuilder'. Applies defaults (@inputs_per_tx@ = 1, @outputs_per_tx@ = 1) +-- and validates invariants. 
+interpretBuilder :: Raw.Builder -> IO ValueBuilder +interpretBuilder raw = case Raw.builderType raw of + "value" -> + case Aeson.Types.parseEither parseValueParams (Raw.builderParams raw) of + Left err -> die $ "Builder params: " ++ err + Right (maybeInputs, maybeOutputs, rawFee, mOR) -> do + let nInputs = fromMaybe 1 maybeInputs + nOutputs = fromMaybe 1 maybeOutputs + when (nInputs == 0) $ die "Builder: inputs_per_tx must be >= 1" + when (nOutputs == 0) $ die "Builder: outputs_per_tx must be >= 1" + when (rawFee < 0) $ die "Builder: fee must be >= 0" + pure ValueBuilder + { inputsPerTx = nInputs + , outputsPerTx = nOutputs + , fee = rawFee + , optimisticRecycle = case mOR of + Nothing -> False + Just oR -> oR + } + other -> die $ + "Builder: unknown type " ++ show other ++ ", expected \"value\"" + where + parseValueParams = Aeson.withObject "ValueParams" $ \o -> + (,,,) <$> o .:? "inputs_per_tx" + <*> o .:? "outputs_per_tx" + <*> o .: "fee" + <*> o .:? "optimistic_recycle" + +-------------------------------------------------------------------------------- +-- Signing key loading +-------------------------------------------------------------------------------- + +-- | Load a signing key from a hex string, applying an integer suffix to the +-- last 3 hex characters, and derive its address. +createSigningKeyAndAddress + :: Api.NetworkId + -> Int + -- Signing key used for all generated transactions. + -- Destination address derived from the signing key. + -> (Api.SigningKey Api.PaymentKey, Api.AddressInEra Api.ConwayEra) +createSigningKeyAndAddress networkId n + | n < 0 || n > 999 = + error $ "createSigningKeyAndAddress: out of range (0-999): " ++ show n + | otherwise = + let -- Hex string (32 bytes = 64 hex chars). + -- We use 61 chars + 3 chars suffix = 64 chars total. + -- If the input string is a CBOR-encoded hex string (e.g. 
from an + -- .skey file), strip the first 4 characters ("5820") which represent + -- the CBOR type and length prefix for 32 bytes of raw data. + prefix = "bed03030fd08a600647d99fa7cd94dae3ddab99b199c3f08f81949db3e422" + suffix = printf "%03d" n + hex = prefix ++ suffix + in case Api.deserialiseFromRawBytesHex @(Api.SigningKey Api.PaymentKey) (BS8.pack hex) of + Left err -> + error $ "createSigningKeyAndAddress: Failed to deserialise: " ++ show err + Right signingKey -> + let signingAddr = + Api.shelleyAddressInEra + (Api.shelleyBasedEra @Api.ConwayEra) $ + Api.makeShelleyAddress networkId + (Api.PaymentCredentialByKey + (Api.verificationKeyHash + (Api.getVerificationKey signingKey))) + Api.NoStakeAddress + in (signingKey, signingAddr) + +-------------------------------------------------------------------------------- +-- Initialization +-------------------------------------------------------------------------------- + +-- | Parse CLI args, load all configuration files, create protocol, +-- generate a signing key, load initial funds, and set up tracers. +-- +-- Returns a fully resolved 'Runtime.Runtime' (validated config, rate +-- limiters, pipeline queues, and initial funds already partitioned +-- across workloads). +loadConfig + :: IO ( -- | Fully resolved runtime (config + rate limiters + queues). + Runtime.Runtime Fund.Fund tx + -- | Codec config for serialising blocks on the wire. + , CodecConfig CardanoBlock + , Api.NetworkId + -- | Network magic for the handshake with cardano-node. + , Api.NetworkMagic + -- | Ledger protocol parameters for transaction building. + , Api.LedgerProtocolParameters Api.ConwayEra + -- | Logging / metrics tracers. 
+ , Tracing.Tracers + ) +loadConfig = do + args <- getArgs + configFile <- case args of + [f] -> pure f + _ -> die "Usage: tx-centrifuge " + + hPutStrLn stderr "=== Tx Centrifuge ===" + hPutStrLn stderr "" + + -- Decode the full JSON object once; extract node-specific paths here (like + -- setupTracers reads trace config from the same file independently) and pass + -- the rest to the Raw → Validated → Runtime pipeline. + hPutStrLn stderr $ "Loading config from: " ++ configFile + rawValue <- Aeson.eitherDecodeFileStrict' configFile + >>= either (\e -> die $ "JSON: " ++ e) pure + let parseField field = + case Aeson.Types.parseEither (Aeson.withObject "Config" (.: field)) rawValue of + Left err -> die $ "Config: " ++ err + Right v -> pure v + nodeConfigPath <- parseField "nodeConfig" + ppPath <- parseField "protocolParametersFile" + raw <- case Aeson.fromJSON rawValue of + Aeson.Error err -> die $ "JSON: " ++ err + Aeson.Success cfg -> pure cfg + + -- Load initial funds. + -- Parse the opaque initialInputs JSON into the node-level InitialFunds ADT, + -- then load actual UTxO funds before validation. + funds <- case Aeson.fromJSON (Raw.initialInputs raw) of + Aeson.Error err -> die $ "initialInputs: " ++ err + Aeson.Success (GenesisUTxOKeys magic path) -> do + hPutStrLn stderr $ "Loading funds from: " ++ path + result <- Fund.loadFunds (magicToNetworkId magic) path + case result of + Left err -> die ("Fund.loadFunds: " ++ err) + Right [] -> die "Fund.loadFunds: no funds loaded" + Right (f:fs) -> do + let allFunds = f NE.:| fs + hPutStrLn stderr $ " Loaded " ++ show (NE.length allFunds) ++ " funds" + pure allFunds + -- Validate config and resolve into a Runtime. + -- Pipeline: Raw → Validated (with pre-loaded funds) → Runtime. + validated <- either die pure $ Validated.validate raw funds + runtime <- Runtime.resolve validated + + -- Load node configuration and create consensus protocol. 
+ hPutStrLn stderr $ "Loading node config from: " ++ nodeConfigPath + nodeConfig <- mkNodeConfig nodeConfigPath >>= either die pure + protocol <- mkConsensusProtocol nodeConfig >>= either die pure + let codecConfig = protocolToCodecConfig protocol + networkId = protocolToNetworkId protocol + networkMagic = protocolToNetworkMagic protocol + + -- Load protocol parameters. + hPutStrLn stderr $ "Loading protocol parameters from: " ++ ppPath + protocolParameters <- + Aeson.eitherDecodeFileStrict' ppPath >>= either die pure + ledgerPP <- case PP.convertToLedgerProtocolParameters + Api.ShelleyBasedEraConway protocolParameters of + Left err -> die $ "convertToLedgerProtocolParameters: " ++ show err + Right pp -> pure pp + + -- Tracers. + tracers <- Tracing.setupTracers configFile + + pure ( runtime, codecConfig, networkId, networkMagic, ledgerPP, tracers ) + +-------------------------------------------------------------------------------- +-- Protocol helpers (inlined from NodeConfig.hs and OuroborosImports.hs) +-------------------------------------------------------------------------------- + +mkNodeConfig :: FilePath -> IO (Either String NodeConfiguration) +mkNodeConfig configFp_ = do + configYamlPc <- parseNodeConfigurationFP . Just $ configFp + pure $ first show $ makeNodeConfiguration (configYamlPc <> filesPc) + where + configFp = ConfigYamlFilePath configFp_ + filesPc :: PartialNodeConfiguration + filesPc = defaultPartialNodeConfiguration + { pncProtocolFiles = Last . 
Just $ + ProtocolFilepaths + { byronCertFile = Just "" + , byronKeyFile = Just "" + , shelleyKESFile = Just "" + , shelleyVRFFile = Just "" + , shelleyCertFile = Just "" + , shelleyBulkCredsFile = Just "" + } + , pncShutdownConfig = Last $ Just $ ShutdownConfig Nothing Nothing + , pncConfigFile = Last $ Just configFp + } + +mkConsensusProtocol + :: NodeConfiguration -> IO (Either String SomeConsensusProtocol) +mkConsensusProtocol nodeConfig = + case ncProtocolConfig nodeConfig of + NodeProtocolConfigurationCardano + byronCfg shelleyCfg alonzoCfg conwayCfg + dijkstraCfg hardforkCfg checkpointsCfg -> + first show <$> + runExceptT (mkSomeConsensusProtocolCardano + byronCfg shelleyCfg alonzoCfg conwayCfg + dijkstraCfg hardforkCfg checkpointsCfg Nothing) + +protocolToCodecConfig :: SomeConsensusProtocol -> CodecConfig CardanoBlock +protocolToCodecConfig (SomeConsensusProtocol Api.CardanoBlockType info) = + configCodec $ pInfoConfig $ fst $ Api.protocolInfo @IO info +protocolToCodecConfig _ = + error "protocolToCodecConfig: non-Cardano protocol" + +-- | Derive NetworkId from the consensus config. Mainnet uses a +-- well-known magic number; everything else is a testnet. +protocolToNetworkId :: SomeConsensusProtocol -> Api.NetworkId +protocolToNetworkId proto = case protocolToNetworkMagic proto of + Api.NetworkMagic 764824073 -> Api.Mainnet + nm -> Api.Testnet nm + +protocolToNetworkMagic :: SomeConsensusProtocol -> Api.NetworkMagic +protocolToNetworkMagic + (SomeConsensusProtocol Api.CardanoBlockType info) = + getNetworkMagic $ configBlock $ pInfoConfig $ + fst $ Api.protocolInfo @IO info +protocolToNetworkMagic _ = + error "protocolToNetworkMagic: non-Cardano protocol" + +-- | Convert a raw network magic number to a 'Api.NetworkId'. +-- Mainnet uses the well-known magic 764824073; everything else is a testnet. 
+magicToNetworkId :: Natural -> Api.NetworkId +magicToNetworkId 764824073 = Api.Mainnet +magicToNetworkId n = Api.Testnet (Api.NetworkMagic (fromIntegral n)) diff --git a/bench/tx-centrifuge/bench/Bench.hs b/bench/tx-centrifuge/bench/Bench.hs new file mode 100644 index 00000000000..756aab580b1 --- /dev/null +++ b/bench/tx-centrifuge/bench/Bench.hs @@ -0,0 +1,52 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} + +-------------------------------------------------------------------------------- + +module Main where + +-------------------------------------------------------------------------------- + +--------------- +-- criterion -- +--------------- +import Criterion.Main qualified as Criterion +------------- +-- deepseq -- +------------- +import Control.DeepSeq (NFData (..), deepseq) +--------------------- +-- tx-centrifuge -- +--------------------- +import Paths_tx_centrifuge qualified as Paths +import Test.PullFiction.Harness qualified as Harness + +-------------------------------------------------------------------------------- + +-- | Local wrapper so Criterion can force benchmark results without requiring an +-- NFData instance in the test-harness library. 
+newtype BenchResult = BenchResult Harness.TestResult + +instance NFData BenchResult where + rnf (BenchResult result) = + Harness.elapsedSeconds result `seq` + Harness.targetCounts result `deepseq` () + +-------------------------------------------------------------------------------- + +main :: IO () +main = do + sharedPath <- Paths.getDataFileName "data/config-shared-100k.json" + perTargetPath <- Paths.getDataFileName "data/config-per-target-200.json" + Criterion.defaultMain + [ Criterion.bgroup "generator-throughput" + [ Criterion.bench + "shared-limiter-100k-tps-50-targets" + $ Criterion.nfIO + $ BenchResult <$> Harness.runTpsTest sharedPath 5.0 + , Criterion.bench + "per-target-limiter-200-tps-50-targets" + $ Criterion.nfIO + $ BenchResult <$> Harness.runTpsTest perTargetPath 5.0 + ] + ] diff --git a/bench/tx-centrifuge/data/config-multi-group.json b/bench/tx-centrifuge/data/config-multi-group.json new file mode 100644 index 00000000000..85d4499fc92 --- /dev/null +++ b/bench/tx-centrifuge/data/config-multi-group.json @@ -0,0 +1,817 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + } + }, + "workloads": { + "group-01": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-01": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-02": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-02": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-03": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-03": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-04": { + "rate_limit": { + 
"type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-04": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-05": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-05": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-06": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-06": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-07": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-07": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-08": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-08": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-09": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-09": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-10": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-10": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-11": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-11": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-12": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-12": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + 
"group-13": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-13": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-14": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-14": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-15": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-15": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-16": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-16": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-17": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-17": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-18": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-18": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-19": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-19": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-20": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-20": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-21": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-21": { + "addr": 
"127.0.0.1", + "port": 3001 + } + } + }, + "group-22": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-22": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-23": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-23": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-24": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-24": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-25": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-25": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-26": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-26": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-27": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-27": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-28": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-28": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-29": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-29": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-30": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + 
"targets": { + "group-30": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-31": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-31": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-32": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-32": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-33": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-33": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-34": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-34": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-35": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-35": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-36": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-36": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-37": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-37": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-38": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-38": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-39": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + 
} + }, + "max_batch_size": 500, + "targets": { + "group-39": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-40": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-40": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-41": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-41": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-42": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-42": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-43": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-43": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-44": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-44": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-45": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-45": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-46": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-46": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-47": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-47": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-48": { + "rate_limit": { + "type": "token_bucket", + "scope": 
"per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-48": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-49": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-49": { + "addr": "127.0.0.1", + "port": 3001 + } + } + }, + "group-50": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 1 + } + }, + "max_batch_size": 500, + "targets": { + "group-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-per-target-0_2.json b/bench/tx-centrifuge/data/config-per-target-0_2.json new file mode 100644 index 00000000000..bfa8f536410 --- /dev/null +++ b/bench/tx-centrifuge/data/config-per-target-0_2.json @@ -0,0 +1,229 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + } + }, + "workloads": { + "default": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 0.2 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + 
"port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + 
"node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-per-target-200.json b/bench/tx-centrifuge/data/config-per-target-200.json new file mode 100644 index 00000000000..4f96188ac67 --- /dev/null +++ b/bench/tx-centrifuge/data/config-per-target-200.json @@ -0,0 +1,229 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + } + }, + "workloads": { + "default": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 4000 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + 
"node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-per-target-2k.json b/bench/tx-centrifuge/data/config-per-target-2k.json new file mode 100644 index 00000000000..0efe0483bdd --- /dev/null +++ b/bench/tx-centrifuge/data/config-per-target-2k.json @@ -0,0 
+1,229 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + } + }, + "workloads": { + "default": { + "rate_limit": { + "type": "token_bucket", + "scope": "per_target", + "params": { + "tps": 2000 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": 
"127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-shared-10.json b/bench/tx-centrifuge/data/config-shared-10.json new file mode 100644 index 00000000000..d5e076e007d --- /dev/null +++ b/bench/tx-centrifuge/data/config-shared-10.json @@ -0,0 +1,229 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + } + }, + "workloads": { + "default": { + "rate_limit": { + "type": "token_bucket", + "scope": "shared", + "params": { + "tps": 10 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 
3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + 
"addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/config-shared-100k.json b/bench/tx-centrifuge/data/config-shared-100k.json new file mode 100644 index 00000000000..7307f0843ef --- /dev/null +++ b/bench/tx-centrifuge/data/config-shared-100k.json @@ -0,0 +1,229 @@ +{ + "initial_inputs": { + "type": "genesis_utxo_keys", + "params": { + "network_magic": 0, + "signing_keys_file": "/dev/null" + } + }, + "builder": { + "type": "value", + "params": { + "fee": 200000 + } + }, + "workloads": { + "default": { + "rate_limit": { + "type": "token_bucket", + "scope": "shared", + "params": { + "tps": 100000 + } + }, + "max_batch_size": 500, + "targets": { + "node-01": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-02": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-03": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-04": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-05": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-06": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-07": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-08": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-09": { + "addr": 
"127.0.0.1", + "port": 3001 + }, + "node-10": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-11": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-12": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-13": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-14": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-15": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-16": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-17": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-18": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-19": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-20": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-21": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-22": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-23": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-24": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-25": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-26": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-27": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-28": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-29": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-30": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-31": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-32": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-33": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-34": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-35": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-36": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-37": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-38": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-39": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-40": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-41": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-42": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-43": { + "addr": "127.0.0.1", + "port": 3001 
+ }, + "node-44": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-45": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-46": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-47": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-48": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-49": { + "addr": "127.0.0.1", + "port": 3001 + }, + "node-50": { + "addr": "127.0.0.1", + "port": 3001 + } + } + } + } +} diff --git a/bench/tx-centrifuge/data/protocol-parameters.ci-test.json b/bench/tx-centrifuge/data/protocol-parameters.ci-test.json new file mode 100644 index 00000000000..832d72f2f1e --- /dev/null +++ b/bench/tx-centrifuge/data/protocol-parameters.ci-test.json @@ -0,0 +1,461 @@ +{ + "collateralPercentage": 150, + "costModels": { + "PlutusV1": [ + 197209, + 0, + 1, + 1, + 396231, + 621, + 0, + 1, + 150000, + 1000, + 0, + 1, + 150000, + 32, + 2477736, + 29175, + 4, + 29773, + 100, + 29773, + 100, + 29773, + 100, + 29773, + 100, + 29773, + 100, + 29773, + 100, + 100, + 100, + 29773, + 100, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 1000, + 0, + 1, + 150000, + 32, + 150000, + 1000, + 0, + 8, + 148000, + 425507, + 118, + 0, + 1, + 1, + 150000, + 1000, + 0, + 8, + 150000, + 112536, + 247, + 1, + 150000, + 10000, + 1, + 136542, + 1326, + 1, + 1000, + 150000, + 1000, + 1, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 1, + 1, + 150000, + 1, + 150000, + 4, + 103599, + 248, + 1, + 103599, + 248, + 1, + 145276, + 1366, + 1, + 179690, + 497, + 1, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 148000, + 425507, + 118, + 0, + 1, + 1, + 61516, + 11218, + 0, + 1, + 150000, + 32, + 148000, + 425507, + 118, + 0, + 1, + 1, + 148000, + 425507, + 118, + 0, + 1, + 1, + 2477736, + 29175, + 4, + 0, + 82363, + 4, + 150000, + 5000, + 0, + 1, + 150000, + 32, + 197209, + 0, + 1, + 1, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 150000, + 32, + 3345831, + 
1, + 1 + ], + "PlutusV3": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "decentralization": null, + "executionUnitPrices": { + "priceMemory": 5.77e-2, + "priceSteps": 7.21e-5 + }, + "extraPraosEntropy": null, + "maxBlockBodySize": 65536, + "maxBlockExecutionUnits": { + "memory": 50000000, + "steps": 40000000000 + }, + "maxBlockHeaderSize": 1100, + "maxCollateralInputs": 3, + "maxTxExecutionUnits": { + "memory": 10000000, + "steps": 10000000000 + }, + "maxTxSize": 16384, + "maxValueSize": 5000, + "minPoolCost": 340000000, + "minUTxOValue": null, + "monetaryExpansion": 3.0e-3, + "poolPledgeInfluence": 0.3, + "poolRetireMaxEpoch": 18, + "protocolVersion": { + "major": 6, + "minor": 0 + }, + "stakeAddressDeposit": 2000000, + "stakePoolDeposit": 500000000, + 
"stakePoolTargetNum": 500, + "treasuryCut": 0.2, + "txFeeFixed": 155381, + "txFeePerByte": 44, + "utxoCostPerByte": 4310 +} \ No newline at end of file diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Clock.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Clock.hs new file mode 100644 index 00000000000..67912f5aebb --- /dev/null +++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Clock.hs @@ -0,0 +1,69 @@ +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE ImportQualifiedPost #-} + +-------------------------------------------------------------------------------- + +-- | Single source of truth for monotonic time across the pull-fiction library. +-- +-- Every module in the package must obtain timestamps through this module rather +-- than importing @System.Clock@ directly. This guarantees that all call sites +-- use the same clock ('Clock.MonotonicRaw') and prevents hard-to-diagnose bugs +-- caused by accidentally mixing different clocks (e.g. 'Clock.Monotonic' vs +-- 'Clock.MonotonicRaw'), which can produce negative deltas or phantom drift on +-- systems where NTP adjusts the non-raw monotonic source. +-- +-- 'TimeSpec' is a @newtype@ over 'Clock.TimeSpec' so that code importing +-- @System.Clock@ directly cannot accidentally pass its timestamps to functions +-- expecting this module's 'TimeSpec', and vice versa. + +module Cardano.Benchmarking.PullFiction.Clock + ( -- * Types. + TimeSpec + -- * Reading the clock. + , getTime + -- * Conversions. + , toNanoSecs + , fromNanoSecs + ) where + +-------------------------------------------------------------------------------- + +----------- +-- clock -- +----------- +import System.Clock qualified as Clock + +-------------------------------------------------------------------------------- + +-- | Opaque monotonic timestamp. 
+-- +-- A @newtype@ wrapper that ensures only timestamps obtained via 'getTime' +-- (which always reads 'Clock.MonotonicRaw') are used in the core library. +-- +-- Internally a 'Clock.TimeSpec' stores two fields: @sec@ (seconds) and +-- @nsec@ (nanoseconds within the current second, 0–999 999 999). The derived +-- 'Num' instance normalizes after every operation: carries and borrows +-- between @sec@ and @nsec@ are handled automatically, so @timeA - timeB@ +-- always produces a correctly normalized result even when the nanoseconds +-- component underflows. +newtype TimeSpec = TimeSpec Clock.TimeSpec + deriving (Eq, Ord, Show, Num) + +-- | Read the monotonic raw clock. All timing in the package goes through this +-- function so a single clock source is used everywhere. +getTime :: IO TimeSpec +getTime = TimeSpec <$> Clock.getTime Clock.MonotonicRaw + +-- | Convert a 'TimeSpec' to __total__ nanoseconds. +-- +-- Returns @sec * 1 000 000 000 + nsec@, not just the @nsec@ field. +-- For example, @TimeSpec 2 500000000@ (2.5 s) yields @2 500 000 000@. +toNanoSecs :: TimeSpec -> Integer +toNanoSecs (TimeSpec ts) = Clock.toNanoSecs ts + +-- | Convert total nanoseconds to a 'TimeSpec'. +-- +-- Splits via @divMod@ into @sec@ and @nsec@ so the result is always +-- normalized (e.g. @fromNanoSecs 2500000000@ gives @TimeSpec 2 500000000@). +fromNanoSecs :: Integer -> TimeSpec +fromNanoSecs = TimeSpec . 
Clock.fromNanoSecs diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Raw.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Raw.hs new file mode 100644 index 00000000000..110b7eb4aef --- /dev/null +++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Raw.hs @@ -0,0 +1,282 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE OverloadedStrings #-} + +-------------------------------------------------------------------------------- + +-- | Raw load-generator configuration parsed from JSON. +-- +-- A plain Aeson parser with no extra logic. Each 'FromJSON' instance is a +-- direct transformation from JSON values to Haskell base types ('String', +-- 'Natural', 'Double', 'Int', etc.); optional fields are 'Maybe' and named +-- collections are @'Map' 'String'@. No defaults are applied, no business rules +-- are checked, and no cross-field relationships are enforced. +-- All of that is the responsibility of +-- "Cardano.Benchmarking.PullFiction.Config.Validated". +-- +-- All data constructors and fields are exported so that test code can build +-- configuration values directly without going through JSON. +-- +-- __Import qualified.__ Field names clash with +-- "Cardano.Benchmarking.PullFiction.Config.Validated" and +-- "Cardano.Benchmarking.PullFiction.Config.Runtime". +module Cardano.Benchmarking.PullFiction.Config.Raw + ( + -- * Config. + Config (..) + + -- * Builder. + , Builder (..) + + -- * RateLimit. + , RateLimit (..) + -- ** TopLevelScope. + , TopLevelScope (..) + -- ** WorkloadScope. + , WorkloadScope (..) + + -- * OnExhaustion. + , OnExhaustion (..) + + -- * Workload. + , Workload (..) + + -- * Target. + , Target (..) 
+ + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Monad (when) +import Numeric.Natural (Natural) +----------- +-- aeson -- +----------- +import Data.Aeson qualified as Aeson +import Data.Aeson ((.:), (.:?)) +import Data.Aeson.Types qualified as Aeson.Types +---------------- +-- containers -- +---------------- +import Data.Map.Strict (Map) + +-------------------------------------------------------------------------------- + +-- | Top-level configuration as parsed from JSON. +-- +-- No invariants are enforced. Use 'validate' from +-- "Cardano.Benchmarking.PullFiction.Config.Validated" to apply business +-- rules and cascading defaults. +data Config = Config + { -- | Raw JSON value describing how to load initial inputs. + -- Interpretation is left to the caller (e.g. @Main.hs@). + initialInputs :: !Aeson.Value + -- | Optional top level @\"builder\"@. + , maybeTopLevelBuilder :: !(Maybe Builder) + -- | Optional top-level @\"rate_limit\"@. + , maybeTopLevelRateLimit :: !(Maybe (Maybe TopLevelScope, RateLimit)) + -- | Optional top-level @\"max_batch_size\"@. + , maybeTopLevelMaxBatchSize :: !(Maybe Natural) + -- | Optional top-level @\"on_exhaustion\"@. + , maybeTopLevelOnExhaustion :: !(Maybe OnExhaustion) + -- | Generator workloads keyed by name. Because Aeson decodes JSON objects + -- into a 'Map', duplicate workload names are silently discarded (last + -- value wins). + , workloads :: !(Map String Workload) + } + deriving (Show, Eq) + +instance Aeson.FromJSON Config where + parseJSON = Aeson.withObject "Config" $ \o -> + Config + <$> o .: "initial_inputs" + <*> o .:? "builder" + <*> Aeson.Types.explicitParseFieldMaybe parseTopLevelRateLimit o "rate_limit" + <*> o .:? "max_batch_size" + <*> o .:? "on_exhaustion" + <*> o .: "workloads" + +-------------------------------------------------------------------------------- + +-- | Opaque builder configuration. 
+-- +-- Carries a @\"type\"@ discriminator and an opaque @\"params\"@ object. +-- Interpretation of the params is the caller's responsibility (see @Main.hs@), +-- just like 'initialInputs'. +data Builder = Builder + { -- | Builder variant name (e.g. @\"value\"@). Non-empty. + builderType :: !String + -- | Opaque params object for the variant. + , builderParams :: !Aeson.Value + } + deriving (Show, Eq) + +instance Aeson.FromJSON Builder where + parseJSON = Aeson.withObject "Builder" $ \o -> do + ty <- o .: "type" :: Aeson.Types.Parser String + when (null ty) $ fail "Builder: type must be non-empty" + Builder ty <$> o .: "params" + +-------------------------------------------------------------------------------- + +-- | Scope of a top-level rate limiter. +-- +-- There is no @Distributed@ scope. A \"distributed\" mode would be equivalent +-- to 'TopPerWorkload' or 'TopPerTarget' but with the TPS divided internally by +-- the number of sub-entities. We avoid that: the config should state the +-- per-entity TPS directly so the value is explicit and auditable. +data TopLevelScope + -- | One rate limiter shared by all targets across all workloads. + = TopShared + -- | Each workload gets its own rate limiter at the full configured TPS. + | TopPerWorkload + -- | Each target gets its own rate limiter at the full configured TPS. + | TopPerTarget + deriving (Show, Eq) + +-- | Scope of a workload-level rate limiter. +-- +-- 'TopPerWorkload' is not valid here (we are already at the workload level). +data WorkloadScope + -- | One rate limiter shared by all targets in the workload. + = WorkloadShared + -- | Each target gets its own rate limiter at the full configured TPS. + | WorkloadPerTarget + deriving (Show, Eq) + +-- | Rate limit configuration. +-- +-- Scope is not part of the rate limit itself; it is carried alongside the +-- 'RateLimit' in the enclosing tuple (e.g. @(TopLevelScope, RateLimit)@). 
+-- +-- The JSON representation uses @\"type\"@ + @\"params\"@ at the same level; +-- the parser flattens the nested @\"params\"@ object into the constructor. +data RateLimit + = TokenBucket + { -- | Target tokens per second. + tps :: !Double + } + deriving (Show, Eq) + +-- | Parse a rate limit from JSON using a context-specific scope parser. +-- +-- Scope is optional (defaults to @\"shared\"@ at validation time) and parsed +-- first; it is not part of 'RateLimit'. +-- +-- At the top level, use 'parseTopLevelRateLimit' (accepts @\"shared\"@, +-- @\"per_workload\"@, @\"per_target\"@). +-- At the workload level, use 'parseWorkloadRateLimit' (accepts @\"shared\"@, +-- @\"per_target\"@). +parseRateLimit + :: (String -> Aeson.Types.Parser scope) + -> Aeson.Value + -> Aeson.Types.Parser (Maybe scope, RateLimit) +parseRateLimit scopeParser = Aeson.withObject "RateLimit" $ \o -> do + maybeScopeStr <- o .:? "scope" + maybeScope <- case maybeScopeStr of + Nothing -> pure Nothing + Just s -> Just <$> scopeParser s + ty <- o .: "type" :: Aeson.Types.Parser String + case ty of + "token_bucket" -> do + p <- o .: "params" + rl <- TokenBucket <$> p .: "tps" + pure (maybeScope, rl) + _ -> fail $ + "RateLimit: unknown type " ++ show ty ++ ", expected \"token_bucket\"" + +parseTopLevelRateLimit :: Aeson.Value + -> Aeson.Types.Parser (Maybe TopLevelScope, RateLimit) +parseTopLevelRateLimit = parseRateLimit topLevelScopeParser + +parseWorkloadRateLimit :: Aeson.Value + -> Aeson.Types.Parser (Maybe WorkloadScope, RateLimit) +parseWorkloadRateLimit = parseRateLimit workloadScopeParser + +topLevelScopeParser :: String -> Aeson.Types.Parser TopLevelScope +topLevelScopeParser "shared" = pure TopShared +topLevelScopeParser "per_workload" = pure TopPerWorkload +topLevelScopeParser "per_target" = pure TopPerTarget +topLevelScopeParser s = fail $ "RateLimit: unknown scope " ++ show s + +workloadScopeParser :: String -> Aeson.Types.Parser WorkloadScope +workloadScopeParser "shared" = pure 
WorkloadShared +workloadScopeParser "per_target" = pure WorkloadPerTarget +workloadScopeParser s = fail $ + "RateLimit: unknown scope " ++ show s + ++ "; at workload level, only \"shared\" and \"per_target\" are valid" + +-------------------------------------------------------------------------------- + +-- | What to do when the payload queue, the output of the builder stage, is +-- exhausted. +data OnExhaustion + -- | Block / wait. + = Block + -- | Fail immediately with an error. + | Error + deriving (Show, Eq) + +instance Aeson.FromJSON OnExhaustion where + parseJSON = Aeson.withText "OnExhaustion" $ \t -> case t of + "block" -> pure Block + "error" -> pure Error + _ -> fail $ + "OnExhaustion: expected \"block\" or \"error\", got " ++ show t + +-------------------------------------------------------------------------------- + +-- | Configuration for a single workload as parsed from JSON. +-- +-- The workload name is the 'Map' key in the parent 'Config'; it is not stored +-- inside the record. +data Workload = Workload + { -- | Optional builder for this workload. + maybeBuilder :: !(Maybe Builder) + -- | Optional rate limit for this workload. + , maybeRateLimit :: !(Maybe (Maybe WorkloadScope, RateLimit)) + -- | Optional max tokens per request. + , maybeMaxBatchSize :: !(Maybe Natural) + -- | Optional on-exhaustion behaviour. + , maybeOnExhaustion :: !(Maybe OnExhaustion) + -- | Targets keyed by name. Duplicate target names are silently discarded + -- (last value wins) because Aeson decodes JSON objects into a 'Map'. + , targets :: !(Map String Target) + } + deriving (Show, Eq) + +instance Aeson.FromJSON Workload where + parseJSON = Aeson.withObject "Workload" $ \o -> + Workload + <$> o .:? "builder" + <*> Aeson.Types.explicitParseFieldMaybe parseWorkloadRateLimit o "rate_limit" + <*> o .:? "max_batch_size" + <*> o .:? 
"on_exhaustion" + <*> o .: "targets" + +-------------------------------------------------------------------------------- + +-- | A target endpoint to connect to. +-- +-- The target name is the 'Map' key in the parent 'Workload'; it is not stored +-- inside the record. +data Target = Target + { -- | Optional per-target @\"max_batch_size\"@ override. + maybeTargetMaxBatchSize :: !(Maybe Natural) + -- | Optional per-target @\"on_exhaustion\"@ override. + , maybeTargetOnExhaustion :: !(Maybe OnExhaustion) + , addr :: !String + , port :: !Int + } + deriving (Show, Eq) + +instance Aeson.FromJSON Target where + parseJSON = Aeson.withObject "Target" $ \o -> + Target + <$> o .:? "max_batch_size" + <*> o .:? "on_exhaustion" + <*> o .: "addr" + <*> o .: "port" diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Runtime.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Runtime.hs new file mode 100644 index 00000000000..2765307a637 --- /dev/null +++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Runtime.hs @@ -0,0 +1,423 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} + +-------------------------------------------------------------------------------- + +-- | Resolved runtime configuration. +-- +-- "Cardano.Benchmarking.PullFiction.Config.Validated" has already validated +-- every invariant and cascaded top-level defaults into workloads. This module +-- creates the STM resources (rate limiters, pipeline queues) that downstream +-- code needs to run the load generator. +-- +-- Rate limiters are created during resolution. Each validated target carries +-- a pre-computed 'Validated.rateLimitKey' that encodes the sharing boundary: +-- +-- * @\@global@: one 'RL.RateLimiter' shared by all targets across all +-- workloads. +-- * @workloadName@: all targets in the workload share one +-- 'RL.RateLimiter' at the configured TPS. 
+-- * @workloadName.targetName@: each target gets its own +-- 'RL.RateLimiter' at the full configured TPS. +-- * No rate limit: each target gets 'RL.newUnlimited'. +-- +-- Pipeline queues are created during resolution: +-- +-- Each workload gets its own unbounded input queue ('TQueue') and bounded +-- payload queue ('TBQueue', capacity 8192). The input queue is unbounded so +-- that bulk-loading initial inputs and recycling outputs never block; the +-- payload queue is the sole source of backpressure in the pipeline. Initial +-- inputs are partitioned equally across workloads (contiguous chunks; the last +-- workload absorbs the remainder). All targets within a workload share the +-- same queues. Spawning builders is the caller's responsibility. +module Cardano.Benchmarking.PullFiction.Config.Runtime + ( -- * Runtime. + Runtime + , config, builders, workloads + -- * Pipe. + , Pipe + , pipeInputQueue, pipePayloadQueue, pipeRecycle + -- * Builder. + , Builder + , parsedBuilder, builderName, builderPipe + -- * OnExhaustion. + , Raw.OnExhaustion (..) + -- * Workload. + , Workload + , workloadName, targets + -- * Target. + , Target + , targetName, targetPipe + , rateLimiter, maxBatchSize, onExhaustion + , targetAddr, targetPort + -- * Resolution. 
+ , resolve + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Data.Foldable (foldlM, toList) +import Numeric.Natural (Natural) +---------------- +-- containers -- +---------------- +import Data.Map.Strict (Map) +import Data.Map.Strict qualified as Map +--------- +-- stm -- +--------- +import Control.Concurrent.STM qualified as STM +--------------------- +-- pull-fiction -- +--------------------- +import Cardano.Benchmarking.PullFiction.Config.Raw qualified as Raw +import Cardano.Benchmarking.PullFiction.Config.Validated qualified as Validated +import Cardano.Benchmarking.PullFiction.Internal.RateLimiter qualified as RL + +-------------------------------------------------------------------------------- + +-- | Fully resolved top-level configuration. +-- +-- Carries the original 'Validated.Config' (for fields like initial inputs +-- that need no resolution) and the resolved workloads. +data Runtime input payload = Runtime + { -- | The original validated configuration. + config :: !(Validated.Config input) + -- | One builder per workload. The builder sits between the input queue + -- and the payload queue: it pulls inputs, produces payloads, and enqueues + -- @(payload, [input])@ pairs for workers to deliver. + -- + -- The list order matches the alphabetical workload name order (same as + -- 'Map.elems' on 'workloads'). + , builders :: [Builder input payload] + -- | Resolved workloads, keyed by workload name. + , workloads :: !(Map String (Workload input payload)) + } + +-- | Pipeline queues for a workload. +-- +-- Holds the input queue, payload queue, and recycling action. +-- All targets within the same workload share the same 'Pipe' instance. +data Pipe input payload = Pipe + { -- | Input queue feeding the builder. The builder reads inputs from here + -- to produce payloads. + -- + -- Unbounded ('TQueue'): input queues must never block on write. 
At + -- startup, all initial inputs are bulk-loaded before any consumer is + -- running; a bounded queue would deadlock when the initial input count + -- exceeds the capacity. During steady-state, 'pipeRecycle' writes recycled + -- inputs back here after each delivery; if this queue were bounded, a + -- burst of recycled inputs could stall the worker thread inside STM. The + -- payload queue is the only bounded queue in the pipeline and provides all + -- the backpressure the builder needs. + pipeInputQueue :: !(STM.TQueue input) + -- | Payload queue: the builder writes @(payload, [input])@ pairs here; + -- workers read from here via the rate-limited fetcher in + -- "Cardano.Benchmarking.PullFiction.WorkloadRunner". Bounded ('TBQueue', + -- capacity 8192); the sole source of backpressure in the pipeline. + , pipePayloadQueue :: !(STM.TBQueue (payload, [input])) + -- | Recycle consumed inputs back to 'pipeInputQueue' after delivery. + -- Returns an STM action so callers can compose it with other transactions. + -- + -- NOTE: recycling happens on /delivery/, not on downstream /confirmation/. + -- This is by design: the pipeline operates in closed-loop mode where + -- consumed inputs are immediately available for the next payload. For + -- example, in a Cardano deployment the node may later drop a submitted + -- transaction from its mempool (e.g. due to rollback or mempool overflow), + -- causing the recycled inputs to reference UTxOs that do not exist + -- on-chain. This is an accepted trade-off: it enables indefinite-duration + -- runs without pre-generating all payloads, at the cost of assuming that + -- delivered payloads will eventually be confirmed. + -- + -- Because the input queue is unbounded ('TQueue'), 'pipeRecycle' never + -- blocks regardless of how many inputs are returned in a single batch. + , pipeRecycle :: [input] -> STM.STM () + } + +-- | Builder resources for one workload. 
+-- +-- A 'Builder' is a 'Pipe' paired with a 'Raw.Builder' that describes the +-- payload shape for the workload. The builder pulls inputs from +-- 'pipeInputQueue', produces a payload, and enqueues the @(payload, [input])@ +-- pair to 'pipePayloadQueue' for workers to deliver. There is exactly one +-- 'Builder' per workload. +data Builder input payload = Builder + { -- | Resolved builder for this workload. + parsedBuilder :: !Raw.Builder + -- | Unique name to be able to label builders. + -- Now, as it is one 'Builder' per 'Workload', it's the 'Workload' name. + , builderName :: String + -- | Pipeline queues for this workload. The builder reads inputs from + -- 'pipeInputQueue' and writes @(payload, [input])@ pairs to + -- 'pipePayloadQueue'. All targets within the workload share this same + -- 'Pipe'. + , builderPipe :: !(Pipe input payload) + } + +-- | Fully resolved workload. +-- +-- All cascading defaults have been applied, rate limiters and pipeline queues +-- have been created for each target. +-- +-- The builder resources ('Pipe' and 'Raw.Builder') live in 'Builder' on the +-- 'Runtime', not here. Each workload has exactly one corresponding 'Builder' +-- (same list order as 'Map.elems' on 'workloads'). +data Workload input payload = Workload + { -- | Unique name identifying this workload. + workloadName :: !String + -- | Resolved targets, keyed by target name. Each target carries its config, + -- rate limiter, and pipeline queues. + , targets :: !(Map String (Target input payload)) + } + +-- | A fully resolved target with rate limiter and pipeline. +-- +-- All targets within the same workload share the same 'Pipe' (same underlying +-- queues). Targets with the same 'Validated.rateLimitKey' also share the same +-- 'RL.RateLimiter' (same TVars). +data Target input payload = Target + { -- | Unique name identifying this target. + targetName :: !String + -- | Pipeline queues for this target. 
+ -- + -- All targets within the same workload share the same 'Pipe' instance (same + -- underlying queues). + , targetPipe :: !(Pipe input payload) + -- | Pre-created rate limiter for this target. + -- + -- Targets with the same 'Validated.rateLimitKey' share the same + -- 'RL.RateLimiter' (same TVars); targets with unique keys each get their + -- own. + , rateLimiter :: !RL.RateLimiter + -- | Resolved max tokens per request for this target. + , maxBatchSize :: !Natural + -- | What to do when the payload queue is exhausted. + , onExhaustion :: !Raw.OnExhaustion + -- | IP address or hostname of the target endpoint. + , targetAddr :: !String + -- | Port number of the target endpoint. + , targetPort :: !Int + } + +instance Show (Target input payload) where + showsPrec _ t = showString (targetName t) + +instance Eq (Target input payload) where + a == b = targetName a == targetName b + +-------------------------------------------------------------------------------- +-- Resolution. +-------------------------------------------------------------------------------- + +-- | Limiter cache: maps a sharing key to an already-created rate limiter. +-- +-- Threaded across workloads so that top-level Shared limiters are reused. +type LimiterCache = Map String RL.RateLimiter + +-- | Resolve a parsed 'Validated.Config' into a 'Runtime' by creating rate +-- limiters and setting up pipeline queues. +-- +-- Initial inputs are taken from 'Validated.initialInputs' (provided by the +-- caller to 'Validated.validate') and partitioned equally across workloads +-- chunks; the last workload absorbs the remainder). +-- +-- Each workload gets its own unbounded input queue ('TQueue') and bounded +-- payload queue ('TBQueue', capacity 8192). The input queue is unbounded so +-- that bulk-loading initial inputs and recycling outputs never block; the +-- payload queue is the sole source of backpressure in the pipeline. 
Spawning +-- builders is the caller's responsibility; each 'Builder' exposes 'pipe' for +-- access to the pipeline queues. +-- +-- All validation and cascading has been done by +-- "Cardano.Benchmarking.PullFiction.Config.Validated". +resolve :: Validated.Config input -> IO (Runtime input payload) +resolve validatedConfig = do + let validatedWorkloadsMap = Validated.workloads validatedConfig + -- Distribute initial inputs equally across workloads, keyed by workload name. + -- Both Maps share the same ascending key order, so zip + fromAscList is safe. + let inputsByWorkloadMap = + let workloadsCount = Map.size validatedWorkloadsMap + inputChunks = partitionInputs + workloadsCount + (toList (Validated.initialInputs validatedConfig)) + in Map.fromAscList + (zip (Map.keys validatedWorkloadsMap) inputChunks) + -- Resolve builders first: each builder creates its own Pipe (input queue, + -- payload queue, recycle action) and loads initial inputs. + resolvedBuilders <- mapM + (\validatedWorkload -> do + let workloadInputs = inputsByWorkloadMap Map.! Validated.workloadName validatedWorkload + workloadBuilder = Validated.builder validatedWorkload + resolveBuilder validatedWorkload workloadBuilder workloadInputs + ) + validatedWorkloadsMap + -- Resolve workloads: assign the pipe from each builder to its targets and + -- resolve each target's rate limiter. The limiter cache is threaded as a + -- pure accumulator so that top-level Shared limiters are reused. + (resolvedWorkloads, _) <- + foldlM + (\(acc, cache) (wlName, validatedWorkload) -> do + let resolvedBuilder = resolvedBuilders Map.! wlName + (resolved, cache') <- + resolveWorkload validatedWorkload cache (builderPipe resolvedBuilder) + pure (Map.insert wlName resolved acc, cache') + ) + (Map.empty, Map.empty) + (Map.toAscList validatedWorkloadsMap) + -- Assemble the final runtime. + pure Runtime + { -- The previous state for reference. 
+ config = validatedConfig + -- One builder per workload (alphabetical order, same as Map.elems). + , builders = Map.elems resolvedBuilders + -- Map String Validated -> Map String Runtime. + , workloads = resolvedWorkloads + } + +-------------------------------------------------------------------------------- +-- Builder resolution. +-------------------------------------------------------------------------------- + +-- | Create the builder resources for a single workload: input queue, payload +-- queue, recycle action, and initial input loading. +-- +-- The input queue is unbounded ('TQueue') so that bulk-loading initial inputs +-- and recycling outputs never block. See 'Pipe' for the full rationale. +-- +-- The payload queue is bounded ('TBQueue', capacity 8192); the sole source of +-- backpressure. The builder blocks here when workers cannot consume fast +-- enough. +-- +-- The returned 'Pipe' is shared with all targets in the workload; this is how +-- closed-loop recycling and queue sharing work. +resolveBuilder + :: Validated.Workload + -> Raw.Builder + -- | Initial inputs for this workload. + -> [input] + -> IO (Builder input payload) +resolveBuilder validatedWorkload validatedBuilder initialInputs = do + -- Input queue: unbounded (TQueue) so that bulk-loading initial inputs and + -- recycling outputs never block. See 'Pipe' for the full rationale. + inputQueue <- STM.newTQueueIO + STM.atomically $ mapM_ (STM.writeTQueue inputQueue) initialInputs + -- Payload queue: bounded (TBQueue, capacity 8192); the sole source of + -- backpressure. The builder blocks here when workers cannot consume fast + -- enough. The capacity must be large enough to absorb GC pauses at high TPS + -- (e.g. 100k TPS drains 256 entries in ~2.5 ms). + payloadQ <- STM.newTBQueueIO 8192 + let thePipe = Pipe + { pipeInputQueue = inputQueue + , pipePayloadQueue = payloadQ + -- pipeRecycle: write recycled inputs back to the unbounded input + -- queue. 
Because TQueue has no capacity limit, this can never stall + -- the worker thread inside STM. + , pipeRecycle = \is -> mapM_ (STM.writeTQueue inputQueue) is + } + pure Builder + { parsedBuilder = validatedBuilder + , builderName = Validated.workloadName validatedWorkload + , builderPipe = thePipe + } + +-------------------------------------------------------------------------------- +-- Workload resolution. +-------------------------------------------------------------------------------- + +-- | Resolve a single workload: assign the pre-created 'Pipe' to each target +-- and resolve each target's rate limiter. +-- +-- The 'Pipe' is created by 'resolveBuilder' and passed in so that the 'Builder' +-- and all targets share the same underlying queues. +-- +-- Cascading defaults and conflict checks have already been performed by +-- "Cardano.Benchmarking.PullFiction.Config.Validated"; this function only +-- creates rate limiters. +resolveWorkload + :: Validated.Workload + -- | Limiter cache (threaded as a pure accumulator). + -> LimiterCache + -- | Pipe for this workload (created by 'resolveBuilder'). + -> Pipe input payload + -> IO (Workload input payload, LimiterCache) +resolveWorkload validatedWorkload cache0 thePipe = do + let wlName = Validated.workloadName validatedWorkload + validatedTargets = Validated.targets validatedWorkload + (resolvedTargets, cache') <- + foldlM + (\(acc, cache) (tName, validatedTarget) -> do + (resolved, cache'') <- + resolveTarget cache thePipe validatedTarget + pure (Map.insert tName resolved acc, cache'') + ) + (Map.empty, cache0) + (Map.toAscList validatedTargets) + pure ( Workload { workloadName = wlName, targets = resolvedTargets } + , cache' + ) + +-------------------------------------------------------------------------------- +-- Target resolution. 
+-------------------------------------------------------------------------------- + +-- | Resolve a single target: look up or create its rate limiter from the +-- cache, then build the 'Target' record. +resolveTarget + :: LimiterCache + -> Pipe input payload + -> Validated.Target + -> IO (Target input payload, LimiterCache) +resolveTarget cache thePipe validatedTarget = do + (limiter, cache') <- getOrCreateLimiter cache validatedTarget + pure ( Target + { targetName = Validated.targetName validatedTarget + , targetPipe = thePipe + , rateLimiter = limiter + , maxBatchSize = Validated.targetMaxBatchSize validatedTarget + , onExhaustion = Validated.onExhaustion validatedTarget + , targetAddr = Validated.addr validatedTarget + , targetPort = Validated.port validatedTarget + } + , cache' + ) + +-- | Look up or create a 'RL.RateLimiter' for a target. +-- +-- * 'Nothing' rate limit source → 'RL.newUnlimited' (no cache entry). +-- * Otherwise, use the pre-computed 'Validated.rateLimitKey' as the cache +-- key. If the key already exists the existing limiter is reused; otherwise a +-- 'RL.newTokenBucket' is created and inserted. +getOrCreateLimiter + :: LimiterCache -> Validated.Target + -> IO (RL.RateLimiter, LimiterCache) +getOrCreateLimiter cache target = + case Validated.rateLimitSource target of + Nothing -> pure (RL.newUnlimited, cache) + Just src -> do + let key = Validated.rateLimitKey src + tpsValue = Raw.tps (Validated.rateLimit src) + case Map.lookup key cache of + Just existing -> pure (existing, cache) + Nothing -> do + limiter <- RL.newTokenBucket tpsValue + pure (limiter, Map.insert key limiter cache) + +-------------------------------------------------------------------------------- +-- Input partitioning. +-------------------------------------------------------------------------------- + +-- | Split a list into @n@ contiguous chunks of roughly equal size. +-- The last chunk absorbs any remainder. 
+partitionInputs :: Int -> [a] -> [[a]] +partitionInputs n xs + | n <= 1 = [xs] + | otherwise = go xs n + where + chunkSize = length xs `div` n + go remaining 1 = [remaining] + go remaining k = + let (chunk, rest) = splitAt chunkSize remaining + in chunk : go rest (k - 1) diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Validated.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Validated.hs new file mode 100644 index 00000000000..a59a7783982 --- /dev/null +++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Config/Validated.hs @@ -0,0 +1,375 @@ +{-# LANGUAGE ImportQualifiedPost #-} + +-------------------------------------------------------------------------------- + +-- | Validated load-generator configuration with cascading defaults applied. +-- +-- Types mirror "Cardano.Benchmarking.PullFiction.Config.Raw" but with +-- hidden data constructors. The only way to obtain values is through +-- 'validate', which guarantees that every value has passed validation (e.g. +-- @tps > 0@, @max_batch_size >= 1@, valid names). +-- +-- Cascading defaults are resolved here: +-- +-- * @builder@: setting it at both the top level and the workload level is an +-- error; otherwise workload value > top level value > error. +-- * @rate_limit@: setting it at both the top level and the workload level is an +-- error; otherwise the workload inherits the top-level value (or 'Nothing' +-- for unlimited). +-- * @max_batch_size@: target value > workload value > top-level value > +-- default (1). +-- * @on_exhaustion@: target value > workload value > top-level value > +-- default (@\"block\"@). +-- +-- After 'validate', every 'Target' has a concrete @targetMaxBatchSize@ and +-- every 'Workload' has a concrete @builder@ (no 'Maybe'). +-- +-- 'Workload' and 'Config' store their children in 'Map's keyed by name +-- (alphabetical order; JSON object key order is not preserved). 
+-- +-- __Import qualified.__ Field names clash with +-- "Cardano.Benchmarking.PullFiction.Config.Raw" and +-- "Cardano.Benchmarking.PullFiction.Config.Runtime". +module Cardano.Benchmarking.PullFiction.Config.Validated + ( + -- * Config. + Config + , initialInputs, workloads + + -- * RateLimitSource. + , RateLimitSource (..) + + -- * Workload. + , Workload + , workloadName, builder, targets + + -- * Target. + , Target + , targetName, rateLimitSource, targetMaxBatchSize, onExhaustion + , addr, port + + -- * Validation. + , validate + + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Monad (when) +import Data.List.NonEmpty (NonEmpty) +import Data.Maybe (fromMaybe) +import Numeric.Natural (Natural) +---------------- +-- containers -- +---------------- +import Data.Map.Strict (Map) +import Data.Map.Strict qualified as Map +--------------------- +-- pull-fiction -- +--------------------- +import Cardano.Benchmarking.PullFiction.Config.Raw qualified as Raw + +-------------------------------------------------------------------------------- +-- Defaults. +-------------------------------------------------------------------------------- + +-- | Default scope for a top-level rate limiter when not specified in JSON. +defaultTopLevelScope :: Raw.TopLevelScope +defaultTopLevelScope = Raw.TopShared + +-- | Default scope for a workload-level rate limiter when not specified in JSON. +defaultWorkloadScope :: Raw.WorkloadScope +defaultWorkloadScope = Raw.WorkloadShared + +-- | Default maximum batch size when neither the workload nor the top-level +-- config specifies one. +defaultMaxBatchSize :: Natural +defaultMaxBatchSize = 1 + +-- | Default on-exhaustion behaviour when not specified at any level. +defaultOnExhaustion :: Raw.OnExhaustion +defaultOnExhaustion = Raw.Block + +-------------------------------------------------------------------------------- + +-- | Top-level configuration. 
+-- +-- See 'Raw.Config' for field semantics. All invariants have been checked and +-- cascading defaults applied by 'validate'. +data Config input = Config + { -- | Initial inputs provided by the caller and stored by 'validate'. + initialInputs :: !(NonEmpty input) + -- | Workloads keyed by name. Iteration order is alphabetical (Map order). + , workloads :: !(Map String Workload) + } + deriving (Show, Eq) + +-------------------------------------------------------------------------------- + +-- | Resolved rate limit for a target, with its sharing key pre-computed. +-- +-- The 'rateLimitKey' encodes the sharing boundary using fully qualified names: +-- +-- * @\@global@: one limiter shared by all targets across all workloads. +-- * @workloadName@: one limiter per workload (each shared by all its targets). +-- * @workloadName.targetName@: one limiter per target. +-- +-- Because workload and target names may not start with @\@@ or contain @.@ +-- (enforced at validation time), these keys are guaranteed to be unique. +data RateLimitSource = RateLimitSource + { -- | Cache key for limiter sharing (the fully qualified name). + rateLimitKey :: !String + -- | Validated rate limit parameters. + , rateLimit :: !Raw.RateLimit + } + deriving (Show, Eq) + +-------------------------------------------------------------------------------- + +-- | A single workload with cascading defaults applied. +-- +-- 'builder' is always concrete (no 'Maybe'); cascading from the top level +-- config is performed by 'validate'. +data Workload = Workload + { workloadName :: !String + -- | Resolved builder: workload value > top level value. + -- Opaque; interpretation is the caller's responsibility. + , builder :: !Raw.Builder + -- | Targets keyed by name. Iteration order is alphabetical. + , targets :: !(Map String Target) + } + deriving (Show, Eq) + +-------------------------------------------------------------------------------- + +-- | A target endpoint to connect to. 
+--
+-- 'targetMaxBatchSize' and 'onExhaustion' are concrete (no 'Maybe'). Cascading
+-- defaults have been applied by 'validate'.
+data Target = Target
+  { targetName :: !String
+    -- | Resolved rate limit source ('Nothing' means unlimited).
+  , rateLimitSource :: !(Maybe RateLimitSource)
+    -- | Resolved max batch size: target value > workload value > top-level value
+    -- > default.
+  , targetMaxBatchSize :: !Natural
+    -- | Resolved on-exhaustion behaviour: target value > workload value >
+    -- top-level value > 'Raw.Block'.
+  , onExhaustion :: !Raw.OnExhaustion
+  , addr :: !String
+  , port :: !Int
+  }
+  deriving (Show, Eq)
+
+--------------------------------------------------------------------------------
+-- Validation.
+--------------------------------------------------------------------------------
+
+-- | Validate a 'Raw.Config', enforce all business rules, and cascade top-level
+-- defaults into workloads.
+--
+-- Input loading is the caller's responsibility; pass the already-loaded
+-- inputs directly. This keeps the validation layer pure and decouples it from
+-- IO concerns like key loading and network magic interpretation.
+--
+-- Returns 'Left' with a descriptive error message on the first violation.
+validate
+  -- | Raw configuration as parsed from JSON.
+  :: Raw.Config
+  -- | Initial inputs (already loaded by the caller).
+  -> NonEmpty input
+  -> Either String (Config input)
+validate raw inputs = do
+  -- Builder (opaque; passed through without interpretation).
+  let maybeTopBuilder = Raw.maybeTopLevelBuilder raw
+  -- Rate limit.
+  maybeTopRateLimit <- case Raw.maybeTopLevelRateLimit raw of
+    Nothing -> pure Nothing
+    Just (maybeTopScope, rawRL) -> do
+      validatedRL <- validateRateLimit rawRL
+      let topScope = fromMaybe defaultTopLevelScope maybeTopScope
+      pure (Just (topScope, validatedRL))
+  -- Max batch size.
+ let topMaxBatchSize = fromMaybe + defaultMaxBatchSize + (Raw.maybeTopLevelMaxBatchSize raw) + when (topMaxBatchSize == 0) $ + Left "Config: max_batch_size must be >= 1" + -- On-exhaustion behaviour. + let topOnExhaustion = fromMaybe + defaultOnExhaustion + (Raw.maybeTopLevelOnExhaustion raw) + -- Workloads. + when (Map.null (Raw.workloads raw)) $ + Left "Config: workloads must not be empty" + workloadsMap <- Map.traverseWithKey + (\wName workload -> validateWorkload + wName maybeTopBuilder maybeTopRateLimit topMaxBatchSize topOnExhaustion workload + ) + (Raw.workloads raw) + -- Inputs must cover all workloads: Runtime.partitionInputs splits them into + -- contiguous chunks, so fewer inputs than workloads leaves some with zero. + let inputCount = length inputs + when (inputCount < Map.size workloadsMap) $ + Left $ "Config: not enough initial inputs (" ++ show inputCount + ++ ") for " ++ show (Map.size workloadsMap) ++ " workload(s)" + -- Final validated config. + pure Config + { initialInputs = inputs + , workloads = workloadsMap + } + +-------------------------------------------------------------------------------- + +validateWorkload + -- | Workload name (from Map key). + :: String + -- | Top level builder (opaque). + -> Maybe Raw.Builder + -- | Validated top-level scope / rate limit. + -> Maybe (Raw.TopLevelScope, Raw.RateLimit) + -- | Resolved top-level max batch size. + -> Natural + -- | Resolved top-level on-exhaustion behaviour. + -> Raw.OnExhaustion + -> Raw.Workload + -> Either String Workload +validateWorkload name maybeTopBuilder maybeTopRateLimit topMaxBatchSize topOnExhaustion rawWorkload = do + -- Name. + validateName "Workload" name + -- Builder conflict: setting at both levels is ambiguous. + case (maybeTopBuilder, Raw.maybeBuilder rawWorkload) of + (Just _, Just _) -> + Left $ "builder set at both the top level and in workload: " ++ show name + _ -> pure () + -- Resolve builder: workload level > top level > error. 
+ resolvedBuilder <- case Raw.maybeBuilder rawWorkload of + Just parsedBuilder -> pure parsedBuilder + Nothing -> case maybeTopBuilder of + Just topLevelBuilder -> pure topLevelBuilder + Nothing -> Left $ + "Workload " ++ show name + ++ ": builder is required (no workload or top level default)" + -- Rate-limit conflict: setting at both levels is ambiguous. + case (maybeTopRateLimit, Raw.maybeRateLimit rawWorkload) of + (Just _, Just _) -> + Left $ + "rate_limit is set at both the top level and in workload: " ++ show name + _ -> pure () + -- Resolve effective rate limit: workload-level > top-level > unlimited. + -- The scope and validated rate limit are cascaded to validateTarget, which + -- computes the final RateLimitSource (including the cache key). + effectiveRateLimit <- case Raw.maybeRateLimit rawWorkload of + Just (maybeWlScope, rawRL) -> do + validatedRL <- validateRateLimit rawRL + let wlScope = fromMaybe defaultWorkloadScope maybeWlScope + pure (Just (Right wlScope, validatedRL)) + Nothing -> case maybeTopRateLimit of + Just (topScope, topRL) -> + pure (Just (Left topScope, topRL)) + Nothing -> + pure Nothing + -- Cascade max_batch_size: workload > top-level (always concrete). + -- The per-target override is applied inside validateTarget. + case Raw.maybeMaxBatchSize rawWorkload of + Just 0 -> Left "Workload: max_batch_size must be >= 1" + _ -> pure () + let workloadBatchSize = fromMaybe + topMaxBatchSize + (Raw.maybeMaxBatchSize rawWorkload) + -- Cascade on_exhaustion: workload > top-level. + let workloadOnExhaustion = fromMaybe + topOnExhaustion + (Raw.maybeOnExhaustion rawWorkload) + -- Targets. + when (Map.null (Raw.targets rawWorkload)) $ + Left $ "Workload " ++ show name ++ ": targets must not be empty" + targetsMap <- Map.traverseWithKey + (\tName target -> validateTarget + name tName effectiveRateLimit workloadBatchSize workloadOnExhaustion target + ) + (Raw.targets rawWorkload) + -- Final validated workload. 
+ pure Workload + { workloadName = name + , builder = resolvedBuilder + , targets = targetsMap + } + +validateTarget + :: String -- ^ Workload name (for cache key computation). + -> String -- ^ Target name (from Map key). + -> Maybe (Either Raw.TopLevelScope Raw.WorkloadScope, Raw.RateLimit) + -> Natural -- ^ Resolved max batch size. + -> Raw.OnExhaustion -- ^ Resolved on-exhaustion behaviour. + -> Raw.Target + -> Either String Target +validateTarget wlName tgtName effectiveRateLimit workloadBatchSize workloadOnExhaustion rawTarget = do + -- Name. + validateName "Target" tgtName + -- Resolve rate limit source with pre-computed cache key. + -- The key scheme uses fully-qualified names: + -- @global → one limiter for everything + -- workloadName → one per workload + -- workloadName.target → one per target + let maybeRateLimitSource = case effectiveRateLimit of + Nothing -> Nothing + Just (scope, rl) -> Just $ case scope of + Left Raw.TopShared -> RateLimitSource "@global" rl + Left Raw.TopPerWorkload -> RateLimitSource wlName rl + Left Raw.TopPerTarget -> RateLimitSource (wlName ++ "." ++ tgtName) rl + Right Raw.WorkloadShared -> RateLimitSource wlName rl + Right Raw.WorkloadPerTarget -> RateLimitSource (wlName ++ "." ++ tgtName) rl + -- Cascade max_batch_size: target > workload (always concrete). + case Raw.maybeTargetMaxBatchSize rawTarget of + Just 0 -> Left $ + "Target " ++ show tgtName + ++ ": max_batch_size must be >= 1" + _ -> pure () + let resolvedBatchSize = fromMaybe + workloadBatchSize + (Raw.maybeTargetMaxBatchSize rawTarget) + -- Cascade on_exhaustion: target > workload (always concrete). + let resolvedOnExhaustion = fromMaybe + workloadOnExhaustion + (Raw.maybeTargetOnExhaustion rawTarget) + -- Final validated target. 
+ pure Target + { targetName = tgtName + , rateLimitSource = maybeRateLimitSource + , targetMaxBatchSize = resolvedBatchSize + , onExhaustion = resolvedOnExhaustion + , addr = Raw.addr rawTarget + , port = Raw.port rawTarget + } + +-------------------------------------------------------------------------------- + +validateRateLimit :: Raw.RateLimit -> Either String Raw.RateLimit +validateRateLimit rl@(Raw.TokenBucket rawTps) = do + when (isNaN rawTps) $ + Left "RateLimit: tps must be a number, got NaN" + when (isInfinite rawTps) $ + Left "RateLimit: tps must be finite" + when (rawTps <= 0) $ + Left "RateLimit: tps must be > 0" + pure rl + +-- | Validate that a name does not start with @\'@\'@ or contain @\'.\'@. +-- +-- These characters are reserved for the rate-limit cache key scheme +-- (see 'RateLimitSource'). +validateName :: String -> String -> Either String () +validateName context name = do + case name of + [] -> + Left $ context ++ ": name must be non-empty" + ('@':_) -> + Left $ context ++ ": name must not start with '@', got " ++ show name + _ -> pure () + when ('.' `elem` name) $ + Left $ context ++ ": name must not contain '.', got " ++ show name + diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Internal/RateLimiter.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Internal/RateLimiter.hs new file mode 100644 index 00000000000..aad497aa142 --- /dev/null +++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/Internal/RateLimiter.hs @@ -0,0 +1,247 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} + +-------------------------------------------------------------------------------- + +-- | Server-side rate limiter for pull-based token dispensing. +-- +-- In traffic control terminology, a /rate limiter/ (or /policer/) enforces a +-- maximum admission rate on incoming requests. 
It is /reactive/: it does not +-- initiate or schedule transmissions; it responds to each request by either +-- admitting or delaying it against a configured ceiling. This contrasts with a +-- /traffic shaper/ (or /pacer/), which sits on the sender side and proactively +-- schedules outgoing emissions (RFC 2475, s. 2.3.3.3). +-- +-- In a pull-based system the downstream consumer drives the conversation by +-- requesting tokens when it has capacity (e.g. Cardano's TxSubmission2 +-- mini-protocol, where the node pulls transactions when its mempool has room). +-- The 'RateLimiter' enforces a tokens-per-second ceiling on dispensed tokens, +-- ensuring the generator does not exceed the configured rate regardless of how +-- aggressively or unevenly consumers poll. Because the generator never pushes, +-- sender-side shaping is not applicable; the appropriate discipline is +-- receiver-side rate limiting. +-- +-- This module computes delays but never sleeps. Sleeping is the caller's +-- responsibility (the rate-limited fetcher in +-- "Cardano.Benchmarking.PullFiction.WorkloadRunner" applies the delay via +-- 'threadDelay' outside the STM transaction), keeping the STM critical section +-- short and the rate limiter testable in pure STM. +-- +-- The 'TBQueue' is supplied as an explicit parameter to 'waitToken' and +-- 'tryWaitToken', so that queue reads and rate-limit accounting happen in a +-- single atomic STM transaction while keeping the limiter decoupled from any +-- particular queue. 
+module Cardano.Benchmarking.PullFiction.Internal.RateLimiter + ( RateLimiter, newTokenBucket, newUnlimited + , waitToken, tryWaitToken + ) where + +-------------------------------------------------------------------------------- + +--------- +-- stm -- +--------- +import Control.Concurrent.STM qualified as STM +--------------------- +-- pull-fiction -- +--------------------- +import Cardano.Benchmarking.PullFiction.Clock qualified as Clock + +-------------------------------------------------------------------------------- + +-- | A rate limiter for pull-based (server-side) token dispensing. +-- +-- Two constructors are provided: 'TokenBucket' for a configured TPS ceiling, +-- and 'Unlimited' for unconstrained throughput. +data RateLimiter + = TokenBucket + -- | Emission interval T in nanoseconds (cached). + !Integer + -- | Start time (set on first claim). + !(STM.TVar (Maybe Clock.TimeSpec)) + -- | Tokens sent so far. + !(STM.TVar Integer) + -- | No rate limit. + | Unlimited + +-- | Create a token-bucket rate limiter targeting @tps@ tokens per second. +-- +-- Uses the Generic Cell Rate Algorithm (GCRA), also known as the virtual +-- scheduling algorithm (ITU-T I.371). Equivalent to Turner's leaky bucket as +-- a meter (Turner 1986, "New Directions in Communications", IEEE Comm. Mag. +-- 24(10)). +-- +-- The algorithm tracks a /Theoretical Arrival Time/ (TAT), the earliest +-- time the next token is allowed: +-- +-- @ +-- TAT(0) = now -- first token, no delay +-- TAT(N+1) = max(TAT(N), now) + T -- T = emission interval = 1\/rate +-- allow iff TAT <= now + τ -- τ = burst tolerance +-- @ +-- +-- With @τ = 0@ (the current implementation) no burst is allowed: each token +-- must wait until its scheduled time. Adding @τ > 0@ would permit up to +-- @τ / T@ tokens to arrive ahead of schedule (the dual token-bucket +-- formulation with bucket depth @τ / T@). +-- +-- TODO: Add a @maxBurst@ parameter to the rate limit config. 
The burst +-- tolerance becomes @τ = maxBurst * T@, and the admission check becomes +-- @TAT <= now + τ@. +-- +-- The start time is captured on the first token claim, so any delay between +-- limiter creation and the first request does not cause a burst of catch-up +-- tokens. +-- +-- Performance: @nanosPerToken@ (the emission interval @T@) is pre-computed +-- once at construction via @round (1e9 / tps)@. This trades a tiny rounding +-- error (at most +/-0.5 ns per token) for O(1) integer multiplication in +-- 'nextTokenTargetTime', avoiding 'Rational' division that would otherwise +-- dominate at high token counts. +newTokenBucket :: Double -> IO RateLimiter +newTokenBucket tps = do + startVar <- STM.newTVarIO Nothing + countVar <- STM.newTVarIO 0 + let !nanosPerToken = round (1_000_000_000 / tps) :: Integer + pure (TokenBucket nanosPerToken startVar countVar) + +-- | An unlimited rate limiter (never blocks on rate). +newUnlimited :: RateLimiter +newUnlimited = Unlimited + +-------------------------------------------------------------------------------- + +-- | Compute the target time for the next token given a pre-computed +-- nanoseconds-per-token interval. +-- +-- @tokensSent@ is the number of tokens already dispatched. Token 0 is handled +-- by the first-call special case in 'waitToken' (dispatched immediately at +-- @startTime@, delay 0). For all subsequent tokens, @tokensSent@ equals the +-- 0-indexed position of the next token: token 1 has @tokensSent = 1@, so its +-- target time is @startTime + 1 * T@. In general: +-- +-- @ +-- targetTime(N) = startTime + N * nanosPerToken +-- @ +-- +-- This is a single O(1) integer multiply + add; see the performance note on +-- 'TokenBucket' for the precision/performance trade-off. 
+nextTokenTargetTime :: Integer -> Clock.TimeSpec -> Integer -> Clock.TimeSpec +nextTokenTargetTime nanosPerToken startTime tokensSent = + let !offset = Clock.fromNanoSecs (tokensSent * nanosPerToken) + in startTime + offset + +-------------------------------------------------------------------------------- + +-- | Try to claim the next token. Runs entirely in STM, never retries. +-- +-- The 'TBQueue' is passed as a parameter so the caller controls which queue is +-- read; the rate limiter only tracks rate-limiting state. +-- +-- Returns @Just (token, delay)@ when a token is available, where @delay@ is how +-- long the caller should sleep to respect the TPS rate (zero when behind +-- schedule). Returns 'Nothing' when the queue is empty; the caller (typically +-- the rate-limited fetcher in +-- "Cardano.Benchmarking.PullFiction.WorkloadRunner") is responsible for +-- sleeping and retrying. +-- +-- __Fairness property__: the token is consumed and the rate-limit slot is +-- claimed in a single atomic STM transaction. This means that once a thread +-- obtains token N, no other thread can obtain an /earlier/ slot: the delay +-- assigned to token N is always <= the delay for token N+1. Threads that enter +-- concurrently are serialised by STM; each sees a strictly increasing +-- @tokensSent@ counter. The consume-before-delay order therefore provides +-- FIFO-fair scheduling: the thread that wins the STM commit gets the earliest +-- available slot, and no later arrival can jump ahead of it. +-- +-- By never blocking inside STM the @timeNow@ timestamp (captured by the caller +-- just before entering 'STM.atomically') stays accurate, which prevents +-- stale-clock TPS drift that would occur if the transaction retried while waiting +-- for a queue write. +waitToken :: Clock.TimeSpec + -> RateLimiter + -> STM.TBQueue token + -> STM.STM (Maybe (token, Clock.TimeSpec)) +-- No TPS: try to read a token without blocking. 
+waitToken _ Unlimited queue = do + maybeToken <- STM.tryReadTBQueue queue + case maybeToken of + Nothing -> pure Nothing + Just token -> pure (Just (token, 0)) +-- With a TPS. +waitToken timeNow (TokenBucket nanosPerToken startTVar countTVar) queue = do + maybeToken <- STM.tryReadTBQueue queue + case maybeToken of + Nothing -> pure Nothing + Just token -> do + maybeStartTime <- STM.readTVar startTVar + case maybeStartTime of + -- Rate limiter running, claim a rate-limit slot. + Just startTime -> do + tokensSent <- STM.readTVar countTVar + STM.writeTVar countTVar (tokensSent + 1) + let !targetTime = nextTokenTargetTime + nanosPerToken startTime tokensSent + !delay = max 0 (targetTime - timeNow) + pure (Just (token, delay)) + -- First call, record start time. + Nothing -> do + STM.writeTVar startTVar (Just timeNow) + STM.writeTVar countTVar 1 + pure (Just (token, 0)) + +-- | Non-blocking token request with rate-limit check. Runs entirely in STM. +-- +-- The 'TBQueue' is passed as a parameter so the caller controls which queue is +-- read; the rate limiter only tracks rate-limiting state. +-- +-- Unlike 'waitToken' (which always tries to read a token), this function checks +-- the rate limit /first/ and returns @Left delay@ without touching the queue +-- when ahead of schedule. This is the primary path for non-blocking callers +-- that should not consume tokens faster than the target TPS. +-- +-- Returns: +-- +-- * @Left delay@ when rate-limited (ahead of schedule), where @delay@ is how +-- long until the next token slot. +-- * @Right Nothing@ when not rate-limited but the queue is empty. +-- * @Right (Just token)@ when not rate-limited and a token was claimed. +tryWaitToken :: Clock.TimeSpec + -> RateLimiter + -> STM.TBQueue token + -> STM.STM (Either Clock.TimeSpec (Maybe token)) +-- No TPS. +tryWaitToken _ Unlimited queue = Right <$> STM.tryReadTBQueue queue +-- With a TPS. 
+tryWaitToken timeNow + (TokenBucket nanosPerToken startTVar countTVar) + queue = do + maybeStartTime <- STM.readTVar startTVar + case maybeStartTime of + -- Rate limiter running, check if ahead of schedule. + Just startTime -> do + tokensSent <- STM.readTVar countTVar + let !targetTime = nextTokenTargetTime + nanosPerToken startTime tokensSent + if targetTime > timeNow + -- Ahead of schedule. + then pure (Left (targetTime - timeNow)) + -- Available headroom. + else do + maybeToken <- STM.tryReadTBQueue queue + case maybeToken of + Nothing -> pure (Right Nothing) + Just token -> do + STM.writeTVar countTVar (tokensSent + 1) + pure (Right (Just token)) + -- First call, no rate limit to check. + Nothing -> do + maybeToken <- STM.tryReadTBQueue queue + case maybeToken of + Nothing -> pure (Right Nothing) + Just token -> do + -- Record the time only if a token was available. + STM.writeTVar startTVar (Just timeNow) + STM.writeTVar countTVar 1 + pure (Right (Just token)) diff --git a/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/WorkloadRunner.hs b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/WorkloadRunner.hs new file mode 100644 index 00000000000..6415bf173fd --- /dev/null +++ b/bench/tx-centrifuge/lib/pull-fiction/Cardano/Benchmarking/PullFiction/WorkloadRunner.hs @@ -0,0 +1,293 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.PullFiction.WorkloadRunner + ( TargetWorker, runWorkload + , QueueStarved(..) 
+ ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Concurrent (myThreadId, threadDelay) +import Control.Exception (Exception, throwIO) +import Control.Monad (when) +import GHC.Conc (labelThread) +----------- +-- async -- +----------- +import Control.Concurrent.Async qualified as Async +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +--------- +-- stm -- +--------- +import Control.Concurrent.STM qualified as STM +--------------------- +-- pull-fiction -- +--------------------- +import Cardano.Benchmarking.PullFiction.Config.Raw qualified as Raw +import Cardano.Benchmarking.PullFiction.Config.Runtime qualified as Runtime +import Cardano.Benchmarking.PullFiction.Clock qualified as Clock +import Cardano.Benchmarking.PullFiction.Internal.RateLimiter qualified as RL + +-------------------------------------------------------------------------------- +-- Queue-starved exception. +-------------------------------------------------------------------------------- + +-- | Fatal exception thrown (in 'Raw.Error' on-exhaustion mode) when the payload +-- queue is empty and the rate limiter has authorized a fetch. This means the +-- payload builder cannot produce payloads fast enough for the configured TPS +-- demand. The caller must either reduce TPS, increase the number of initial +-- inputs, increase the payload queue capacity, or parallelise the builder. +-- +-- In 'Raw.Block' mode, 'blockingFetch' retries until a token becomes +-- available; 'nonBlockingFetch' returns 'Nothing'. +data QueueStarved = QueueStarved !String + deriving (Show) + +instance Exception QueueStarved + +-------------------------------------------------------------------------------- +-- RateLimitedFetcher. +-------------------------------------------------------------------------------- + +-- | Rate-limited fetch interface over caller-supplied queues. 
+-- +-- A 'RateLimitedFetcher' encapsulates rate-limiting state but has __no opinion +-- about which queue to read from__. The 'TBQueue' is a plain parameter on every +-- call, so the caller is free to pass: +-- +-- * the same shared queue on every call, +-- * a different per-target queue for each worker, +-- * or even a different queue from one call to the next. +-- +-- The fetcher never captures, stores, or inspects the queue; it only reads one +-- token from whatever queue it receives and applies the rate limit. +-- +-- Two fetch modes are provided for pull-based protocols that distinguish +-- between "I need at least one item" and "give me more if available": +-- +-- * 'blockingFetch': claims a rate-limit slot, sleeps for the required delay, +-- and returns the token. When the queue is empty, behaviour depends on the +-- 'Raw.OnExhaustion' mode: 'Raw.Error' throws 'QueueStarved'; 'Raw.Block' +-- retries until a token becomes available. +-- +-- * 'nonBlockingFetch': returns 'Nothing' when rate-limited (ahead of schedule) +-- or when the queue is empty in 'Raw.Block' mode. In 'Raw.Error' mode, throws +-- 'QueueStarved' if not rate-limited but the queue is empty. +data RateLimitedFetcher token = RateLimitedFetcher + { -- | Claim a rate-limit slot, sleep for the computed delay, return one token + -- from the given queue. Retries or throws on empty queue depending on the + -- 'Raw.OnExhaustion' mode. + blockingFetch :: STM.TBQueue token -> IO token + -- | Return @Just token@ if the rate limit allows, 'Nothing' if ahead of + -- schedule or if the queue is empty in 'Raw.Block' mode. Throws + -- 'QueueStarved' on empty queue in 'Raw.Error' mode. + , nonBlockingFetch :: STM.TBQueue token -> IO (Maybe token) + } + +-- | Build a 'RateLimitedFetcher' from a 'RL.RateLimiter'. +-- +-- On the hot path both modes use non-blocking STM ('tryReadTBQueue', never +-- 'readTBQueue' / 'retry'), so no thread parks inside STM while tokens are +-- flowing. 
The sole exception is the 'Raw.Block' starvation fallback in +-- 'blockingFetch', which uses 'peekTBQueue' as an event-driven gate (see the +-- inline comment for the trade-off analysis). See +-- "Cardano.Benchmarking.PullFiction.Internal.RateLimiter" for the full design. +mkRateLimitedFetcher :: Raw.OnExhaustion + -> RL.RateLimiter + -> RateLimitedFetcher token +mkRateLimitedFetcher onExhaustion rateLimiter = RateLimitedFetcher + { blockingFetch = goBlocking + , nonBlockingFetch = goNonBlocking + } + where + goBlocking queue = do + now <- Clock.getTime + result <- STM.atomically $ RL.waitToken now rateLimiter queue + case result of + Just (token, delay) -> do + -- Delays this thread and not the global RateLimiter. + threadDelayNanos (Clock.toNanoSecs delay) + pure token + -- The queue is empty. + Nothing -> case onExhaustion of + Raw.Error -> + -- The payload queue is empty. The payload builder cannot keep up + -- with the configured TPS demand. At this stage of the library we + -- treat this as a fatal error rather than silently degrading + -- throughput; the user must either reduce TPS, increase the number + -- of initial inputs, or parallelise the builder. + throwIO $ QueueStarved + "blockingFetch: payload queue empty, cannot keep up with TPS." + Raw.Block -> do + -- Gate: park until the builder produces at least one payload. + -- + -- 'peekTBQueue' retries (parks the thread via STM retry) until the + -- queue is non-empty, then succeeds without consuming the item. + -- This is event-driven: the thread uses zero CPU while parked and + -- wakes as soon as the builder writes. + -- + -- The stale-clock concern documented in 'RL.waitToken' does not + -- apply here: 'goBlocking' captures a fresh timestamp on every + -- iteration, so the rate limiter always sees an accurate clock. + -- Fairness is likewise unaffected: the rate limiter's FIFO property + -- comes from the atomic slot claiming inside 'waitToken', not from + -- the retry mechanism. 
+ -- + -- Trade-off: when N workers are starved on the same queue, a single + -- builder write wakes all N (GHC's STM wake-all). N-1 fail + -- 'tryReadTBQueue' inside 'waitToken' and re-park. This is bounded + -- by the number of targets per workload and is far cheaper than + -- polling ('threadDelay' would cause N wakeups per requested sleep + -- time regardless of builder activity). + _ <- STM.atomically $ STM.peekTBQueue queue + goBlocking queue + goNonBlocking queue = do + now <- Clock.getTime + result <- STM.atomically $ RL.tryWaitToken now rateLimiter queue + case result of + -- Rate limited, discard the nanoseconds and return. + Left _ -> pure Nothing + -- Not rate limited and the queue was not empty. + Right (Just token) -> pure (Just token) + -- The queue is empty. + Right Nothing -> case onExhaustion of + Raw.Error -> + -- The payload queue is empty. The payload builder cannot keep up + -- with the configured TPS demand. At this stage of the library we + -- treat this as a fatal error rather than silently degrading + -- throughput; the user must either reduce TPS, increase the number + -- of initial inputs, or parallelise the builder. + throwIO $ QueueStarved + "nonBlockingFetch: payload queue empty, cannot keep up with TPS." + Raw.Block -> pure Nothing + +-- | Safely sleep for a duration in nanoseconds. +-- +-- Converts nanoseconds to microseconds for 'threadDelay'. To prevent integer +-- overflow on 32-bit systems (where 'Int' maxes out at ~2147s), the delay is +-- clamped to 'maxBound :: Int'. This ensures that even with extremely low TPS +-- configurations (TPS below ~0.0005), the generator sleeps for the maximum +-- representable period rather than wrapping around to a small or negative value +-- and triggering an accidental token burst. +-- Replaces: `threadDelay (fromIntegral (Clock.toNanoSecs nanos `div` 1_000))`. 
+threadDelayNanos :: Integer -> IO () +threadDelayNanos nanos = + let micros = nanos `div` 1_000 + clamped = fromIntegral (min (fromIntegral (maxBound :: Int)) micros) + in when (clamped > 0) $ threadDelay clamped + +-------------------------------------------------------------------------------- +-- Workload runner. +-------------------------------------------------------------------------------- + +-- | A worker callback that runs inside a labeled 'Async.Async'. +-- +-- 'runWorkload' builds the rate-limited, recycling fetch functions for each +-- target and spawns a labeled async that calls this callback. The callback +-- receives: +-- +-- 1. The fully resolved 'Runtime.Target' (carries addr, port, batch size, +-- and target name for error attribution). +-- 2. @fetchPayload@: blocking fetch that claims one rate-limit slot, reads a +-- @(payload, [input])@ pair from the payload queue, writes the @[input]@ +-- component back to the workload's input queue, and returns the @payload@. +-- 3. @tryFetchPayload@: non-blocking variant that returns @Nothing@ when +-- rate-limited. On success, writes inputs back and returns the payload the +-- same way. +-- +-- Both fetch functions handle the @[input]@ recycling automatically: whatever +-- inputs the builder pairs with the payload are written back to the input queue +-- after each fetch. The callback must not write to the input queue itself +-- (doing so would duplicate inputs). Its only responsibilities are delivering +-- the payload and any application-level bookkeeping. +-- +-- The thread is already labeled @workloadName\/targetName@ by 'runWorkload'. +-- The callback body runs for the lifetime of the generator. It should not +-- create its own async or label its own thread; 'runWorkload' handles both. +type TargetWorker input payload + = Runtime.Target input payload -- ^ The resolved target. + -> IO payload -- ^ Blocking fetch (rate-limited, recycles inputs). 
+ -> IO (Maybe payload) -- ^ Non-blocking fetch (rate-limited, recycles inputs). + -> IO () -- ^ Worker body (runs inside labeled async). + +-- | Run a load-generation workload: for each target, build rate-limited fetch +-- functions that recycle consumed inputs, spawn a labeled async, and call the +-- worker callback inside it. +-- +-- Rate limiter creation and the shared\/independent decision are handled by +-- 'Runtime.resolve'. This function simply activates the pre-built limiters. +-- +-- For each target the function: +-- +-- 1. Builds a 'RateLimitedFetcher' from the target's 'Runtime.rateLimiter'. +-- 2. Wraps it with pipe recycling to produce @fetchPayload :: IO payload@ and +-- @tryFetchPayload :: IO (Maybe payload)@. +-- 3. Computes a thread label: @workloadName ++ \"\/\" ++ targetName@. +-- 4. Creates an 'Async.Async' that labels the thread, then runs the worker +-- callback. +-- +-- Returns the list of worker asyncs (__unlinked__). Callers decide how to +-- monitor them: 'Main.hs' links them for immediate propagation; the test +-- harness polls synchronously so Tasty's 'withResource' can cache the +-- exception. +runWorkload + :: Runtime.Workload input payload + -> TargetWorker input payload + -> IO [Async.Async ()] +runWorkload workload targetWorker = + mapM + (\target -> do + let fetcher = mkRateLimitedFetcher + (Runtime.onExhaustion target) + (Runtime.rateLimiter target) + pipe = Runtime.targetPipe target + -- Fetch one payload (blocking), write its [input] back, return payload. + fetchPayload = do + (payload, recycledInputs) <- + (blockingFetch fetcher) (Runtime.pipePayloadQueue pipe) + -- Recycling is a separate STM transaction from the fetch above. 
+ -- Merging both into one transaction would widen the critical + -- section: the combined transaction would hold the rate-limiter + -- TVars and the payload queue while also writing to the input + -- queue, increasing contention and risking that a slow recycle + -- (many inputs) stalls other workers competing for rate-limit + -- slots. Keeping them separate means the fetch-and-claim is short; + -- the recycle cannot delay other submissions. + -- + -- Trade-off: between the two transactions, recycled inputs are held + -- only in memory. If the thread is killed in this window, those + -- inputs are lost. This is acceptable; recycling happens on + -- delivery, not on downstream confirmation (see + -- 'Runtime.pipeRecycle' for the full rationale). + STM.atomically $ Runtime.pipeRecycle pipe recycledInputs + pure payload + -- Try to fetch one payload (non-blocking). + tryFetchPayload = do + result <- (nonBlockingFetch fetcher) (Runtime.pipePayloadQueue pipe) + case result of + Nothing -> pure Nothing + Just (payload, recycledInputs) -> do + -- See fetchPayload above for why recycle is a separate + -- transaction. + STM.atomically $ Runtime.pipeRecycle pipe recycledInputs + pure (Just payload) + -- Always labeled threads. + threadLabel = + Runtime.workloadName workload ++ "/" ++ Runtime.targetName target + -- Return async (unlinked, caller decides monitoring strategy). 
+ async <- Async.async $ do + tid <- myThreadId + labelThread tid threadLabel + targetWorker target fetchPayload tryFetchPayload + pure async + ) + (Map.elems (Runtime.targets workload)) diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Client.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Client.hs new file mode 100644 index 00000000000..2a56f4cfca9 --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Client.hs @@ -0,0 +1,334 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE PackageImports #-} +{-# LANGUAGE ScopedTypeVariables #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.TxCentrifuge.Client + ( mkClient + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Arrow ((&&&)) +import Data.Foldable (toList) +import Numeric.Natural (Natural) +import Data.List.NonEmpty qualified as NE +---------------- +-- bytestring -- +---------------- +import Data.ByteString qualified as BS +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +---------------- +-- containers -- +---------------- +import Data.Sequence qualified as Seq +import Data.Set qualified as Set +------------------- +-- contra-tracer -- +------------------- +import "contra-tracer" Control.Tracer (Tracer, traceWith) +------------------------- +-- ouroboros-consensus -- +------------------------- +import Ouroboros.Consensus.Cardano qualified as Consensus (CardanoBlock) +import Ouroboros.Consensus.Cardano.Block qualified as Block +import Ouroboros.Consensus.Ledger.SupportsMempool qualified as Mempool +import Ouroboros.Consensus.Shelley.Eras qualified as Eras +import Ouroboros.Consensus.Shelley.Ledger.Mempool + qualified as 
Mempool (TxId(ShelleyTxId)) +----------------------- +-- ouroboros-network -- +----------------------- +import Ouroboros.Network.Protocol.TxSubmission2.Client qualified as TxSub +import Ouroboros.Network.Protocol.TxSubmission2.Type qualified as TxSub +import Ouroboros.Network.SizeInBytes qualified as Net +--------------------- +-- tx-centrifuge -- +--------------------- +import Cardano.Benchmarking.TxCentrifuge.Tracing qualified as Tracing + +-------------------------------------------------------------------------------- + +type CardanoBlock = Consensus.CardanoBlock Eras.StandardCrypto + +-- | Internal state: the unacknowledged tx sequence (oldest first, matching the +-- server's FIFO). Acks remove elements from the front; new announcements are +-- appended at the back. +-- Uses 'Seq' for O(1) length and O(log n) take/drop (vs O(n) for lists). +type UnAcked = Seq.Seq (Api.Tx Api.ConwayEra) + +-- | Create a TxSubmission2 client that pulls txs from caller-supplied IO +-- actions. No intermediate queue, the blocking action is called for the first +-- mandatory tx, and the non-blocking action drains the rest up to the +-- requested count, capped by @maxBatchSize@. +mkClient + -- | Tracer for structured TxSubmission2 events. + :: Tracer IO Tracing.TxSubmission + -- | Target name (remote node identifier). + -> String + -- | Max batch size per request. + -> Natural + -- | Blocking: wait for a token (must not fail). + -> IO (Api.Tx Api.ConwayEra) + -- | NonBlocking: poll for a token. 
+ -> IO (Maybe (Api.Tx Api.ConwayEra)) + -> TxSub.TxSubmissionClient + (Mempool.GenTxId CardanoBlock) + (Mempool.GenTx CardanoBlock) + IO + () +mkClient tracer targetName maxBatchSize blockingFetch nonBlockingFetch = + TxSub.TxSubmissionClient $ pure $ TxSub.ClientStIdle + { TxSub.recvMsgRequestTxIds = + requestTxIds + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + Seq.empty + , TxSub.recvMsgRequestTxs = + requestTxs + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + Seq.empty + } + +-------------------------------------------------------------------------------- + +-- | Drain up to @n@ tokens without blocking. +-- This is the primary token consumption path for both 'SingBlocking' +-- (after the first mandatory tx) and 'SingNonBlocking' requests. +-- Stops as soon as the callback returns 'Nothing' (rate-limited). +drainUpTo :: Int + -> IO (Maybe (Api.Tx Api.ConwayEra)) + -> IO [Api.Tx Api.ConwayEra] +drainUpTo 0 _ = pure [] +drainUpTo n fetch = fetch >>= \case + Nothing -> pure [] + Just x -> (x :) <$> drainUpTo (n - 1) fetch + +-- | Handle @MsgRequestTxIds@. +-- +-- TxSubmission2 protocol semantics: +-- SingBlocking → must return at least 1 tx; may block. +-- SingNonBlocking → return 0..reqNum txs; must not block. +-- +-- In both cases, after satisfying the minimum (1 for blocking, 0 for +-- non-blocking), 'drainUpTo' fills the rest via non-blocking calls. +-- Under sustained load a Cardano node operates at near-full mempool capacity +-- and almost exclusively issues 'SingNonBlocking' requests, so the +-- non-blocking path is the dominant token consumption path. +-- See the fairness analysis in WorkloadRunner.runWorkload for details. +requestTxIds + :: forall blocking. + -- | Tracer for structured TxSubmission2 events. + Tracer IO Tracing.TxSubmission + -- | Target name (remote node identifier). + -> String + -- | Max batch size per request. + -> Natural + -- | Blocking: wait for a token (must not fail). 
+ -> IO (Api.Tx Api.ConwayEra) + -- | NonBlocking: poll for a token. + -> IO (Maybe (Api.Tx Api.ConwayEra)) + -- | Unacknowledged transactions (oldest first). + -> UnAcked + -> TxSub.SingBlockingStyle blocking + -- | Number of tx IDs to ACK. + -> TxSub.NumTxIdsToAck + -- | Number of tx IDs requested. + -> TxSub.NumTxIdsToReq + -> IO ( TxSub.ClientStTxIds + blocking + (Mempool.GenTxId CardanoBlock) + (Mempool.GenTx CardanoBlock) + IO + () + ) +requestTxIds + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + unacked + blocking + (TxSub.NumTxIdsToAck ackNum) + (TxSub.NumTxIdsToReq reqNum) + = do + -- Trace: node asked for tx id announcements. + ---------------------------------------------- + traceWith tracer $ + Tracing.RequestTxIds + targetName + (map txId (toList unacked)) -- TxIds not yet acknowledged. + (fromIntegral ackNum) -- how many the node is ACKing. + (fromIntegral reqNum) -- how many new TxIds it wants. + -- Pull txs from the callbacks, capped by maxBatchSize. + -------------------------------------------------------- + newTxs <- do + let !effectiveReq = min + (fromIntegral reqNum) + (fromIntegral maxBatchSize :: Int) + case blocking of + TxSub.SingBlocking -> do + -- Block for exactly one tx (protocol minimum), then + -- remaining up to effectiveReq-1 without blocking. + tx1 <- blockingFetch + rest <- drainUpTo (effectiveReq - 1) nonBlockingFetch + pure (tx1 : rest) + TxSub.SingNonBlocking -> do + -- Return whatever is available up to effectiveReq. + drainUpTo effectiveReq nonBlockingFetch + -- Drop acknowledged txs. + -------------------------------------- + -- Drop acknowledged txs from the front (oldest first, matching the server's + -- FIFO), then append new announcements at the back. + let !unacked' = + let !remaining = Seq.drop (fromIntegral ackNum) unacked + in remaining Seq.>< Seq.fromList newTxs + -- Trace: we replied with tx id announcements. 
+ ----------------------------------------------- + traceWith tracer $ + Tracing.ReplyTxIds + targetName + (fromIntegral ackNum) -- how many the node is ACKing. + (fromIntegral reqNum) -- how many new TxIds it wants. + (map txId (toList unacked')) -- updated unacked after ACK + new. + (map txId newTxs) -- TxIds we announced in this reply. + -- Build the protocol continuation. + ----------------------------------- + let nextIdle = TxSub.ClientStIdle + -- Continues the protocol loop with the updated unacked list. + { TxSub.recvMsgRequestTxIds = + requestTxIds + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + unacked' + , TxSub.recvMsgRequestTxs = + requestTxs + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + unacked' + } + -- Answer with what we obtained from the callbacks. + --------------------------------------------------- + case blocking of + TxSub.SingBlocking -> do + case NE.nonEmpty newTxs of + Nothing -> error "requestTxIds: blocking fetch returned empty list!" + Just txs -> do + pure $ TxSub.SendMsgReplyTxIds + (TxSub.BlockingReply $ fmap txToIdSize txs ) + nextIdle + TxSub.SingNonBlocking -> do + pure $ TxSub.SendMsgReplyTxIds + (TxSub.NonBlockingReply $ fmap txToIdSize newTxs) + nextIdle + +-- | Handle @MsgRequestTxs@: look up requested tx ids in the unacked list and +-- send back the matching transactions. +requestTxs + -- | Tracer for structured TxSubmission2 events. + :: Tracer IO Tracing.TxSubmission + -- | Target name (remote node identifier). + -> String + -- | Max batch size per request. + -> Natural + -- | Blocking: wait for a token (must not fail). + -> IO (Api.Tx Api.ConwayEra) + -- | NonBlocking: poll for a token. + -> IO (Maybe (Api.Tx Api.ConwayEra)) + -- | Unacknowledged transactions (oldest first). 
+ -> UnAcked + -> [Mempool.GenTxId CardanoBlock] + -> IO ( TxSub.ClientStTxs + (Mempool.GenTxId CardanoBlock) + (Mempool.GenTx CardanoBlock) + IO + () + ) +requestTxs + tracer + targetName maxBatchSize + blockingFetch nonBlockingFetch + unacked + requestedTxIds + = do + -- Trace: node asked for full transactions by TxId. + ---------------------------------------------------- + traceWith tracer $ + Tracing.RequestTxs + targetName + (map fromGenTxId requestedTxIds) -- TxIds the node requested. + -- Build response. + ------------------ + -- The list is converted to a Set for efficient filtering. + let requestedTxIdsSet = Set.fromList (map fromGenTxId requestedTxIds) + txIdsToSend = filter + (\tx -> + txId tx `Set.member` requestedTxIdsSet + ) + (toList unacked) + -- Trace: we replied with the matching transactions. + ----------------------------------------------------- + traceWith tracer $ + Tracing.ReplyTxs + targetName + (map fromGenTxId requestedTxIds) -- TxIds the node requested. + (map txId txIdsToSend) -- TxIds we actually sent. + -- Response and protocol continuation. + -------------------------------------- + pure $ TxSub.SendMsgReplyTxs (map toGenTx txIdsToSend) $ TxSub.ClientStIdle + -- Continues the protocol loop with no changes to the unacked list. + { TxSub.recvMsgRequestTxIds = + requestTxIds tracer targetName + maxBatchSize blockingFetch nonBlockingFetch + unacked + , TxSub.recvMsgRequestTxs = + requestTxs tracer targetName + maxBatchSize blockingFetch nonBlockingFetch + unacked + } + +-- Helpers. +------------------------------------------------------------------------------- + +-- | Extract the cardano-api TxId from a signed transaction. +txId :: Api.Tx Api.ConwayEra -> Api.TxId +txId = Api.getTxId . Api.getTxBody + +-- | Convert a Tx to (GenTxId, SizeInBytes) for announcement. +txToIdSize :: Api.Tx Api.ConwayEra + -> (Mempool.GenTxId CardanoBlock, Net.SizeInBytes) +txToIdSize = + (Mempool.txId . toGenTx) + &&& (Net.SizeInBytes . fromIntegral . 
txSize) + where + txSize :: Api.Tx Api.ConwayEra -> Int + txSize = BS.length . Api.serialiseToCBOR + +-- | Convert a cardano-api Tx to a consensus GenTx. +toGenTx :: Api.Tx Api.ConwayEra -> Mempool.GenTx CardanoBlock +toGenTx tx = Api.toConsensusGenTx $ Api.TxInMode Api.shelleyBasedEra tx + +-- | Convert a consensus GenTxId back to a cardano-api TxId. +-- +-- NOTE: this generator only produces Conway-era transactions. +-- If the Cardano network undergoes a hard fork to a new era while the +-- generator is running, this will fail. Update the pattern match when adding +-- support for a new era. +fromGenTxId :: Mempool.GenTxId CardanoBlock -> Api.TxId +fromGenTxId (Block.GenTxIdConway (Mempool.ShelleyTxId i)) = + Api.fromShelleyTxId i +fromGenTxId other = error $ "fromGenTxId: Conway only, received " ++ show other diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Connection.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Connection.hs new file mode 100644 index 00000000000..69f0f7077e1 --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Connection.hs @@ -0,0 +1,349 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.TxCentrifuge.Connection + ( CardanoBlock + , ConnectClient + , connect + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Monad (forever) +import Control.Monad.IO.Class (liftIO) +import Data.Foldable (fold) +import Data.Proxy (Proxy (..)) +import Data.Void (Void, absurd) +---------------- +-- bytestring -- +---------------- +import Data.ByteString.Lazy qualified as BSL +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +---------------- +-- io-classes 
-- +---------------- +import Control.Monad.Class.MonadTimer qualified as MonadTimer +------------- +-- network -- +------------- +import Network.Socket qualified as Socket +----------------- +-- network-mux -- +----------------- +import Network.Mux qualified as Mux +-------------------------- +-- ouroboros-consensus -- +-------------------------- +import Ouroboros.Consensus.Block.Abstract qualified as Block +import Ouroboros.Consensus.Cardano qualified as Consensus (CardanoBlock) +import Ouroboros.Consensus.Ledger.SupportsMempool qualified as Mempool +import Ouroboros.Consensus.Network.NodeToNode qualified as NetN2N +import Ouroboros.Consensus.Node.NetworkProtocolVersion qualified as NetVer +import Ouroboros.Consensus.Node.Run () +import Ouroboros.Consensus.Shelley.Eras qualified as Eras +-- Orphan instances needed for +-- RunNode / SupportedNetworkProtocolVersion CardanoBlock +import Ouroboros.Consensus.Shelley.Ledger.SupportsProtocol () +-------------------------- +-- ouroboros-network -- +-------------------------- +import Ouroboros.Network.Channel qualified as Channel +import Ouroboros.Network.Context qualified as NetCtx +import Ouroboros.Network.ControlMessage qualified as ControlMsg +import Ouroboros.Network.DeltaQ qualified as DeltaQ +import Ouroboros.Network.Driver qualified as Driver +import Ouroboros.Network.KeepAlive qualified as KeepAlive +import Ouroboros.Network.Magic qualified as Magic +import Ouroboros.Network.Mux qualified as NetMux +import Ouroboros.Network.NodeToClient qualified as NtC +import Ouroboros.Network.NodeToNode qualified as NtN +import Ouroboros.Network.PeerSelection.PeerSharing qualified as PeerSharing +import Ouroboros.Network.PeerSelection.PeerSharing.Codec qualified as PSCodec +import Ouroboros.Network.Protocol.BlockFetch.Client qualified as BFClient +import Ouroboros.Network.Protocol.Handshake.Version qualified as Handshake +import Ouroboros.Network.Protocol.KeepAlive.Client qualified as KAClient +import 
Ouroboros.Network.Protocol.KeepAlive.Codec qualified as KACodec +import Ouroboros.Network.Protocol.PeerSharing.Client qualified as PSClient +import Ouroboros.Network.Protocol.TxSubmission2.Client qualified as TxSub +import Ouroboros.Network.Snocket qualified as Snocket +------------------------------- +-- ouroboros-network-framework -- +------------------------------- +import Ouroboros.Network.IOManager qualified as IOManager +------------ +-- random -- +------------ +import System.Random qualified as Random +--------------- +-- serialise -- +--------------- +import Codec.Serialise qualified as Serialise +--------- +-- stm -- +--------- +import Control.Concurrent.Class.MonadSTM.Strict qualified as StrictSTM +--------------------- +-- tx-centrifuge -- +--------------------- +import Cardano.Benchmarking.TxCentrifuge.Tracing + qualified as Tracing + +-------------------------------------------------------------------------------- + +type CardanoBlock = Consensus.CardanoBlock Eras.StandardCrypto + +-- | A function that connects to a remote node and runs a TxSubmission2 client. +-- +-- Returns @Left msg@ when the connection fails (handshake error) or terminates +-- (mux returns). The caller is responsible for adding context (e.g. a +-- workload\/target label) and deciding how to surface the error. +type ConnectClient = + Socket.AddrInfo + -> TxSub.TxSubmissionClient + (Mempool.GenTxId CardanoBlock) + (Mempool.GenTx CardanoBlock) + IO + () + -> IO (Either String ()) + +-- | Connect to a remote cardano-node via NodeToNode protocols. +-- Runs null ChainSync, null BlockFetch, null PeerSharing, KeepAlive, and the +-- given TxSubmission2 client. +-- +-- Returns @Left msg@ on handshake failure or unexpected connection termination. +-- The @Right@ case is unreachable (the mux never returns successfully). 
+connect + :: IOManager.IOManager + -> Block.CodecConfig CardanoBlock + -> Magic.NetworkMagic + -> Tracing.Tracers + -> ConnectClient +connect + ioManager + codecConfig + networkMagic + tracers + remoteAddr + myTxSubClient = do + done <- NtN.connectTo (Snocket.socketSnocket ioManager) + NtN.NetworkConnectTracers + { NtN.nctMuxTracers = Mux.nullTracers + , NtN.nctHandshakeTracer = mempty + } + peerMultiplex + Nothing + (Socket.addrAddress remoteAddr) + case done of + Left err -> pure $ Left $ + "handshake failed: " ++ show err + Right choice -> case choice of + Left () -> pure $ Left + "connection terminated unexpectedly" + Right void -> absurd void + + where + + n2nVer :: NetVer.NodeToNodeVersion + n2nVer = NetVer.NodeToNodeV_14 + + blkN2nVer :: NetVer.BlockNodeToNodeVersion CardanoBlock + blkN2nVer = supportedVers Map.! n2nVer + + supportedVers + :: Map.Map + NetVer.NodeToNodeVersion + ( NetVer.BlockNodeToNodeVersion + CardanoBlock + ) + supportedVers = + NetVer.supportedNodeToNodeVersions (Proxy @CardanoBlock) + + myCodecs + :: NetN2N.Codecs + CardanoBlock + NtN.RemoteAddress + Serialise.DeserialiseFailure + IO + BSL.ByteString + BSL.ByteString + BSL.ByteString + BSL.ByteString + BSL.ByteString + BSL.ByteString + BSL.ByteString + myCodecs = + NetN2N.defaultCodecs + codecConfig + blkN2nVer + PSCodec.encodeRemoteAddress + PSCodec.decodeRemoteAddress + n2nVer + + keepAliveTimeout :: KeepAlive.KeepAliveInterval + keepAliveTimeout = KeepAlive.KeepAliveInterval 10 + + peerMultiplex + :: NtN.Versions + NetVer.NodeToNodeVersion + NtN.NodeToNodeVersionData + ( NetMux.OuroborosApplication + 'Mux.InitiatorMode + ( NetCtx.MinimalInitiatorContext + NtN.RemoteAddress + ) + ( NetCtx.ResponderContext + NtN.RemoteAddress + ) + BSL.ByteString + IO + () + Void + ) + peerMultiplex = + Handshake.simpleSingletonVersions + n2nVer + ( NtN.NodeToNodeVersionData + { NtN.networkMagic = networkMagic + , NtN.diffusionMode = NtN.InitiatorOnlyDiffusionMode + , NtN.peerSharing = 
PeerSharing.PeerSharingDisabled + , NtN.query = False + } + ) + $ \n2nData -> + mkApp $ + NtN.nodeToNodeProtocols + NtN.defaultMiniProtocolParameters + NtN.NodeToNodeProtocols + { NtN.chainSyncProtocol = + NetMux.InitiatorProtocolOnly + $ NetMux.MiniProtocolCb + $ \_ctx channel -> + Driver.runPeer + mempty + ( NetN2N.cChainSyncCodec + myCodecs + ) + channel + NtC.chainSyncPeerNull + + , NtN.blockFetchProtocol = + NetMux.InitiatorProtocolOnly + $ NetMux.MiniProtocolCb + $ \_ctx channel -> + Driver.runPeer + mempty + ( NetN2N.cBlockFetchCodec + myCodecs + ) + channel + ( BFClient.blockFetchClientPeer + blockFetchClientNull + ) + + , NtN.keepAliveProtocol = + NetMux.InitiatorProtocolOnly + $ NetMux.MiniProtocolCb + $ \ctx channel -> + kaClient + ( NetCtx.remoteAddress + $ NetCtx.micConnectionId + ctx + ) + channel + + , NtN.txSubmissionProtocol = + NetMux.InitiatorProtocolOnly + $ NetMux.MiniProtocolCb + $ \_ctx channel -> + Driver.runPeer + ( Tracing.trTxSubmission2 + tracers + ) + ( NetN2N.cTxSubmission2Codec + myCodecs + ) + channel + ( TxSub.txSubmissionClientPeer + myTxSubClient + ) + + , NtN.peerSharingProtocol = + NetMux.InitiatorProtocolOnly + $ NetMux.MiniProtocolCb + $ \_ctx channel -> + Driver.runPeer + mempty + ( NetN2N.cPeerSharingCodec + myCodecs + ) + channel + ( PSClient.peerSharingClientPeer + peerSharingClientNull + ) + } + n2nVer + n2nData + + mkApp + :: NetMux.OuroborosBundle + mode initiatorCtx responderCtx + bs m a b + -> NetMux.OuroborosApplication + mode initiatorCtx responderCtx + bs m a b + mkApp bundle = NetMux.OuroborosApplication $ fold bundle + + kaClient + :: Ord remotePeer + => remotePeer + -> Channel.Channel IO BSL.ByteString + -> IO ((), Maybe BSL.ByteString) + kaClient them channel = do + keepAliveRng <- Random.newStdGen + peerGSVMap <- + liftIO + . 
StrictSTM.newTVarIO + $ Map.singleton them DeltaQ.defaultGSV + Driver.runPeerWithLimits + (Tracing.trKeepAlive tracers) + (NetN2N.cKeepAliveCodec myCodecs) + (KACodec.byteLimitsKeepAlive (const 0)) + KACodec.timeLimitsKeepAlive + channel + $ KAClient.keepAliveClientPeer + $ KeepAlive.keepAliveClient + mempty + keepAliveRng + ( ControlMsg.continueForever + (Proxy :: Proxy IO) + ) + them + peerGSVMap + keepAliveTimeout + +-- | Null block fetch client. +blockFetchClientNull + :: MonadTimer.MonadTimer m + => BFClient.BlockFetchClient block point m a +blockFetchClientNull = + BFClient.BlockFetchClient + $ forever + $ MonadTimer.threadDelay (24 * 60 * 60) + +-- | Null peer sharing client. +peerSharingClientNull + :: MonadTimer.MonadTimer m + => PSClient.PeerSharingClient addr m a +peerSharingClientNull = + PSClient.SendMsgDone + $ forever + $ MonadTimer.threadDelay (24 * 60 * 60) diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Fund.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Fund.hs new file mode 100644 index 00000000000..a0476a11a4d --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Fund.hs @@ -0,0 +1,182 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.TxCentrifuge.Fund + ( Fund (..) 
+ , loadFunds + , genesisTxIn + , castToGenesisUTxOKey + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Data.Bifunctor (first) +import Data.IORef qualified as IORef +import Text.Read (readMaybe) +----------- +-- aeson -- +----------- +import Data.Aeson qualified as Aeson +import Data.Aeson ((.:), (.:?)) +import Data.Aeson.Types qualified as Aeson +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +---------- +-- text -- +---------- +import Data.Text qualified as T +import Data.Text.Encoding qualified as T + +-------------------------------------------------------------------------------- + +-- | A spendable fund: a UTxO reference, its Lovelace value, and the signing key +-- required to spend it. +data Fund = Fund + { fundTxIn :: !Api.TxIn + -- | Lovelace amount. + , fundValue :: !Integer + -- | Key to spend this UTxO. + , fundSignKey :: !(Api.SigningKey Api.PaymentKey) + } + +-------------------------------------------------------------------------------- +-- JSON loading +-------------------------------------------------------------------------------- + +-- | Internal: JSON-parseable fund entry. Two variants: +-- +-- * 'FundEntryPayment': a regular fund with an explicit UTxO reference. +-- @{ "tx_in": "txid#ix", "value": 1000000, "signing_key": "payment.skey" }@ +-- +-- * 'FundEntryGenesis': a genesis UTxO fund identified only by its key. +-- The TxIn is derived via 'Api.genesisUTxOPseudoTxIn' (always TxIx 0). +-- @{ "signing_key": "genesis.skey", "value": 1000000 }@ +data FundEntry + = FundEntryPayment !Api.TxIn !Integer !FilePath + | FundEntryGenesis !Integer !FilePath + +instance Aeson.FromJSON FundEntry where + parseJSON = Aeson.withObject "Fund" $ \o -> do + mbTxInStr <- o .:? 
"tx_in" + val <- o .: "value" + keyPath <- o .: "signing_key" + case mbTxInStr of + Just txInStr -> do + txIn <- parseTxIn txInStr + pure (FundEntryPayment txIn val keyPath) + Nothing -> pure (FundEntryGenesis val keyPath) + +-- | Parse @"txid#ix"@ format. Both parts are required. +parseTxIn :: T.Text -> Aeson.Parser Api.TxIn +parseTxIn s = + let (txIdHex, rest) = T.breakOn "#" s + in case T.uncons rest of + Just ('#', ds) -> + case Api.deserialiseFromRawBytesHex @Api.TxId (T.encodeUtf8 txIdHex) of + Left err -> fail $ "Invalid TxId: " ++ show err + Right txId -> case readMaybe (T.unpack ds) of + Nothing -> fail $ "Invalid TxIx: expected an integer, got " ++ show ds + Just ix -> pure $ Api.TxIn txId (Api.TxIx ix) + _ -> fail "Invalid TxIn: expected \"txid#ix\" format" + +-- | Load funds from a JSON file and return them as a list. +-- The JSON file should contain an array of fund objects, each with a +-- @"signingKey"@ field pointing to a @.skey@ file. +-- Signing keys are cached by path to avoid redundant disk reads. +-- +-- For key-only entries (no @"txIn"@), the genesis UTxO pseudo-TxIn is +-- derived from the signing key using the provided 'Api.NetworkId'. +-- +-- NOTE: the entire JSON array is decoded into memory before returning. +-- For very large fund files a streaming parser (e.g. json-stream) could +-- yield funds incrementally so the caller can start filling queues before +-- the file is fully read. +loadFunds :: Api.NetworkId -> FilePath -> IO (Either String [Fund]) +loadFunds networkId path = do + result <- Aeson.eitherDecodeFileStrict' path + case result of + Left err -> pure (Left err) + Right (entries :: [FundEntry]) -> do + keyCache <- IORef.newIORef Map.empty + eFunds <- mapM (entryToFund networkId keyCache) entries + case sequence eFunds of + Left err -> pure (Left err) + Right funds -> pure (Right funds) + +-- | Convert a JSON entry to a Fund by loading its signing key (cached). 
+entryToFund + :: Api.NetworkId + -> IORef.IORef (Map.Map FilePath (Api.SigningKey Api.PaymentKey)) + -> FundEntry + -> IO (Either String Fund) +entryToFund networkId cacheRef entry = do + let keyPath = entryKeyPath entry + cache <- IORef.readIORef cacheRef + case Map.lookup keyPath cache of + Just key -> pure $ Right $ mkFund key + Nothing -> do + eKey <- readSigningKey keyPath + case eKey of + Left err -> pure $ Left $ + "Failed to load signing key " + ++ keyPath ++ ": " ++ err + Right key -> do + IORef.modifyIORef' cacheRef (Map.insert keyPath key) + pure $ Right $ mkFund key + where + + entryKeyPath :: FundEntry -> FilePath + entryKeyPath (FundEntryPayment _ _ p) = p + entryKeyPath (FundEntryGenesis _ p) = p + + mkFund :: Api.SigningKey Api.PaymentKey -> Fund + mkFund key = case entry of + FundEntryPayment txIn val _ -> Fund txIn val key + FundEntryGenesis val _ -> Fund (genesisTxIn networkId key) val key + +-- | Derive the genesis UTxO pseudo-TxIn from a payment signing key. +-- Casts to 'Api.GenesisUTxOKey' to compute the key hash expected by +-- 'Api.genesisUTxOPseudoTxIn'. +genesisTxIn :: Api.NetworkId -> Api.SigningKey Api.PaymentKey -> Api.TxIn +genesisTxIn networkId + = Api.genesisUTxOPseudoTxIn networkId + . Api.verificationKeyHash + . Api.getVerificationKey + . castToGenesisUTxOKey + +-- | Cast a 'Api.PaymentKey' signing key to a 'Api.GenesisUTxOKey' signing key. +-- Both key types use the same underlying ed25519 representation; this cast +-- enables computing the genesis UTxO pseudo-TxIn via 'Api.genesisUTxOPseudoTxIn'. +castToGenesisUTxOKey + :: Api.SigningKey Api.PaymentKey + -> Api.SigningKey Api.GenesisUTxOKey +castToGenesisUTxOKey (Api.PaymentSigningKey skey) = + Api.GenesisUTxOSigningKey skey + +-- | Read a signing key from a text envelope file. +-- Accepts both @PaymentSigningKey_ed25519@ and +-- @GenesisUTxOVerificationKey_ed25519@ key types. +-- Genesis UTxO keys are cast to payment keys. 
+readSigningKey :: FilePath -> IO (Either String (Api.SigningKey Api.PaymentKey)) +readSigningKey fp = do + result <- Api.readFileTextEnvelopeAnyOf + [ Api.FromSomeType (Api.AsSigningKey Api.AsPaymentKey) id + , Api.FromSomeType + (Api.AsSigningKey Api.AsGenesisUTxOKey) + Api.castSigningKey + ] + (Api.File fp) + pure $ first show result diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing.hs new file mode 100644 index 00000000000..b3052c86fe5 --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing.hs @@ -0,0 +1,492 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PackageImports #-} +{-# LANGUAGE ScopedTypeVariables #-} + +-------------------------------------------------------------------------------- + +-- | Tracer setup for the tx-centrifuge. Creates configured contra-tracers +-- backed by trace-dispatcher and reads optional @TraceOptions@ from the +-- generator config file. +module Cardano.Benchmarking.TxCentrifuge.Tracing + ( Tracers (..) + , setupTracers, nullTracers + , BuilderTrace (..) + , mkBuilderNewTx + , mkBuilderRecycle + , TxSubmission (..) 
+ -- * Re-exports + , traceWith + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Exception (SomeException, try) +----------- +-- aeson -- +----------- +import Data.Aeson (Value (String), (.=), object) +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +----------------- +-- containers -- +----------------- +import Data.Map.Strict qualified as Map +------------------- +-- contra-tracer -- +------------------- +import "contra-tracer" Control.Tracer (Tracer (..), traceWith) +-------------------------- +-- ouroboros-consensus -- +-------------------------- +import Ouroboros.Consensus.Cardano qualified as Consensus (CardanoBlock) +import Ouroboros.Consensus.Ledger.SupportsMempool qualified as Mempool +import Ouroboros.Consensus.Shelley.Eras qualified as Eras +-- Orphan instances needed for LedgerSupportsProtocol (ShelleyBlock ...) +import Ouroboros.Consensus.Shelley.Ledger.SupportsProtocol () +-------------------------- +-- ouroboros-network -- +-------------------------- +import Ouroboros.Network.Driver.Simple qualified as Simple +import Ouroboros.Network.Protocol.KeepAlive.Type qualified as KA +import Ouroboros.Network.Protocol.TxSubmission2.Type qualified as STX +---------- +-- text -- +---------- +import Data.Text qualified as Text +---------------------- +-- trace-dispatcher -- +---------------------- +import Cardano.Logging qualified as Logging +--------------------- +-- tx-centrifuge -- +--------------------- +import Cardano.Benchmarking.TxCentrifuge.Fund qualified as Fund +-- Imported for its orphan LogFormatting / MetaTrace instances. 
+import Cardano.Benchmarking.TxCentrifuge.Tracing.Orphans () + +-------------------------------------------------------------------------------- +-- Tracers +-------------------------------------------------------------------------------- + +type CardanoBlock = Consensus.CardanoBlock Eras.StandardCrypto + +data Tracers = Tracers + { -- | Builder trace: transaction construction and recycling events. + trBuilder + :: !(Tracer IO BuilderTrace) + -- | Clean, structured TxSubmission2 trace emitted by Client.hs. + , trTxSubmission + :: !(Tracer IO TxSubmission) + -- | Low-level protocol trace from ouroboros-network's Driver.runPeer. + , trTxSubmission2 + :: !( Tracer + IO + ( Simple.TraceSendRecv + ( STX.TxSubmission2 + (Mempool.GenTxId CardanoBlock) + (Mempool.GenTx CardanoBlock) + ) + ) + ) + , trKeepAlive + :: !( Tracer + IO + (Simple.TraceSendRecv KA.KeepAlive) + ) + } + +-- | All-silent tracers. +nullTracers :: Tracers +nullTracers = Tracers + { trBuilder = Tracer (\_ -> pure ()) + , trTxSubmission = Tracer (\_ -> pure ()) + , trTxSubmission2 = Tracer (\_ -> pure ()) + , trKeepAlive = Tracer (\_ -> pure ()) + } + +-------------------------------------------------------------------------------- +-- Tracer setup +-------------------------------------------------------------------------------- + +-- | Create configured tracers from the tx-centrifuge config file. If the file +-- contains a @TraceOptions@ section, those settings are used. Otherwise falls +-- back to a sensible default (stdout, machine format, severity Debug). 
+setupTracers :: FilePath -> IO Tracers +setupTracers configFile = do + trConfig <- + either + (\(_ :: SomeException) -> defaultTraceConfig) + id + <$> try (Logging.readConfiguration configFile) + configReflection <- Logging.emptyConfigReflection + stdoutTrace <- Logging.standardTracer + let trForward = mempty + mbTrEkg = Nothing + + -- Builder (TxCentrifuge.Builder.NewTx, TxCentrifuge.Builder.Recycle) + !builderTr <- + Logging.mkCardanoTracer + stdoutTrace trForward mbTrEkg ["TxCentrifuge", "Builder"] + Logging.configureTracers + configReflection trConfig [builderTr] + + -- TxSubmission (TxCentrifuge.TxSubmission.*) + !txSubTraceTr <- + Logging.mkCardanoTracer + stdoutTrace trForward mbTrEkg ["TxCentrifuge", "TxSubmission"] + Logging.configureTracers + configReflection trConfig [txSubTraceTr] + + -- TxSubmission2 (low-level protocol trace) + !txSub2Trace <- + Logging.mkCardanoTracer + stdoutTrace trForward mbTrEkg ["TxSubmission2"] + Logging.configureTracers + configReflection trConfig [txSub2Trace] + + -- KeepAlive + !keepAliveTr <- + Logging.mkCardanoTracer + stdoutTrace trForward mbTrEkg ["KeepAlive"] + Logging.configureTracers + configReflection trConfig [keepAliveTr] + + pure Tracers + { trBuilder = + Tracer $ Logging.traceWith builderTr + , trTxSubmission = + Tracer $ Logging.traceWith txSubTraceTr + , trTxSubmission2 = + Tracer $ Logging.traceWith txSub2Trace + , trKeepAlive = + Tracer $ Logging.traceWith keepAliveTr + } + +-- | Default config: stdout machine format, severity Debug for all namespaces. 
+defaultTraceConfig :: Logging.TraceConfig +defaultTraceConfig = Logging.emptyTraceConfig + { Logging.tcOptions = Map.fromList + [ ( [] + , [ Logging.ConfSeverity + (Logging.SeverityF (Just Logging.Debug)) + , Logging.ConfBackend + [Logging.Stdout Logging.MachineFormat] + ] + ) + ] + } + +-------------------------------------------------------------------------------- +-- Builder trace messages +-------------------------------------------------------------------------------- + +-- | Trace messages emitted by the payload builder thread. +-- +-- == Builder pipeline +-- +-- The builder consumes input UTxOs (unspent funds) from the input queue, builds +-- and signs a transaction, and enqueues the result for workers to submit. Each +-- transaction produces new output UTxOs. After submission, these outputs are +-- recycled back to the input queue, forming a closed loop: +-- +-- @ +-- inputs --> [builder: build & sign tx] --> (tx, outputs) --> [worker: submit] +-- ^ | +-- +----------------------- recycle outputs ----------------------+ +-- @ +-- +-- == Cardano identifiers +-- +-- The Cardano ledger uses a UTxO (Unspent Transaction Output) model. Every +-- transaction consumes existing UTxOs as /inputs/ and produces new UTxOs as +-- /outputs/. Three types from @cardano-api@ identify these objects: +-- +-- === 'Api.TxId' — transaction identifier +-- +-- A Blake2b-256 hash of the serialised transaction body ('Api.TxBody'). +-- Uniquely identifies a transaction on the blockchain. Rendered as a +-- 64-character hex string via 'Api.serialiseToRawBytesHexText'. +-- +-- === 'Api.TxIx' — output index +-- +-- A zero-based index selecting one output within a transaction. +-- +-- === 'Api.TxIn' — UTxO reference +-- +-- A @('Api.TxId', 'Api.TxIx')@ pair that uniquely identifies a single UTxO on +-- the ledger. The standard display format is @\"\#\\"@, +-- produced by 'Api.renderTxIn'. +-- +-- A transaction's /input/ 'Api.TxIn's reference existing UTxOs being spent. 
Its +-- /output/ 'Api.TxIn's are derived from the new 'Api.TxId' paired with +-- sequential indices (0, 1, 2, ...). +-- +-- In the tx-centrifuge, each 'Fund' record wraps a 'Api.TxIn' (the UTxO +-- reference), its Lovelace value, and the signing key needed to spend it. +data BuilderTrace + = -- | A new transaction was built. + -- + -- * 'String' — builder name (the workload name, see 'Runtime.builderName'). + -- + -- * 'Api.TxId' — Blake2b-256 hash identifying the new transaction. + -- Obtain via @'Api.getTxId' ('Api.getTxBody' signedTx)@. + -- + -- * @['Fund.Fund']@ (inputs) — funds consumed by this transaction. Each + -- fund's 'Fund.fundTxIn' is a 'Api.TxIn' pointing to an existing UTxO + -- on the ledger. + -- + -- * @['Fund.Fund']@ (outputs) — funds produced by this transaction. Each + -- fund's 'Fund.fundTxIn' is derived from the new 'Api.TxId' and a + -- sequential 'Api.TxIx' index (0, 1, 2, ...). + BuilderNewTx !String !Api.TxId [Fund.Fund] [Fund.Fund] + -- | Output funds were recycled back to the workload's input queue. + -- + -- * 'String' — builder name (the workload name, see 'Runtime.builderName'). + -- + -- In the tx-centrifuge's closed-loop pipeline, output funds of a + -- transaction are recycled so they can be consumed by future transactions, + -- enabling indefinite-duration runs without pre-generating all UTxOs. + -- + -- * @['Fund.Fund']@ — the recycled output funds. + | BuilderRecycle !String [Fund.Fund] + +-- | Build a 'BuilderNewTx' trace from the builder name, a signed transaction, +-- and its input and output funds. Extracts the 'Api.TxId' from the transaction +-- body. +mkBuilderNewTx :: String -- ^ Builder name. + -> Api.Tx Api.ConwayEra -- ^ Signed transaction. + -> [Fund.Fund] -- ^ Input funds (consumed). + -> [Fund.Fund] -- ^ Output funds (produced). 
+ -> BuilderTrace +mkBuilderNewTx name tx = BuilderNewTx name (Api.getTxId (Api.getTxBody tx)) + +-- | Build a 'BuilderRecycle' trace from the builder name and the recycled +-- output funds. +mkBuilderRecycle :: String -- ^ Builder name. + -> [Fund.Fund] -- ^ Recycled output funds. + -> BuilderTrace +mkBuilderRecycle = BuilderRecycle + +-- | Machine-readable ('forMachine') and human-readable ('forHuman') rendering +-- of 'BuilderTrace' messages. +-- +-- Machine format ('Logging.DNormal'): +-- +-- @ +-- { \"builder\": \"workload-name\" +-- , \"txId\": \"\<64-char hex\>\" +-- , \"inputs\": [\"\#\\", ...] +-- , \"outputs\": [\"\#\\", ...] +-- } +-- @ +-- +-- Machine format ('Logging.DDetailed' and above): +-- +-- @ +-- { \"builder\": \"workload-name\" +-- , \"txId\": \"\<64-char hex\>\" +-- , \"inputs\": [{\"utxo\": \"\#\\", \"lovelace\": 1000000}, ...] +-- , \"outputs\": [{\"utxo\": \"\#\\", \"lovelace\": 500000}, ...] +-- } +-- @ +-- +-- Human format: +-- +-- @ +-- NewTx [workload-name] \ inputs=[\#\,...] outputs=[\#\,...] +-- @ +instance Logging.LogFormatting BuilderTrace where + forMachine dtal (BuilderNewTx name txId inputs outputs) = mconcat + [ "builder" .= name + , "txId" .= String (Api.serialiseToRawBytesHexText txId) + , "inputs" .= map (renderFund dtal) inputs + , "outputs" .= map (renderFund dtal) outputs + ] + forMachine dtal (BuilderRecycle name outputs) = mconcat + [ "builder" .= name + , "outputs" .= map (renderFund dtal) outputs + ] + forHuman (BuilderNewTx name txId inputs outputs) = + "NewTx [" <> Text.pack name <> "] " + <> Api.serialiseToRawBytesHexText txId + <> " inputs=[" <> renderFundTxIns inputs <> "]" + <> " outputs=[" <> renderFundTxIns outputs <> "]" + forHuman (BuilderRecycle name outputs) = + "Recycle [" <> Text.pack name <> "]" + <> " outputs=[" <> renderFundTxIns outputs <> "]" + +-- | Render a single fund for 'forMachine' output. +-- +-- * 'Logging.DMinimal', 'Logging.DNormal': just the UTxO reference as a +-- string (@\"\#\\"@). 
+-- * 'Logging.DDetailed', 'Logging.DMaximum': a JSON object with @\"utxo\"@ +-- and @\"lovelace\"@ fields. +renderFund :: Logging.DetailLevel -> Fund.Fund -> Value +renderFund dtal fund + | dtal >= Logging.DDetailed = + object [ "utxo" .= Api.renderTxIn (Fund.fundTxIn fund) + , "lovelace" .= Fund.fundValue fund + ] + | otherwise = + String (Api.renderTxIn (Fund.fundTxIn fund)) + +-- | Render a list of funds as comma-separated @\"\#\\"@ references. +renderFundTxIns :: [Fund.Fund] -> Text.Text +renderFundTxIns = Text.intercalate "," . map (Api.renderTxIn . Fund.fundTxIn) + +-- | Namespace: @TxCentrifuge.Builder.NewTx@ and @TxCentrifuge.Builder.Recycle@. +-- The outer prefix @[\"TxCentrifuge\", \"Builder\"]@ is set when creating the +-- tracer via 'Logging.mkCardanoTracer' in 'setupTracers'. +instance Logging.MetaTrace BuilderTrace where + namespaceFor BuilderNewTx{} = Logging.Namespace [] ["NewTx"] + namespaceFor BuilderRecycle{} = Logging.Namespace [] ["Recycle"] + severityFor (Logging.Namespace _ ["NewTx"]) _ = Just Logging.Info + severityFor (Logging.Namespace _ ["Recycle"]) _ = Just Logging.Info + severityFor _ _ = Nothing + documentFor (Logging.Namespace _ ["NewTx"]) = Just + "A new transaction was built from input UTxOs, producing output UTxOs." + documentFor (Logging.Namespace _ ["Recycle"]) = Just + "Output UTxOs were recycled back to the workload's input queue for reuse." + documentFor _ = Nothing + allNamespaces = + [ Logging.Namespace [] ["NewTx"] + , Logging.Namespace [] ["Recycle"] + ] + +-------------------------------------------------------------------------------- +-- TxSubmission trace messages +-------------------------------------------------------------------------------- + +-- | Clean, structured trace of the TxSubmission2 protocol as seen from +-- the generator side. Replaces the verbose @Show@-based tracing in +-- @ouroboros-network@'s @TraceSendRecv@ with fields that are easy to +-- parse and verify. 
+-- +-- Every constructor carries a @target@ field identifying the remote +-- node (the 'Runtime.targetName' of the 'Runtime.Target'). +data TxSubmission + = -- | The node asked us to announce transaction identifiers + -- (@MsgRequestTxIds@). + -- + -- * 'String': target node name. + -- * @['Api.TxId']@: TxIds we have not yet received an ACK for. + -- * 'Int': number of TxIds the node is acknowledging (ACK). + -- * 'Int': number of new TxIds the node is requesting (REQ). + RequestTxIds !String [Api.TxId] !Int !Int + -- | We replied to @MsgRequestTxIds@ with TxId\/size pairs. + -- + -- * 'String': target node name. + -- * 'Int': number of TxIds the node is acknowledging (ACK). + -- * 'Int': number of new TxIds the node is requesting (REQ). + -- * @['Api.TxId']@: updated unacked TxIds (after ACK + new announcements). + -- * @['Api.TxId']@: TxIds we announced in this reply. + | ReplyTxIds !String !Int !Int [Api.TxId] [Api.TxId] + -- | The node asked for full transactions by TxId (@MsgRequestTxs@). + -- + -- * 'String': target node name. + -- * @['Api.TxId']@: TxIds the node requested. + | RequestTxs !String [Api.TxId] + -- | We replied to @MsgRequestTxs@ with the requested transactions. + -- + -- * 'String': target node name. + -- * @['Api.TxId']@: TxIds the node requested. + -- * @['Api.TxId']@: TxIds we actually sent (subset of requested; a TxId is + -- missing if it was not in the unacked list). + | ReplyTxs !String [Api.TxId] [Api.TxId] + +-- | Machine-readable and human-readable rendering. All TxId lists are +-- omitted below 'Logging.DDetailed' to avoid the cost of hex-encoding +-- every transaction identifier on every protocol round-trip. 
+-- +-- Machine format ('Logging.DNormal'): +-- +-- @ +-- { \"target\": \"n\", \"ack\": 0, \"req\": 3 } +-- { \"target\": \"n\" } +-- { \"target\": \"n\" } +-- { \"target\": \"n\" } +-- @ +-- +-- Machine format ('Logging.DDetailed' and above): +-- +-- @ +-- { \"target\": \"n\", \"ack\": 0, \"req\": 3, \"unacked\": [\"ab..\"] } +-- { \"target\": \"n\", \"ack\": 0, \"req\": 3, \"txIds\": [\"ab..\"], \"unacked\": [\"ab..\"] } +-- { \"target\": \"n\", \"txIds\": [\"ab..\"] } +-- { \"target\": \"n\", \"txIds\": [\"ab..\"], \"requested\": [\"ab..\"] } +-- @ +instance Logging.LogFormatting TxSubmission where + forMachine dtal (RequestTxIds target unacked ack req) = mconcat $ + [ "target" .= target + , "ack" .= ack + , "req" .= req + ] + ++ [ "unacked" .= map Api.serialiseToRawBytesHexText unacked + | dtal >= Logging.DDetailed ] + forMachine dtal (ReplyTxIds target ack req unacked announced) = mconcat $ + [ "target" .= target ] + ++ [ "ack" .= ack + | dtal >= Logging.DDetailed ] + ++ [ "req" .= req + | dtal >= Logging.DDetailed ] + ++ [ "txIds" .= map Api.serialiseToRawBytesHexText announced + | dtal >= Logging.DDetailed ] + ++ [ "unacked" .= map Api.serialiseToRawBytesHexText unacked + | dtal >= Logging.DDetailed ] + forMachine dtal (RequestTxs target txIds) = mconcat $ + [ "target" .= target ] + ++ [ "txIds" .= map Api.serialiseToRawBytesHexText txIds + | dtal >= Logging.DDetailed ] + forMachine dtal (ReplyTxs target requested sent) = mconcat $ + [ "target" .= target ] + ++ [ "txIds" .= map Api.serialiseToRawBytesHexText sent + | dtal >= Logging.DDetailed ] + ++ [ "requested" .= map Api.serialiseToRawBytesHexText requested + | dtal >= Logging.DDetailed ] + forHuman (RequestTxIds target _unacked ack req) = + "RequestTxIds [" <> Text.pack target <> "]" + <> " ack=" <> Text.pack (show ack) + <> " req=" <> Text.pack (show req) + forHuman (ReplyTxIds target ack req _unacked _announced) = + "ReplyTxIds [" <> Text.pack target <> "]" + <> " ack=" <> Text.pack (show ack) + <> " 
req=" <> Text.pack (show req) + forHuman (RequestTxs target _txIds) = + "RequestTxs [" <> Text.pack target <> "]" + forHuman (ReplyTxs target _requested _sent) = + "ReplyTxs [" <> Text.pack target <> "]" + +-- | Namespace: @TxCentrifuge.TxSubmission.*@. The outer prefix +-- is set via 'Logging.mkCardanoTracer' in 'setupTracers'. +instance Logging.MetaTrace TxSubmission where + namespaceFor RequestTxIds{} = Logging.Namespace [] ["RequestTxIds"] + namespaceFor ReplyTxIds{} = Logging.Namespace [] ["ReplyTxIds"] + namespaceFor RequestTxs{} = Logging.Namespace [] ["RequestTxs"] + namespaceFor ReplyTxs{} = Logging.Namespace [] ["ReplyTxs"] + severityFor (Logging.Namespace _ ["RequestTxIds"]) _ = Just Logging.Info + severityFor (Logging.Namespace _ ["ReplyTxIds"]) _ = Just Logging.Info + severityFor (Logging.Namespace _ ["RequestTxs"]) _ = Just Logging.Info + severityFor (Logging.Namespace _ ["ReplyTxs"]) _ = Just Logging.Info + severityFor _ _ = Nothing + documentFor (Logging.Namespace _ ["RequestTxIds"]) = Just + "Node requested tx id announcements (blocking or non-blocking)." + documentFor (Logging.Namespace _ ["ReplyTxIds"]) = Just + "We replied with tx id announcements and sizes." + documentFor (Logging.Namespace _ ["RequestTxs"]) = Just + "Node requested full transactions by TxId." + documentFor (Logging.Namespace _ ["ReplyTxs"]) = Just + "We sent the requested transactions." 
+ documentFor _ = Nothing + allNamespaces = + [ Logging.Namespace [] ["RequestTxIds"] + , Logging.Namespace [] ["ReplyTxIds"] + , Logging.Namespace [] ["RequestTxs"] + , Logging.Namespace [] ["ReplyTxs"] + ] diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing/Orphans.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing/Orphans.hs new file mode 100644 index 00000000000..c83979917c5 --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tracing/Orphans.hs @@ -0,0 +1,406 @@ +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE UndecidableInstances #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +-------------------------------------------------------------------------------- + +-- | Orphan 'LogFormatting' and 'MetaTrace' instances copied from +-- @cardano-node@ (@NodeToNode.hs@ and @NodeToClient.hs@) so that +-- trace-dispatcher can format TxSubmission2, KeepAlive, and +-- TraceSendRecv messages. +module Cardano.Benchmarking.TxCentrifuge.Tracing.Orphans + () where + +-------------------------------------------------------------------------------- + +----------- +-- aeson -- +----------- +import Data.Aeson (Value (String), (.=)) +---------- +-- text -- +---------- +import Data.Text (pack) +-------------------------- +-- ouroboros-network -- +-------------------------- +-- First two using same qualified as "typed-protocol" imports below. +-- This is two import "NodeToClient.hs" `TraceSendMsg` instances unmmodified. 
+import Ouroboros.Network.Driver.Simple qualified as Simple
+import Ouroboros.Network.Driver.Stateful qualified as Stateful
+import Ouroboros.Network.Protocol.KeepAlive.Type qualified as KA
+import Ouroboros.Network.Protocol.TxSubmission2.Type qualified as STX
+----------------------
+-- trace-dispatcher --
+----------------------
+-- We prefer the qualified imports above, but explicit names are needed here to
+-- copy the instances unmodified.
+import Cardano.Logging
+  ( LogFormatting (..)
+  , MetaTrace (..)
+  , Namespace (..)
+  , SeverityS (..)
+  , nsCast
+  , nsPrependInner
+  )
+---------------------
+-- typed-protocols --
+---------------------
+-- The first one is needed to copy the `TxSubmissionNode2` instance definition
+-- unmodified.
+import Network.TypedProtocol.Codec (AnyMessage (AnyMessageAndAgency))
+import Network.TypedProtocol.Codec qualified as Simple
+import Network.TypedProtocol.Stateful.Codec qualified as Stateful
+
+-- Copied instances: from cardano-node NodeToClient.hs
+--------------------------------------------------------------------------------
+-- Driver Simple.
+-------------------------------------------------------------------------------- + +instance LogFormatting (Simple.AnyMessage ps) + => LogFormatting (Simple.TraceSendRecv ps) where + forMachine dtal (Simple.TraceSendMsg m) = mconcat + [ "kind" .= String "Send" , "msg" .= forMachine dtal m ] + forMachine dtal (Simple.TraceRecvMsg m) = mconcat + [ "kind" .= String "Recv" , "msg" .= forMachine dtal m ] + + forHuman (Simple.TraceSendMsg m) = "Send: " <> forHuman m + forHuman (Simple.TraceRecvMsg m) = "Receive: " <> forHuman m + + asMetrics (Simple.TraceSendMsg m) = asMetrics m + asMetrics (Simple.TraceRecvMsg m) = asMetrics m + +instance LogFormatting (Stateful.AnyMessage ps f) + => LogFormatting (Stateful.TraceSendRecv ps f) where + forMachine dtal (Stateful.TraceSendMsg m) = mconcat + [ "kind" .= String "Send" , "msg" .= forMachine dtal m ] + forMachine dtal (Stateful.TraceRecvMsg m) = mconcat + [ "kind" .= String "Recv" , "msg" .= forMachine dtal m ] + + forHuman (Stateful.TraceSendMsg m) = "Send: " <> forHuman m + forHuman (Stateful.TraceRecvMsg m) = "Receive: " <> forHuman m + + asMetrics (Stateful.TraceSendMsg m) = asMetrics m + asMetrics (Stateful.TraceRecvMsg m) = asMetrics m + +instance MetaTrace (Simple.AnyMessage ps) => + MetaTrace (Simple.TraceSendRecv ps) where + namespaceFor (Simple.TraceSendMsg msg) = + nsPrependInner "Send" (namespaceFor msg) + namespaceFor (Simple.TraceRecvMsg msg) = + nsPrependInner "Receive" (namespaceFor msg) + + severityFor (Namespace out ("Send" : tl)) (Just (Simple.TraceSendMsg msg)) = + severityFor (Namespace out tl) (Just msg) + severityFor (Namespace out ("Send" : tl)) Nothing = + severityFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + severityFor (Namespace out ("Receive" : tl)) (Just (Simple.TraceSendMsg msg)) = + severityFor (Namespace out tl) (Just msg) + severityFor (Namespace out ("Receive" : tl)) Nothing = + severityFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + severityFor _ 
_ = Nothing + + privacyFor (Namespace out ("Send" : tl)) (Just (Simple.TraceSendMsg msg)) = + privacyFor (Namespace out tl) (Just msg) + privacyFor (Namespace out ("Send" : tl)) Nothing = + privacyFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + privacyFor (Namespace out ("Receive" : tl)) (Just (Simple.TraceSendMsg msg)) = + privacyFor (Namespace out tl) (Just msg) + privacyFor (Namespace out ("Receive" : tl)) Nothing = + privacyFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + privacyFor _ _ = Nothing + + detailsFor (Namespace out ("Send" : tl)) (Just (Simple.TraceSendMsg msg)) = + detailsFor (Namespace out tl) (Just msg) + detailsFor (Namespace out ("Send" : tl)) Nothing = + detailsFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + detailsFor (Namespace out ("Receive" : tl)) (Just (Simple.TraceSendMsg msg)) = + detailsFor (Namespace out tl) (Just msg) + detailsFor (Namespace out ("Receive" : tl)) Nothing = + detailsFor (Namespace out tl :: Namespace (Simple.AnyMessage ps)) Nothing + detailsFor _ _ = Nothing + + metricsDocFor (Namespace out ("Send" : tl)) = + metricsDocFor (nsCast (Namespace out tl) :: Namespace (Simple.AnyMessage ps)) + metricsDocFor (Namespace out ("Receive" : tl)) = + metricsDocFor (nsCast (Namespace out tl) :: Namespace (Simple.AnyMessage ps)) + metricsDocFor _ = [] + + documentFor (Namespace out ("Send" : tl)) = + documentFor (nsCast (Namespace out tl) :: Namespace (Simple.AnyMessage ps)) + documentFor (Namespace out ("Receive" : tl)) = + documentFor (nsCast (Namespace out tl) :: Namespace (Simple.AnyMessage ps)) + documentFor _ = Nothing + + allNamespaces = + let cn = allNamespaces :: [Namespace (Simple.AnyMessage ps)] + in fmap (nsPrependInner "Send") cn ++ fmap (nsPrependInner "Receive") cn + +instance MetaTrace (Stateful.AnyMessage ps f) => + MetaTrace (Stateful.TraceSendRecv ps f) where + namespaceFor (Stateful.TraceSendMsg msg) = + nsPrependInner "Send" (namespaceFor msg) + 
namespaceFor (Stateful.TraceRecvMsg msg) = + nsPrependInner "Receive" (namespaceFor msg) + + severityFor (Namespace out ("Send" : tl)) (Just (Stateful.TraceSendMsg msg)) = + severityFor (Namespace out tl) (Just msg) + severityFor (Namespace out ("Send" : tl)) Nothing = + severityFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + + severityFor (Namespace out ("Receive" : tl)) (Just (Stateful.TraceSendMsg msg)) = + severityFor (Namespace out tl) (Just msg) + severityFor (Namespace out ("Receive" : tl)) Nothing = + severityFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + severityFor _ _ = Nothing + + privacyFor (Namespace out ("Send" : tl)) (Just (Stateful.TraceSendMsg msg)) = + privacyFor (Namespace out tl) (Just msg) + privacyFor (Namespace out ("Send" : tl)) Nothing = + privacyFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + privacyFor (Namespace out ("Receive" : tl)) (Just (Stateful.TraceSendMsg msg)) = + privacyFor (Namespace out tl) (Just msg) + privacyFor (Namespace out ("Receive" : tl)) Nothing = + privacyFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + privacyFor _ _ = Nothing + + detailsFor (Namespace out ("Send" : tl)) (Just (Stateful.TraceSendMsg msg)) = + detailsFor (Namespace out tl) (Just msg) + detailsFor (Namespace out ("Send" : tl)) Nothing = + detailsFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + detailsFor (Namespace out ("Receive" : tl)) (Just (Stateful.TraceSendMsg msg)) = + detailsFor (Namespace out tl) (Just msg) + detailsFor (Namespace out ("Receive" : tl)) Nothing = + detailsFor (Namespace out tl :: Namespace (Stateful.AnyMessage ps f)) Nothing + detailsFor _ _ = Nothing + + metricsDocFor (Namespace out ("Send" : tl)) = + metricsDocFor (nsCast (Namespace out tl) :: Namespace (Stateful.AnyMessage ps f)) + metricsDocFor (Namespace out ("Receive" : tl)) = + metricsDocFor (nsCast (Namespace out tl) :: Namespace 
(Stateful.AnyMessage ps f)) + metricsDocFor _ = [] + + documentFor (Namespace out ("Send" : tl)) = + documentFor (nsCast (Namespace out tl) :: Namespace (Stateful.AnyMessage ps f)) + documentFor (Namespace out ("Receive" : tl)) = + documentFor (nsCast (Namespace out tl) :: Namespace (Stateful.AnyMessage ps f)) + documentFor _ = Nothing + + allNamespaces = + let cn = allNamespaces :: [Namespace (Stateful.AnyMessage ps f)] + in fmap (nsPrependInner "Send") cn ++ fmap (nsPrependInner "Receive") cn + +-- Copied instances: from cardano-node NodeToNode.hs +-------------------------------------------------------------------------------- +-- TxSubmissionNode2 Tracer +-------------------------------------------------------------------------------- + +instance (Show txid, Show tx) + => LogFormatting (AnyMessage (STX.TxSubmission2 txid tx)) where + forMachine _dtal (AnyMessageAndAgency stok STX.MsgInit) = + mconcat + [ "kind" .= String "MsgInit" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok (STX.MsgRequestTxs txids)) = + mconcat + [ "kind" .= String "MsgRequestTxs" + , "agency" .= String (pack $ show stok) + , "txIds" .= String (pack $ show txids) + ] + forMachine _dtal (AnyMessageAndAgency stok (STX.MsgReplyTxs txs)) = + mconcat + [ "kind" .= String "MsgReplyTxs" + , "agency" .= String (pack $ show stok) + , "txs" .= String (pack $ show txs) + ] + forMachine _dtal (AnyMessageAndAgency stok STX.MsgRequestTxIds {}) = + mconcat + [ "kind" .= String "MsgRequestTxIds" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok (STX.MsgReplyTxIds _)) = + mconcat + [ "kind" .= String "MsgReplyTxIds" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok STX.MsgDone) = + mconcat + [ "kind" .= String "MsgDone" + , "agency" .= String (pack $ show stok) + ] + +instance MetaTrace (AnyMessage (STX.TxSubmission2 txid tx)) where + namespaceFor (AnyMessageAndAgency _stok 
STX.MsgInit {}) = + Namespace [] ["MsgInit"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgRequestTxs {}) = + Namespace [] ["RequestTxIds"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgReplyTxs {}) = + Namespace [] ["ReplyTxIds"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgRequestTxIds {}) = + Namespace [] ["RequestTxs"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgReplyTxIds {}) = + Namespace [] ["ReplyTxs"] + namespaceFor (AnyMessageAndAgency _stok STX.MsgDone {}) = + Namespace [] ["Done"] + + severityFor (Namespace _ ["MsgInit"]) _ = Just Debug + severityFor (Namespace _ ["RequestTxIds"]) _ = Just Debug + severityFor (Namespace _ ["ReplyTxIds"]) _ = Just Debug + severityFor (Namespace _ ["RequestTxs"]) _ = Just Debug + severityFor (Namespace _ ["ReplyTxs"]) _ = Just Debug + severityFor (Namespace _ ["Done"]) _ = Just Debug + severityFor _ _ = Nothing + + documentFor (Namespace _ ["MsgInit"]) = Just + "Client side hello message." + documentFor (Namespace _ ["RequestTxIds"]) = Just $ mconcat + [ "Request a non-empty list of transaction identifiers from the client, " + , "and confirm a number of outstanding transaction identifiers. " + , "\n " + , "With 'TokBlocking' this is a a blocking operation: the response will " + , "always have at least one transaction identifier, and it does not expect " + , "a prompt response: there is no timeout. This covers the case when there " + , "is nothing else to do but wait. For example this covers leaf nodes that " + , "rarely, if ever, create and submit a transaction. " + , "\n " + , "With 'TokNonBlocking' this is a non-blocking operation: the response " + , "may be an empty list and this does expect a prompt response. This " + , "covers high throughput use cases where we wish to pipeline, by " + , "interleaving requests for additional transaction identifiers with " + , "requests for transactions, which requires these requests not block. 
" + , "\n " + , "The request gives the maximum number of transaction identifiers that " + , "can be accepted in the response. This must be greater than zero in the " + , "'TokBlocking' case. In the 'TokNonBlocking' case either the numbers " + , "acknowledged or the number requested must be non-zero. In either case, " + , "the number requested must not put the total outstanding over the fixed " + , "protocol limit. " + , "\n" + , "The request also gives the number of outstanding transaction " + , "identifiers that can now be acknowledged. The actual transactions " + , "to acknowledge are known to the peer based on the FIFO order in which " + , "they were provided. " + , "\n " + , "There is no choice about when to use the blocking case versus the " + , "non-blocking case, it depends on whether there are any remaining " + , "unacknowledged transactions (after taking into account the ones " + , "acknowledged in this message): " + , "\n " + , "* The blocking case must be used when there are zero remaining " + , " unacknowledged transactions. " + , "\n " + , "* The non-blocking case must be used when there are non-zero remaining " + , " unacknowledged transactions." + ] + documentFor (Namespace _ ["ReplyTxIds"]) = Just $ mconcat + [ "Reply with a list of transaction identifiers for available " + , "transactions, along with the size of each transaction. " + , "\n " + , "The list must not be longer than the maximum number requested. " + , "\n " + , "In the 'StTxIds' 'StBlocking' state the list must be non-empty while " + , "in the 'StTxIds' 'StNonBlocking' state the list may be empty. " + , "\n " + , "These transactions are added to the notional FIFO of outstanding " + , "transaction identifiers for the protocol. " + , "\n " + , "The order in which these transaction identifiers are returned must be " + , "the order in which they are submitted to the mempool, to preserve " + , "dependent transactions." 
+ ] + documentFor (Namespace _ ["RequestTxs"]) = Just $ mconcat + [ "Request one or more transactions corresponding to the given " + , "transaction identifiers. " + , "\n " + , "While it is the responsibility of the replying peer to keep within " + , "pipelining in-flight limits, the sender must also cooperate by keeping " + , "the total requested across all in-flight requests within the limits. " + , "\n" + , "It is an error to ask for transaction identifiers that were not " + , "previously announced (via 'MsgReplyTxIds'). " + , "\n" + , "It is an error to ask for transaction identifiers that are not " + , "outstanding or that were already asked for." + ] + documentFor (Namespace _ ["ReplyTxs"]) = Just $ mconcat + [ "Reply with the requested transactions, or implicitly discard." + , "\n" + , "Transactions can become invalid between the time the transaction " + , "identifier was sent and the transaction being requested. Invalid " + , "(including committed) transactions do not need to be sent." + , "\n" + , "Any transaction identifiers requested but not provided in this reply " + , "should be considered as if this peer had never announced them. (Note " + , "that this is no guarantee that the transaction is invalid, it may still " + , "be valid and available from another peer)." + ] + documentFor (Namespace _ ["Done"]) = Just $ mconcat + [ "Termination message, initiated by the client when the server is " + , "making a blocking call for more transaction identifiers." 
+ ] + documentFor _ = Nothing + + allNamespaces = [ + Namespace [] ["MsgInit"] + , Namespace [] ["RequestTxIds"] + , Namespace [] ["ReplyTxIds"] + , Namespace [] ["RequestTxs"] + , Namespace [] ["ReplyTxs"] + , Namespace [] ["Done"] + ] + +-- Copied instances: from cardano-node NodeToNode.hs +-------------------------------------------------------------------------------- +-- KeepAlive Tracer +-------------------------------------------------------------------------------- + +instance LogFormatting (AnyMessage KA.KeepAlive) where + forMachine _dtal (AnyMessageAndAgency stok KA.MsgKeepAlive {}) = + mconcat + [ "kind" .= String "KeepAlive" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok KA.MsgKeepAliveResponse {}) = + mconcat + [ "kind" .= String "KeepAliveResponse" + , "agency" .= String (pack $ show stok) + ] + forMachine _dtal (AnyMessageAndAgency stok KA.MsgDone) = + mconcat + [ "kind" .= String "Done" + , "agency" .= String (pack $ show stok) + ] + +instance MetaTrace (AnyMessage KA.KeepAlive) where + namespaceFor (AnyMessageAndAgency _stok KA.MsgKeepAlive {}) = + Namespace [] ["KeepAlive"] + namespaceFor (AnyMessageAndAgency _stok KA.MsgKeepAliveResponse {}) = + Namespace [] ["KeepAliveResponse"] + namespaceFor (AnyMessageAndAgency _stok KA.MsgDone) = + Namespace [] ["Done"] + + severityFor (Namespace _ ["KeepAlive"]) _ = Just Debug + severityFor (Namespace _ ["KeepAliveResponse"]) _ = Just Debug + severityFor (Namespace _ ["Done"]) _ = Just Debug + severityFor _ _ = Nothing + + documentFor (Namespace _ ["KeepAlive"]) = Just + "Client side message to keep the connection alive." + documentFor (Namespace _ ["KeepAliveResponse"]) = Just $ mconcat + [ "Server side response to a previous client KeepAlive message." + ] + documentFor (Namespace _ ["Done"]) = Just $ mconcat + [ "Termination message, initiated by the client." 
+ ] + documentFor _ = Nothing + + allNamespaces = [ + Namespace [] ["KeepAlive"] + , Namespace [] ["KeepAliveResponse"] + , Namespace [] ["Done"] + ] diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tx.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tx.hs new file mode 100644 index 00000000000..a958bab1732 --- /dev/null +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tx.hs @@ -0,0 +1,168 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Cardano.Benchmarking.TxCentrifuge.Tx + ( buildTx + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Data.Function ((&)) +import Data.List (nubBy) +import Numeric.Natural (Natural) +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +------------------------- +-- cardano-ledger-core -- +------------------------- +import Cardano.Ledger.Coin qualified as L +--------------------- +-- tx-centrifuge -- +--------------------- +import Cardano.Benchmarking.TxCentrifuge.Fund ( Fund(..) ) + +-------------------------------------------------------------------------------- + +-- | Build and sign a transaction consuming the given funds and producing +-- @numOutputs@ outputs to @destAddr@. Returns the signed transaction and +-- recycled funds (one per output, keyed with @outKey@ for future spending). +-- +-- Signing keys are extracted from the input funds. If inputs belong to +-- different keys, all unique keys are used as witnesses. +-- +-- Fixed to ConwayEra. No Plutus, no metadata, fixed fee. +buildTx + -- | Ledger protocol parameters. + :: Api.LedgerProtocolParameters Api.ConwayEra + -- | Destination address for outputs (embeds the network identifier). 
+ -> Api.AddressInEra Api.ConwayEra + -- | Signing key for recycled output funds. + -> Api.SigningKey Api.PaymentKey + -- | Input funds. + -> [Fund] + -- | Number of outputs. + -> Natural + -- | Fee. + -> L.Coin + -> Either String (Api.Tx Api.ConwayEra, [Fund]) +buildTx ledgerPP destAddr outKey inFunds numOutputs fee + | null inFunds = Left "buildTx: no input funds" + | numOutputs == 0 = Left "buildTx: outputs_per_tx must be >= 1" + | feeLovelace < 0 = Left "buildTx: fee must be >= 0" + | changeTotal <= 0 = Left $ "buildTx: insufficient funds — total inputs (" + ++ show totalIn ++ " lovelace) do not cover fee (" + ++ show feeLovelace ++ " lovelace)" + -- Guard against outputs that would be below the Cardano minimum UTxO + -- value. We cannot check the actual protocol-parameter minimum here (it + -- depends on the serialised output size and the current coinsPerUTxOByte), + -- but we can catch the obviously-invalid case where integer division + -- produces zero-value or negative outputs. A real minimum UTxO check + -- should be added once the protocol parameters are threaded through to this + -- function. + | minOutputLovelace <= 0 = Left $ "buildTx: output value too low — " + ++ show numOutputs ++ " outputs from " + ++ show changeTotal ++ " lovelace change yields " + ++ show minOutputLovelace ++ " lovelace per output" + | otherwise = + let maybeTxBody = Api.createTransactionBody + (Api.shelleyBasedEra @Api.ConwayEra) + txBodyContent + in case maybeTxBody of + Left err -> Left ("buildTx: " ++ show err) + Right txBody -> + let signedTx = Api.signShelleyTransaction + (Api.shelleyBasedEra @Api.ConwayEra) + txBody + (map Api.WitnessPaymentKey uniqueKeys) + txId = Api.getTxId txBody + outFunds = [ Fund { fundTxIn = Api.TxIn txId (Api.TxIx ix) + , fundValue = amt + , fundSignKey = outKey + } + | (ix, amt) <- zip [0..] 
outAmounts + ] + in Right (signedTx, outFunds) + where + + totalIn :: Integer + totalIn = sum (map fundValue inFunds) + + feeLovelace :: Integer + feeLovelace = let L.Coin c = fee in c + + changeTotal :: Integer + changeTotal = totalIn - feeLovelace + + -- Minimum per-output lovelace amount (used for the zero-value guard above). + minOutputLovelace :: Integer + minOutputLovelace = changeTotal `div` fromIntegral numOutputs + + -- Split change evenly; first output absorbs the remainder. + outAmounts :: [Integer] + outAmounts = + let base = changeTotal `div` fromIntegral numOutputs + remainder = changeTotal `mod` fromIntegral numOutputs + in (base + remainder) : replicate (fromIntegral numOutputs - 1) base + + -- Unique signing keys from input funds + -- (deduplicated by verification key hash). + uniqueKeys :: [Api.SigningKey Api.PaymentKey] + uniqueKeys = nubBy sameKey (map fundSignKey inFunds) + where + sameKey + :: Api.SigningKey Api.PaymentKey + -> Api.SigningKey Api.PaymentKey + -> Bool + sameKey a b = Api.verificationKeyHash (Api.getVerificationKey a) + == Api.verificationKeyHash (Api.getVerificationKey b) + + txIns + :: [ ( Api.TxIn + , Api.BuildTxWith Api.BuildTx + (Api.Witness Api.WitCtxTxIn Api.ConwayEra) + ) + ] + txIns = map + (\f -> + ( fundTxIn f + , Api.BuildTxWith + (Api.KeyWitness Api.KeyWitnessForSpending) + ) + ) inFunds + + mkTxOut :: Integer -> Api.TxOut Api.CtxTx Api.ConwayEra + mkTxOut lovelace = Api.TxOut + destAddr + ( Api.shelleyBasedEraConstraints + (Api.shelleyBasedEra @Api.ConwayEra) $ + Api.lovelaceToTxOutValue + (Api.shelleyBasedEra @Api.ConwayEra) + (Api.Coin lovelace) + ) + Api.TxOutDatumNone + Api.ReferenceScriptNone + + txBodyContent :: Api.TxBodyContent Api.BuildTx Api.ConwayEra + txBodyContent = Api.defaultTxBodyContent Api.ShelleyBasedEraConway + & Api.setTxIns txIns + & Api.setTxInsCollateral Api.TxInsCollateralNone + & Api.setTxOuts (map mkTxOut outAmounts) + & Api.setTxFee + ( Api.TxFeeExplicit + (Api.shelleyBasedEra 
@Api.ConwayEra) + (Api.Coin feeLovelace) + ) + & Api.setTxValidityLowerBound Api.TxValidityNoLowerBound + & Api.setTxValidityUpperBound + ( Api.defaultTxValidityUpperBound + Api.ShelleyBasedEraConway + ) + & Api.setTxMetadata Api.TxMetadataNone + & Api.setTxProtocolParams (Api.BuildTxWith (Just ledgerPP)) diff --git a/bench/tx-centrifuge/test/lib/Test/PullFiction/Harness.hs b/bench/tx-centrifuge/test/lib/Test/PullFiction/Harness.hs new file mode 100644 index 00000000000..66c63e69d1a --- /dev/null +++ b/bench/tx-centrifuge/test/lib/Test/PullFiction/Harness.hs @@ -0,0 +1,533 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} + + +-------------------------------------------------------------------------------- + +module Test.PullFiction.Harness + ( -- * Test results + TestResult(..) + -- * Naming helpers + , targetName + , nodeName + -- * Running tests + , resolveConfig + , loadConfig + , runTest + , runTpsTest + , runPipelineIsolationTest + -- * Metrics & formatting + , getDuration + , formatMetrics + , formatDuration + -- * Assertions (pure) + , checkElapsedTolerance + , checkTpsTolerance + , checkTargetFairness + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import Control.Concurrent (myThreadId, threadDelay) +import Control.Exception (onException, throwIO) +import Control.Monad (forever, replicateM_, when) +import GHC.Conc (labelThread) +import Data.IORef qualified as IORef +import Data.List (intercalate) +import Data.List.NonEmpty qualified as NE +import System.Environment (lookupEnv) +import Text.Read (readMaybe) +----------- +-- aeson -- +----------- +import Data.Aeson qualified as Aeson +----------- +-- async -- +----------- +import Control.Concurrent.Async qualified as Async +----------- +-- clock -- +----------- +-- NOTE: System.Clock is used directly here (rather than PullFiction.Clock) +-- intentionally. 
The harness measures overall test wall-clock time, which is +-- independent of the rate-limiter's internal clock. Keeping them separate +-- ensures that test timing cannot be influenced by any future changes to +-- PullFiction.Clock. +import System.Clock qualified as Clock +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +--------- +-- stm -- +--------- +import Control.Concurrent.STM qualified as STM +--------------------- +-- pull-fiction -- +--------------------- +import Cardano.Benchmarking.PullFiction.Config.Runtime qualified as Runtime +import Cardano.Benchmarking.PullFiction.Config.Validated qualified as Validated +import Cardano.Benchmarking.PullFiction.WorkloadRunner (runWorkload) + +-------------------------------------------------------------------------------- + +-- | Aggregate results from a TPS test run. +data TestResult = TestResult + { -- | Wall-clock time the test actually ran. + elapsedSeconds :: !Double + -- | Actual token count per target, keyed by target name. + , targetCounts :: !(Map.Map String Int) + } deriving (Show) + +-------------------------------------------------------------------------------- +-- Naming helpers +-------------------------------------------------------------------------------- + +-- | Qualified target name: @\"workload\/target\"@. This is the key format used +-- by 'runTpsTest' for per-target counters. +targetName :: String -> String -> String +targetName workload target = workload ++ "/" ++ target + +-- | Node name matching the config's @\"node-NN\"@ zero-padded naming scheme +-- (e.g. @\"node-01\"@, @\"node-50\"@). 
+nodeName :: Int -> String +nodeName i = "node-" ++ (if i < 10 then "0" else "") ++ show i + +-------------------------------------------------------------------------------- +-- Running tests +-------------------------------------------------------------------------------- + +-- | Decode a JSON config with pre-built inputs and resolve into a +-- 'Runtime.Runtime'. +-- +-- This is the common entry point for tests that need a resolved pipeline. +-- 'loadConfig' is a thin wrapper for the common case of @()@ inputs. +resolveConfig :: FilePath -> NE.NonEmpty input -> IO (Runtime.Runtime input payload) +resolveConfig path inputs = do + raw <- Aeson.eitherDecodeFileStrict path >>= either fail pure + validated <- either fail pure $ Validated.validate raw inputs + Runtime.resolve validated + +-- | Load a generator config from a JSON file with dummy inputs and resolve into +-- a 'Runtime.Runtime'. +-- +-- Useful for tests that only need config metadata (rate limits, targets) and do +-- not use the input pipeline. +loadConfig :: FilePath -> IO (Runtime.Runtime () payload) +loadConfig path = resolveConfig path (() NE.:| []) + +-- | Run the pipeline scaffolding shared by all test runners. +-- +-- Spawns one builder per workload that reads an @input@ from the input queue +-- and writes @(input, [input])@ to the payload queue (the input IS the payload; +-- the output list carries the same input for 'runWorkload' to recycle back to +-- the input queue). +-- +-- 'Runtime.resolve' loads initial inputs into the input queue ('TQueue'), but +-- the payload queue ('TBQueue') starts empty. The builder moves inputs between +-- them, but it runs asynchronously — workers could start before the builder has +-- been scheduled. A synchronous pre-fill transfers inputs from the input queue +-- to the payload queue (up to its 8192 capacity, or until the input queue is +-- exhausted) so workers never see an empty payload queue at startup. 
+-- +-- Then spawns workers via 'runWorkload', passing the caller-supplied callback. +-- After the configured duration, cancels all builders and workers and returns +-- the elapsed wall-clock time. +-- +-- @payload = input@ — the builder treats the input itself as the payload. +runTest + :: Runtime.Runtime input input + -> Double -- ^ Duration in seconds. + -> (Runtime.Workload input input -- ^ Workload the worker belongs to. + -> Runtime.Target input input -- ^ Target the worker serves. + -> IO input -- ^ Blocking fetch (rate-limited). + -> IO (Maybe input) -- ^ Non-blocking fetch. + -> IO () -- ^ Worker body. + ) + -> IO Double -- ^ Elapsed wall-clock seconds. +runTest runtime durationSecs workerBody = do + let allWorkloads = Map.elems (Runtime.workloads runtime) + -- Spawn one builder per workload: read an input from the input queue and + -- pass it through to the payload queue as an (input, [input]) pair. + -- runWorkload's fetchPayload recycles [input] back to the input queue, + -- closing the loop. + builders <- mapM + (\builder -> do + let inputsQueue = Runtime.pipeInputQueue (Runtime.builderPipe builder) + payloadsQueue = Runtime.pipePayloadQueue (Runtime.builderPipe builder) + -- Pre-fill: move inputs from the input queue to the payload queue so + -- workers don't see an empty payload queue at startup. Iterates up to + -- 8192 times (the TBQueue capacity); if the input queue has fewer items, + -- the remaining iterations are no-ops via tryReadTQueue. + replicateM_ 8192 $ STM.atomically $ do + maybeInput <- STM.tryReadTQueue inputsQueue + case maybeInput of + Nothing -> pure () + Just input -> STM.writeTBQueue payloadsQueue (input, [input]) + -- Steady-state: builder runs forever, refilling as workers consume. + Async.async $ do + threadId <- myThreadId + labelThread threadId "builder" + forever $ STM.atomically $ do + input <- STM.readTQueue inputsQueue + STM.writeTBQueue payloadsQueue (input, [input]) + ) + (Runtime.builders runtime) + -- Start time. 
+ start <- Clock.getTime Clock.MonotonicRaw + -- Spawn workers via runWorkload, passing the caller-supplied callbacks. + workers <- concat <$> mapM + (\workload -> runWorkload workload $ + \target fetchPayload tryFetchPayload -> workerBody workload target fetchPayload tryFetchPayload + ) + allWorkloads + -- Race the test duration against any async dying. Exceptions are thrown + -- synchronously (not via Async.link) so Tasty's withResource can properly + -- cache and propagate them to all test cases in the group. + let allAsyncs = builders ++ workers + cancelAll = mapM_ Async.cancel allAsyncs + winner <- Async.race + (threadDelay (round (durationSecs * 1_000_000))) + (Async.waitAnyCatch allAsyncs) + `onException` cancelAll + -- End time. + end <- Clock.getTime Clock.MonotonicRaw + cancelAll + case winner of + Right (_, Left ex) -> throwIO ex + _ -> pure () + -- Return with the elapsed time. + pure $ fromIntegral (Clock.toNanoSecs (end - start)) / 1e9 + +-- | Decode a JSON config, create @()@ inputs, resolve into a +-- 'Runtime.Runtime', then run the pipeline, collecting per-target token +-- counts. +-- +-- The pipeline is trivial: a builder thread reads @()@ from the input queue +-- and writes @((), [()])@ to the payload queue; 'runWorkload' handles rate +-- limiting and input recycling; the worker callback just increments a +-- per-target counter. +-- +-- The caller is responsible for checking the returned 'TestResult' against its +-- own expected TPS map via 'checkTpsTolerance', 'checkTargetFairness', etc. +runTpsTest + -- | Path to the JSON config file. + :: FilePath + -- | Test duration in seconds. + -> Double + -> IO TestResult +runTpsTest configPath durationSecs = do + runtime <- resolveConfig configPath (() NE.:| replicate 99_999 ()) + -- Per-target counters keyed by "workloadName/targetName". 
+ let allTargets = concatMap + (\wl -> map + (\rt -> + targetName (Runtime.workloadName wl) (Runtime.targetName rt) + ) + (Map.elems (Runtime.targets wl)) + ) + (Map.elems (Runtime.workloads runtime)) + counters <- Map.fromList <$> mapM + (\key -> do + ref <- IORef.newIORef (0 :: Int) + pure (key, ref) + ) + allTargets + -- Each worker calls fetchPayload in a loop, increments its counter, and + -- recycles the input back to the pipe for the builder to reuse. + elapsed <- runTest runtime durationSecs $ + \workload target fetchPayload _tryFetchPayload -> do + let key = targetName (Runtime.workloadName workload) + (Runtime.targetName target) + ref = counters Map.! key + forever $ do + _ <- fetchPayload + IORef.atomicModifyIORef' ref (\c -> (c + 1, ())) + -- Collect results. + perTarget <- Map.fromList <$> mapM + (\(key, ref) -> do + c <- IORef.readIORef ref + pure (key, c) + ) + (Map.toList counters) + -- Returns the map with the tokens per target. + pure TestResult + { elapsedSeconds = elapsed + , targetCounts = perTarget + } + +-- | Run a pipeline isolation test that verifies each workload's input recycling +-- loop is closed: inputs tagged for workload N are only ever observed by +-- workload N's workers, never by another workload. +-- +-- Inputs are @(Int, Int)@ tuples where the first element is the workload index +-- and the second is an input identifier within that workload. +-- 'Runtime.resolve' partitions inputs in ascending workload-key order, so +-- workload @i@ (0-based by key order) receives only inputs whose first element +-- is @i@. This also tests the partition logic itself. +-- +-- If any worker observes an input with a foreign workload tag, the test fails +-- immediately. Both 'fetchPayload' (blocking) and 'tryFetchPayload' +-- (non-blocking) paths are exercised on every iteration. +runPipelineIsolationTest + -- | Path to the JSON config file. + :: FilePath + -- | Number of workloads (must match config). + -> Int + -- | Test duration in seconds. 
+ -> Double + -> IO () +runPipelineIsolationTest configPath nWorkloads durationSecs = do + let inputsPerWorkload = 2000 + taggedInputs = + [ (i, j) + | i <- [0 :: Int .. nWorkloads - 1] + , j <- [0 :: Int .. inputsPerWorkload - 1] + ] + inputs <- case taggedInputs of + (t:ts) -> pure (t NE.:| ts) + [] -> fail "runPipelineIsolationTest: nWorkloads must be >= 1" + runtime <- resolveConfig configPath inputs + -- Workloads are stored in a Map, so keys are ascending. + -- resolve partitions contiguous chunks in the same order. + let nameToTag = Map.fromList $ + zip (Map.keys (Runtime.workloads runtime)) [0 :: Int ..] + -- Workers: fetch payload (= input tag), assert it matches the workload. + -- fetchPayload and tryFetchPayload recycle consumed inputs automatically + -- (see 'TargetWorker'); the worker only checks the tag. Both blocking and + -- non-blocking paths are exercised on every iteration, verifying closed-loop + -- recycling in both code paths. + _ <- runTest runtime durationSecs $ + \workload _target fetchPayload tryFetchPayload -> do + let wlName = Runtime.workloadName workload + expectedTag = nameToTag Map.! wlName + check (wlIdx, _) = + when (wlIdx /= expectedTag) $ + fail $ "Input leakage: workload " ++ wlName + ++ " (tag " ++ show expectedTag + ++ ") received input tagged " ++ show wlIdx + forever $ do + tag <- fetchPayload + check tag + mTag <- tryFetchPayload + case mTag of + Nothing -> pure () + Just tag' -> check tag' + pure () + +-------------------------------------------------------------------------------- +-- Metrics & formatting +-------------------------------------------------------------------------------- + +-- | Default test duration in seconds +-- (overridable via PULL_FICTION_TEST_DURATION_SECS). +defaultDuration :: Double +defaultDuration = 60.0 + +-- | Read test duration from the @PULL_FICTION_TEST_DURATION_SECS@ environment +-- variable, falling back to 'defaultDuration' (60 s). 
+getDuration :: IO Double +getDuration = do + env <- lookupEnv "PULL_FICTION_TEST_DURATION_SECS" + pure $ maybe defaultDuration + (\s -> maybe defaultDuration id (readMaybe s)) env + +-- | Format a duration as a compact string for test group titles (e.g. @60.0@ +-- becomes @\"60s\"@, @5.0@ becomes @\"5s\"@). +formatDuration :: Double -> String +formatDuration d = show (round d :: Int) ++ "s" + +-- | Format a full metrics summary as a string. +-- Suitable for use as the result description in 'testCaseInfo'. +formatMetrics + :: Double -- ^ Configured test duration in seconds. + -> Map.Map String Double -- ^ Expected TPS per target. + -> TestResult -> String +formatMetrics cfgDuration expectedTps r = intercalate "\n" + [ "Global" + , " targets: " ++ show (Map.size (targetCounts r)) + , " duration: " ++ formatFixed 2 dur ++ " s" + ++ " (target " ++ formatFixed 0 cfgDuration + ++ " s, " ++ formatSignedPct durErr ++ "%)" + , " configured TPS: " ++ show (round cfgTps :: Int) + , " actual TPS: " ++ show (round actualTps :: Int) + ++ " (" ++ formatSignedPct tpsErr ++ "%)" + , " total tokens: " ++ show totalTokens + ++ " (expected " ++ show expected + ++ ", " ++ formatSignedPct tokenErr ++ "%)" + , "Per-target tokens" + , " mean: " ++ show (round tMean :: Int) + ++ biasT (round tMean :: Int) + , " min: " ++ show tMin ++ biasT tMin + , " max: " ++ show tMax ++ biasT tMax + , " spread (max-min): " ++ show tSpread + ++ " (" ++ formatFixed 1 tSpreadPct ++ "% of ideal)" + , " worst deviation: " ++ formatFixed 1 tWorstDev ++ "% from mean" + , " std deviation: " ++ show (round tStddev :: Int) + , " CV: " ++ formatFixed 2 tCv ++ "%" + , "Per-target TPS" + , " mean: " ++ formatFixed tpsDp sMean ++ biasS sMean + , " min: " ++ formatFixed tpsDp sMin ++ biasS sMin + , " max: " ++ formatFixed tpsDp sMax ++ biasS sMax + , " spread (max-min): " ++ formatFixed tpsDp sSpread + ++ " (" ++ formatFixed 1 sSpreadPct ++ "% of ideal)" + , " worst deviation: " ++ formatFixed 1 sWorstDev ++ "% from 
mean" + , " std deviation: " ++ formatFixed tpsDp sStddev + , " CV: " ++ formatFixed 2 sCv ++ "%" + ] + where + durErr = (elapsedSeconds r - cfgDuration) + / cfgDuration * 100 + totalTokens = sum (Map.elems (targetCounts r)) + cfgTps = sum (Map.elems expectedTps) + actualTps = fromIntegral totalTokens / elapsedSeconds r + tpsErr = (actualTps - cfgTps) / cfgTps * 100 + expected = round (cfgTps * elapsedSeconds r) :: Int + tokenErr = (fromIntegral totalTokens - fromIntegral expected) + / fromIntegral expected * 100 :: Double + counts = Map.elems (targetCounts r) + dur = elapsedSeconds r + n = fromIntegral (length counts) :: Double + -- Ideal per-target values (mean of expectedTps). + idealTps = sum (Map.elems expectedTps) + / fromIntegral (Map.size expectedTps) + idealTokens = idealTps * dur + -- Token stats + tMean = fromIntegral (sum counts) / n + tMin = minimum counts + tMax = maximum counts + tSpread = tMax - tMin + tSpreadPct = fromIntegral tSpread / idealTokens * 100 + tWorstDev = maximum + (map (\c -> abs (fromIntegral c - tMean) + / tMean) counts) * 100 + tVariance = sum (map (\c -> (fromIntegral c - tMean) ** 2) counts) / n + tStddev = sqrt tVariance + tCv = tStddev / tMean * 100 + biasT v = let d = fromIntegral v - idealTokens + p' = d / idealTokens * 100 + in " (ideal " ++ show (round idealTokens :: Int) + ++ ", " ++ formatSignedPct p' ++ "%)" + -- TPS stats (tokens / duration per target) + tpsList = map (\c -> fromIntegral c / dur) counts :: [Double] + sMean = sum tpsList / n + sMin = minimum tpsList + sMax = maximum tpsList + sSpread = sMax - sMin + sSpreadPct = sSpread / idealTps * 100 + sWorstDev = maximum (map (\s -> abs (s - sMean) / sMean) tpsList) * 100 + sVariance = sum (map (\s -> (s - sMean) ** 2) tpsList) / n + sStddev = sqrt sVariance + sCv = sStddev / sMean * 100 + -- Decimal places: use 0 when per-target TPS >= 1, otherwise enough to show + -- the leading significant digit plus one extra for resolution (e.g. 
0.2 + -- TPS -> 2 dp so min/max/spread are distinguishable). + tpsDp = if idealTps >= 1 then 0 + else max 1 (ceiling (negate (logBase 10 idealTps)) + 1 :: Int) + biasS v = let d = v - idealTps + p' = d / idealTps * 100 + in " (ideal " ++ formatFixed tpsDp idealTps + ++ ", " ++ formatSignedPct p' ++ "%)" + +-- | Format a 'Double' with exactly @n@ decimal places, rounding half-up. +-- +-- >>> formatFixed 2 3.1415 +-- "3.14" +-- >>> formatFixed 0 99.7 +-- "100" +formatFixed :: Int -> Double -> String +formatFixed 0 x = show (round x :: Int) +formatFixed decimals x = + let factor = 10 ^ decimals :: Int + scaled = round (x * fromIntegral factor) :: Int + (whole, frac) = scaled `quotRem` factor + fracStr = let s = show (abs frac) + in replicate (decimals - length s) '0' ++ s + in (if x < 0 && whole == 0 then "-" else "") ++ show whole ++ "." ++ fracStr + +-- | Format a percentage value with a leading sign (@+@ or @-@) and one decimal +-- place. Used in metrics output to show relative deviations. +-- +-- >>> formatSignedPct 3.14 +-- "+3.1" +-- >>> formatSignedPct (-0.5) +-- "-0.5" +formatSignedPct :: Double -> String +formatSignedPct x = (if x >= 0 then "+" else "") ++ formatFixed 1 x + +-------------------------------------------------------------------------------- +-- Assertions (pure) +-------------------------------------------------------------------------------- + +-- | Check that the elapsed wall-clock time is within the given relative +-- tolerance of the configured duration. Returns 'Nothing' on success, or 'Just' +-- an error message on failure. +-- +-- A test that overshoots significantly (e.g. 231s vs 60s) indicates that the +-- rate-limiting mechanism cannot keep up: the feeder loop overhead exceeds the +-- target inter-tick delay. +checkElapsedTolerance + :: Double -- ^ Tolerance (e.g. 0.05 for 5%). + -> Double -- ^ Configured test duration in seconds. 
+ -> TestResult -> Maybe String +checkElapsedTolerance tolerance cfgDuration result + | abs pctErr / 100 <= tolerance = Nothing + | otherwise = Just $ + "elapsed " ++ formatFixed 1 actual ++ " s (" + ++ (if pctErr >= 0 then "+" else "") ++ show (round pctErr :: Int) + ++ "%) vs target " ++ formatFixed 0 cfgDuration ++ " s" + where + actual = elapsedSeconds result + pctErr = (actual - cfgDuration) / cfgDuration * 100 + +-- | Check that actual TPS is within the given relative tolerance of configured +-- TPS. Returns 'Nothing' on success, or 'Just' an error message on failure. +checkTpsTolerance + :: Double -- ^ Tolerance (e.g. 0.05 for 5%). + -> Map.Map String Double -- ^ Expected TPS per target. + -> TestResult -> Maybe String +checkTpsTolerance tolerance expectedTps result + | abs pctErr / 100 <= tolerance = Nothing + | otherwise = Just $ + "actual " ++ show (round actualTps :: Int) ++ " TPS (" + ++ (if pctErr >= 0 then "+" else "") ++ show (round pctErr :: Int) + ++ "%) vs target " ++ show (round cfgTps :: Int) + where + totalTokens = sum (Map.elems (targetCounts result)) + cfgTps = sum (Map.elems expectedTps) + actualTps = fromIntegral totalTokens / elapsedSeconds result + pctErr = (actualTps - cfgTps) / cfgTps * 100 + +-- | Check a single target's token count against its expected TPS. Returns +-- 'Nothing' on success, or 'Just' an error message on failure. +-- +-- Applies a per-target discrete-distribution continuity correction: the actual +-- token count is an integer, so even a perfect system deviates from a +-- non-integer expected count by at least the rounding distance. We subtract +-- this /quantization floor/ so that the tolerance measures only the /excess/ +-- deviation attributable to the scheduling algorithm, not to integer +-- arithmetic. +checkTargetFairness + :: Double -- ^ Tolerance (e.g. 0.10 for 10%). + -> Map.Map String Double -- ^ Expected TPS per target. 
+ -> TestResult -> String -> Maybe String +checkTargetFairness tolerance expectedTps result name + | excessDev <= tolerance = Nothing + | otherwise = Just $ + show (round (dev * 100) :: Int) ++ "% from expected " + ++ show (round expectedCount :: Int) + ++ " (actual " ++ show actual ++ ")" + where + actual = Map.findWithDefault 0 name (targetCounts result) + elapsed = elapsedSeconds result + eTps = Map.findWithDefault 0 name expectedTps + expectedCount = eTps * elapsed + dev = abs (fromIntegral actual - expectedCount) / expectedCount + frac = expectedCount - fromIntegral (floor expectedCount :: Int) + qFloor + | frac == 0 = 0 + | otherwise = min frac (1 - frac) / expectedCount + excessDev = max 0 (dev - qFloor) diff --git a/bench/tx-centrifuge/test/pull-fiction/Main.hs b/bench/tx-centrifuge/test/pull-fiction/Main.hs new file mode 100644 index 00000000000..94717d6e689 --- /dev/null +++ b/bench/tx-centrifuge/test/pull-fiction/Main.hs @@ -0,0 +1,25 @@ +{-# LANGUAGE ImportQualifiedPost #-} + +module Main where + +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +import Test.Tasty.Runners qualified as Tasty +--------------------- +-- pull-fiction -- +--------------------- +import Test.PullFiction.GeneratorTest qualified as GeneratorTest +import Test.PullFiction.Harness qualified as Harness +import Test.PullFiction.PipelineTest qualified as PipelineTest + +main :: IO () +main = do + dur <- Harness.getDuration + Tasty.defaultMain + $ Tasty.localOption (Tasty.NumThreads 1) + $ Tasty.testGroup "pull-fiction" + [ GeneratorTest.generatorTests dur + , PipelineTest.pipelineTests dur + ] diff --git a/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/GeneratorTest.hs b/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/GeneratorTest.hs new file mode 100644 index 00000000000..c93c88328fc --- /dev/null +++ b/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/GeneratorTest.hs @@ -0,0 +1,127 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# 
LANGUAGE NumericUnderscores #-} + +-------------------------------------------------------------------------------- + +module Test.PullFiction.GeneratorTest + ( generatorTests + ) where + +-------------------------------------------------------------------------------- + +---------------- +-- containers -- +---------------- +import Data.Map.Strict qualified as Map +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +----------------- +-- tasty-hunit -- +----------------- +import Test.Tasty.HUnit qualified as HUnit +--------------------- +-- pull-fiction -- +--------------------- +import Paths_tx_centrifuge qualified as Paths +import Test.PullFiction.Harness qualified as Harness + +-------------------------------------------------------------------------------- + +generatorTests :: Double -> Tasty.TestTree +generatorTests duration = Tasty.testGroup "TPS" + [ -- A "shared" global rate limiter. + tpsTestGroup "Shared-limiter mode (50 targets, 10 TPS" + "data/config-shared-10.json" + (Map.fromList + [ (Harness.targetName "default" (Harness.nodeName i), 0.2) + | i <- [1..50] + ] + ) + duration + 0.05 + 0.15 + , tpsTestGroup "Shared-limiter mode (50 targets, 100k TPS" + "data/config-shared-100k.json" + (Map.fromList + [ (Harness.targetName "default" (Harness.nodeName i), 2_000) + | i <- [1..50] + ] + ) + duration + 0.05 + 0.15 + -- A "per_target" scoped rate limiter. Lower per-target tolerance. + , tpsTestGroup "Per-target-limiter mode (50 targets, 0.2 TPS/target" + "data/config-per-target-0_2.json" + (Map.fromList + [ (Harness.targetName "default" (Harness.nodeName i), 0.20) + | i <- [1..50] + ] + ) + duration + 0.05 + 0.05 + , tpsTestGroup "Per-target-limiter mode (50 targets, 2k TPS/target" + "data/config-per-target-2k.json" + (Map.fromList + [(Harness.targetName "default" (Harness.nodeName i), 2_000) + | i <- [1..50] + ] + ) + duration + 0.05 + 0.05 + ] + +tpsTestGroup + :: String -- ^ Test group label (duration is appended). 
+ -> String -- ^ Data-file config name. + -> Map.Map String Double -- ^ Expected TPS per target, keyed by name. + -> Double -- ^ Test duration in seconds. + -> Double -- ^ Global TPS tolerance. + -> Double -- ^ Per-target fairness tolerance. + -> Tasty.TestTree +tpsTestGroup label configName expectedMap duration globalTol fairnessTol = + Tasty.withResource + (do path <- Paths.getDataFileName configName + Harness.runTpsTest path duration + ) + (const $ pure ()) + $ \getResult -> + Tasty.testGroup + (label ++ ", " ++ Harness.formatDuration duration ++ ")") + [ -- Total elapsed time. + HUnit.testCase + ("Elapsed time within " + ++ show (round (globalTol * 100) :: Int) ++ "% of target" + ) $ do + result <- getResult + case Harness.checkElapsedTolerance globalTol duration result of + Nothing -> pure () + Just err -> HUnit.assertFailure err + -- Total TPS. + , HUnit.testCaseInfo + ("Global TPS within " + ++ show (round (globalTol * 100) :: Int) ++ "% tolerance" + ) $ do + result <- getResult + let metrics = Harness.formatMetrics duration expectedMap result + case Harness.checkTpsTolerance globalTol expectedMap result of + Nothing -> pure metrics + Just err -> HUnit.assertFailure + (err ++ "\n" ++ metrics) + -- TPS per target. 
+ , Tasty.testGroup + ("Per-target TPS within " + ++ show (round (fairnessTol * 100) :: Int) ++ "% of expected" + ) + [ HUnit.testCase name $ do + result <- getResult + case Harness.checkTargetFairness fairnessTol expectedMap result name of + Nothing -> pure () + Just err -> HUnit.assertFailure err + | name <- Map.keys expectedMap + ] + ] diff --git a/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/PipelineTest.hs b/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/PipelineTest.hs new file mode 100644 index 00000000000..6140aa30a86 --- /dev/null +++ b/bench/tx-centrifuge/test/pull-fiction/Test/PullFiction/PipelineTest.hs @@ -0,0 +1,59 @@ +{-# LANGUAGE ImportQualifiedPost #-} + +-------------------------------------------------------------------------------- + +module Test.PullFiction.PipelineTest + ( pipelineTests + ) where + +-------------------------------------------------------------------------------- + +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +----------------- +-- tasty-hunit -- +----------------- +import Test.Tasty.HUnit qualified as HUnit +--------------------- +-- pull-fiction -- +--------------------- +import Paths_tx_centrifuge qualified as Paths +import Test.PullFiction.Harness qualified as Harness + +-------------------------------------------------------------------------------- + +pipelineTests :: Double -> Tasty.TestTree +pipelineTests dur = Tasty.testGroup "pipeline" + [ + -- Pipeline test 1: single-group, per-group input queue. + -- ------------------------------------------------- + -- + -- 1 workload with 50 targets sharing one input queue. Recycled inputs + -- return to the same queue. Exercises Runtime.resolve with 1 workload, + -- verifying that closed-loop recycling delivers tokens to every target and + -- inputs stay within the workload. 
+ + HUnit.testCase + ("Single-group pipeline (50 targets, " ++ Harness.formatDuration dur ++ ")") $ do + path <- Paths.getDataFileName "data/config-per-target-0_2.json" + Harness.runPipelineIsolationTest path 1 dur + + -- Pipeline test 2: multi-group, per-group input queues. + -- ---------------------------------------------------- + -- + -- 50 workloads, each with 1 target at 1 TPS (50 TPS aggregate). + -- Each workload has its own input queue; recycled inputs must return to the + -- originating workload's queue and never leak to another group. + -- + -- Inputs are tagged with (workloadIndex, inputIndex) tuples. If any worker + -- ever observes an input with a foreign workload tag, the test fails + -- immediately. This also exercises resolve's partition logic. + + , HUnit.testCase + "Multi-group pipeline isolation (50 groups x 1 TPS, 10s)" $ do + path <- Paths.getDataFileName "data/config-multi-group.json" + Harness.runPipelineIsolationTest path 50 10 + + ] diff --git a/bench/tx-centrifuge/test/tx-centrifuge/Main.hs b/bench/tx-centrifuge/test/tx-centrifuge/Main.hs new file mode 100644 index 00000000000..3c0c71bd929 --- /dev/null +++ b/bench/tx-centrifuge/test/tx-centrifuge/Main.hs @@ -0,0 +1,17 @@ +{-# LANGUAGE ImportQualifiedPost #-} + +module Main where + +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +--------------------- +-- tx-centrifuge -- +--------------------- +import Test.TxCentrifuge.TxTest qualified as TxTest + +main :: IO () +main = Tasty.defaultMain $ Tasty.testGroup "tx-centrifuge" + [ TxTest.txTests + ] diff --git a/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs b/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs new file mode 100644 index 00000000000..fe89b48500a --- /dev/null +++ b/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs @@ -0,0 +1,223 @@ +{-# LANGUAGE ImportQualifiedPost #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# 
LANGUAGE TypeApplications #-} + +-------------------------------------------------------------------------------- + +module Test.TxCentrifuge.TxTest + ( txTests + , testSetup + , mkDummyFund + ) where + +-------------------------------------------------------------------------------- + +---------- +-- base -- +---------- +import System.IO (hFlush, hPutStrLn, stderr) +----------- +-- aeson -- +----------- +import Data.Aeson qualified as Aeson +----------------- +-- cardano-api -- +----------------- +import Cardano.Api qualified as Api +------------------------- +-- cardano-ledger-core -- +------------------------- +import Cardano.Ledger.Coin qualified as L +----------- +-- clock -- +----------- +import System.Clock qualified as Clock +----------- +-- tasty -- +----------- +import Test.Tasty qualified as Tasty +----------------- +-- tasty-hunit -- +----------------- +import Test.Tasty.HUnit ((@?=)) +import Test.Tasty.HUnit qualified as HUnit +------------------ +-- tx-generator -- +------------------ +import Cardano.TxGenerator.ProtocolParameters qualified as PP +--------------------- +-- tx-centrifuge -- +--------------------- +import Cardano.Benchmarking.TxCentrifuge.Fund qualified as Fund +import Cardano.Benchmarking.TxCentrifuge.Tx qualified as Tx +import Paths_tx_centrifuge qualified as Paths + +-------------------------------------------------------------------------------- + +txTests :: Tasty.TestTree +txTests = Tasty.testGroup "node" + [ HUnit.testCase "buildTx: simple 1-in-1-out transaction" $ do + (ledgerPP, signKey, addr) <- testSetup + let fund = mkDummyFund signKey 0 10_000_000 + fee = L.Coin 200_000 + case Tx.buildTx ledgerPP addr signKey [fund] 1 fee of + Left err -> + HUnit.assertFailure $ "buildTx failed: " ++ err + Right (tx, outFunds) -> do + -- One output fund recycled. + length outFunds @?= 1 + -- Output value = input - fee. + Fund.fundValue (head outFunds) @?= (10_000_000 - 200_000) + -- The recycled fund's TxIn references the new tx. 
+ let txId = Api.getTxId (Api.getTxBody tx) + Fund.fundTxIn (head outFunds) + @?= Api.TxIn txId (Api.TxIx 0) + + , HUnit.testCase "buildTx: 2-in-3-out transaction" $ do + (ledgerPP, signKey, addr) <- testSetup + let fund1 = mkDummyFund signKey 0 5_000_000 + fund2 = mkDummyFund signKey 1 5_000_000 + fee = L.Coin 200_000 + case Tx.buildTx ledgerPP addr signKey + [fund1, fund2] 3 fee of + Left err -> + HUnit.assertFailure $ "buildTx failed: " ++ err + Right (_tx, outFunds) -> do + -- Three output funds. + length outFunds @?= 3 + -- Total output = total input - fee. + let totalOut = sum (map Fund.fundValue outFunds) + totalOut @?= (10_000_000 - 200_000) + + , HUnit.testCase "buildTx: insufficient funds" $ do + (ledgerPP, signKey, addr) <- testSetup + let fund = mkDummyFund signKey 0 100_000 + fee = L.Coin 200_000 + case Tx.buildTx ledgerPP addr signKey [fund] 1 fee of + Left _ -> pure () -- expected + Right _ -> + HUnit.assertFailure + "buildTx should fail when funds < fee" + + , HUnit.testCase "buildTx: no input funds" $ do + (ledgerPP, signKey, addr) <- testSetup + let fee = L.Coin 200_000 + case Tx.buildTx ledgerPP addr signKey [] 1 fee of + Left _ -> pure () -- expected + Right _ -> + HUnit.assertFailure + "buildTx should fail with no inputs" + + , HUnit.testCase "buildTx: zero outputs" $ do + (ledgerPP, signKey, addr) <- testSetup + let fund = mkDummyFund signKey 0 10_000_000 + fee = L.Coin 200_000 + case Tx.buildTx ledgerPP addr signKey [fund] 0 fee of + Left _ -> pure () -- expected + Right _ -> + HUnit.assertFailure + "buildTx should fail with 0 outputs" + + , HUnit.testCase + "buildTx: signing throughput (single-threaded)" $ do + (ledgerPP, signKey, addr) <- testSetup + -- Build N transactions sequentially and measure wall-clock + -- time. This quantifies the single-threaded builder bottleneck. + let n = 10_000 :: Int + fee = L.Coin 200_000 + -- Use a large initial fund so recycling doesn't deplete it. 
+ initialFund = mkDummyFund signKey 0 1_000_000_000_000 + + start <- Clock.getTime Clock.MonotonicRaw + go n initialFund ledgerPP addr signKey fee + end <- Clock.getTime Clock.MonotonicRaw + + let elapsedNs = Clock.toNanoSecs (end - start) + elapsedS = fromIntegral elapsedNs / 1e9 :: Double + tps = fromIntegral n / elapsedS + + hPutStrLn stderr "" + hPutStrLn stderr + " --- Single-threaded buildTx throughput ---" + hPutStrLn stderr $ " txs built: " ++ show n + hPutStrLn stderr $ " elapsed: " ++ show elapsedS ++ " s" + hPutStrLn stderr $ + " throughput: " ++ show (round tps :: Int) ++ " tx/s" + hFlush stderr + + -- Sanity: we should be able to sign at least 1000 tx/s on any + -- reasonable hardware. This is not a hard performance target, + -- just a smoke test that buildTx isn't catastrophically slow. + HUnit.assertBool + ("buildTx throughput too low: " + ++ show (round tps :: Int) ++ " tx/s") + (tps > 1000) + ] + where + -- Build N txs sequentially, recycling the first output each time. + go :: Int -> Fund.Fund + -> Api.LedgerProtocolParameters Api.ConwayEra + -> Api.AddressInEra Api.ConwayEra + -> Api.SigningKey Api.PaymentKey -> L.Coin -> IO () + go 0 _ _ _ _ _ = pure () + go remaining fund ledgerPP addr signKey fee = + case Tx.buildTx ledgerPP addr signKey [fund] 1 fee of + Left err -> + error $ "throughput test: buildTx failed at iteration " + ++ show remaining ++ ": " ++ err + Right (_, outFunds) -> + go (remaining - 1) (head outFunds) + ledgerPP addr signKey fee + +-------------------------------------------------------------------------------- +-- Test helpers +-------------------------------------------------------------------------------- + +-- | Load protocol parameters and create common test fixtures. +testSetup + :: IO ( Api.LedgerProtocolParameters Api.ConwayEra + , Api.SigningKey Api.PaymentKey + , Api.AddressInEra Api.ConwayEra + ) +testSetup = do + -- Load protocol parameters from the CI test file. 
+ ppPath <- Paths.getDataFileName "data/protocol-parameters.ci-test.json" + protocolParameters <- + Aeson.eitherDecodeFileStrict' ppPath >>= either fail pure + ledgerPP <- + case PP.convertToLedgerProtocolParameters + Api.ShelleyBasedEraConway protocolParameters of + Left err -> + fail $ "convertToLedgerProtocolParameters: " ++ show err + Right pp -> pure pp + + -- Generate a fresh signing key and derive its address. + signKey <- Api.generateSigningKey Api.AsPaymentKey + let networkId = Api.Testnet (Api.NetworkMagic 42) + addr = Api.shelleyAddressInEra + (Api.shelleyBasedEra @Api.ConwayEra) + $ Api.makeShelleyAddress networkId + (Api.PaymentCredentialByKey + (Api.verificationKeyHash + (Api.getVerificationKey signKey))) + Api.NoStakeAddress + + pure (ledgerPP, signKey, addr) + +-- | Create a dummy fund with a synthetic TxIn. Uses the signing key's +-- verification key hash to derive a deterministic TxId (via +-- 'Api.genesisUTxOPseudoTxIn') and the caller-supplied @index@ as the +-- 'Api.TxIx'. Each distinct @index@ produces a unique 'Api.TxIn', so +-- multi-input tests can create several funds from the same key without +-- accidentally producing duplicate inputs. 
+mkDummyFund :: Api.SigningKey Api.PaymentKey -> Word -> Integer -> Fund.Fund +mkDummyFund signKey index lovelace = Fund.Fund + { Fund.fundTxIn = + let Api.TxIn txId _ = Fund.genesisTxIn + (Api.Testnet (Api.NetworkMagic 42)) + signKey + in Api.TxIn txId (Api.TxIx index) + , Fund.fundValue = lovelace + , Fund.fundSignKey = signKey + } diff --git a/bench/tx-centrifuge/tx-centrifuge.cabal b/bench/tx-centrifuge/tx-centrifuge.cabal new file mode 100644 index 00000000000..0dd7f29da28 --- /dev/null +++ b/bench/tx-centrifuge/tx-centrifuge.cabal @@ -0,0 +1,228 @@ +cabal-version: 3.0 + +name: tx-centrifuge +version: 0.1.0.0 +synopsis: Standalone transaction generator for Cardano benchmarking +description: A ground-up transaction generator built to support Leios + benchmarking without risking regressions in the + historical baselines produced by tx-generator over + several years of release benchmarks. + . + Rather than retrofitting new capabilities (cumulative + scheduling, per-target fairness, closed-loop recycling, + non-blocking STM, independent per-workload pipelines) + into the existing tx-generator — where any behavioural + change could silently invalidate years of baseline data + — tx-centrifuge implements them from scratch behind a + clean, pull-based architecture. The two generators + coexist: tx-generator continues to produce comparable + release benchmarks while tx-centrifuge targets the + higher TPS rates and workload isolation that Leios + requires. +category: Cardano, + Benchmark, +copyright: 2026 Intersect MBO. 
+author: Federico Mastellone (210034+fmaste@users.noreply.github.com) +license: Apache-2.0 +license-files: LICENSE + NOTICE +build-type: Simple + +extra-doc-files: README.md +data-files: data/config-shared-10.json + data/config-shared-100k.json + data/config-per-target-0_2.json + data/config-per-target-2k.json + data/config-per-target-200.json + data/config-multi-group.json + data/protocol-parameters.ci-test.json + +-------------------------------------------------------------------------------- + +common project-config + default-language: Haskell2010 + +common ghc-warnings + ghc-options: -Wall + -Wcompat + -Wincomplete-record-updates + -Wincomplete-uni-patterns + -Wno-prepositive-qualified-module + -Wno-unticked-promoted-constructors + -Wpartial-fields + -Wredundant-constraints + -fobject-code -fno-ignore-interface-pragmas + -fno-omit-interface-pragmas + +-- -N: multicore runtime capabilities (critical for high-TPS CPU load). +-- -A64m: larger nursery to reduce minor-GC frequency in the hot path. +-- -T: RTS stats available for tuning/regression checks. +common rts-defaults + ghc-options: -threaded + -rtsopts + "-with-rtsopts=-N -A64m -T" + +-------------------------------------------------------------------------------- + +executable tx-centrifuge + import: project-config, ghc-warnings, rts-defaults + hs-source-dirs: app + main-is: Main.hs + + build-depends: base + , aeson + , async + , bytestring + , cardano-api + , cardano-ledger-core + , cardano-node + , containers + , network + , ouroboros-consensus + , ouroboros-consensus-cardano + , ouroboros-network-framework + , stm + , text + , transformers + , tx-generator + , tx-centrifuge:pull-fiction + , tx-centrifuge:tx-centrifuge-lib + +-------------------------------------------------------------------------------- + +-- | Domain-independent, pull-based load generation engine. 
+-- +-- Provides rate-limited pipeline management (input queue, payload queue, +-- closed-loop recycling), GCRA-based admission control, and workload +-- orchestration. Zero Cardano dependencies — the library is parameterised over +-- abstract input and payload types so it can drive any pull-based protocol +-- (e.g. Cardano's TxSubmission2 mini-protocol). +library pull-fiction + import: project-config, ghc-warnings + + hs-source-dirs: lib/pull-fiction + visibility: public + + exposed-modules: Cardano.Benchmarking.PullFiction.Config.Raw + Cardano.Benchmarking.PullFiction.Config.Runtime + Cardano.Benchmarking.PullFiction.Config.Validated + Cardano.Benchmarking.PullFiction.Clock + Cardano.Benchmarking.PullFiction.WorkloadRunner + + other-modules: Cardano.Benchmarking.PullFiction.Internal.RateLimiter + + build-depends: base >=4.12 && <5 + , aeson + , async + , clock + , containers + , stm + , text + +-- Sub-library with node functionality decoupled from the core library above. +library tx-centrifuge-lib + import: project-config, ghc-warnings + + hs-source-dirs: lib/tx-centrifuge + visibility: public + + exposed-modules: Cardano.Benchmarking.TxCentrifuge.Client + Cardano.Benchmarking.TxCentrifuge.Connection + Cardano.Benchmarking.TxCentrifuge.Fund + Cardano.Benchmarking.TxCentrifuge.Tracing + Cardano.Benchmarking.TxCentrifuge.Tracing.Orphans + Cardano.Benchmarking.TxCentrifuge.Tx + + build-depends: base >=4.12 && <5 + , aeson + , bytestring + , cardano-api ^>= 10.24.1 + , cardano-ledger-core + , containers + , io-classes:{io-classes, strict-stm} + , network + , network-mux + , ouroboros-consensus >= 0.6 + , ouroboros-consensus-cardano >= 0.5 + , ouroboros-consensus-diffusion >= 0.7.0 + , ouroboros-network + , ouroboros-network-api + , ouroboros-network-framework + , ouroboros-network-protocols + , random + , serialise + , stm + , contra-tracer + , text + , trace-dispatcher + , typed-protocols:{typed-protocols, stateful} >= 1.0 + 
+-------------------------------------------------------------------------------- + +-- Test suites import rts-defaults so performance/fairness behavior matches +-- production runtime defaults instead of a different RTS profile. + +test-suite pull-fiction-test + import: project-config, ghc-warnings, rts-defaults + type: exitcode-stdio-1.0 + hs-source-dirs: test/pull-fiction + main-is: Main.hs + other-modules: Test.PullFiction.GeneratorTest + Test.PullFiction.PipelineTest + Paths_tx_centrifuge + autogen-modules: Paths_tx_centrifuge + build-depends: base >=4.12 && <5 + , containers + , tasty + , tasty-hunit + , tx-centrifuge:pull-fiction + , tx-centrifuge:test-harness + +test-suite tx-centrifuge-test + import: project-config, ghc-warnings, rts-defaults + type: exitcode-stdio-1.0 + hs-source-dirs: test/tx-centrifuge + main-is: Main.hs + other-modules: Test.TxCentrifuge.TxTest + Paths_tx_centrifuge + autogen-modules: Paths_tx_centrifuge + build-depends: base >=4.12 && <5 + , aeson + , cardano-api + , cardano-ledger-core + , clock + , tasty + , tasty-hunit + , tx-centrifuge:tx-centrifuge-lib + , tx-generator + +library test-harness + import: project-config, ghc-warnings + visibility: private + hs-source-dirs: test/lib + exposed-modules: Test.PullFiction.Harness + build-depends: base >=4.12 && <5 + , aeson + , async + , clock + , containers + , stm + , tx-centrifuge:pull-fiction + +-- Bench imports rts-defaults so benchmark numbers are measured with the same +-- RTS configuration used by the executable and tests. 
+-------------------------------------------------------------------------------- + +benchmark core-bench + import: project-config, ghc-warnings, rts-defaults + type: exitcode-stdio-1.0 + hs-source-dirs: bench + main-is: Bench.hs + other-modules: Paths_tx_centrifuge + autogen-modules: Paths_tx_centrifuge + build-depends: base >=4.12 && <5 + , containers + , criterion + , deepseq + , tx-centrifuge:pull-fiction + , tx-centrifuge:test-harness diff --git a/cabal.project b/cabal.project index 1a821ddc7db..2881fb5dd3e 100644 --- a/cabal.project +++ b/cabal.project @@ -31,6 +31,7 @@ packages: bench/cardano-topology bench/locli bench/plutus-scripts-bench + bench/tx-centrifuge bench/tx-generator bench/cardano-recon-framework trace-dispatcher From 314ecc8b69bc896d53e262b4777cc4ec5dffc50a Mon Sep 17 00:00:00 2001 From: Federico Mastellone Date: Fri, 20 Mar 2026 07:30:52 +0000 Subject: [PATCH 6/6] WIP: -nomadperf --- .../Benchmarking/Profile/Vocabulary.hs | 4 +- bench/tx-centrifuge/app/Main.hs | 39 ++++++++++--------- .../Cardano/Benchmarking/TxCentrifuge/Tx.hs | 11 +++--- .../tx-centrifuge/Test/TxCentrifuge/TxTest.hs | 22 +++++------ nix/workbench/backend/nomad-job.nix | 28 ++++++++++++- nix/workbench/service/generator.nix | 16 ++++++-- 6 files changed, 78 insertions(+), 42 deletions(-) diff --git a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs index c69d4f5ceec..1566a4ffc06 100644 --- a/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs +++ b/bench/cardano-profile/src/Cardano/Benchmarking/Profile/Vocabulary.hs @@ -116,11 +116,11 @@ genesisVariantVoltaire = genesisVariantLatest -- Defined in the "genesis" property and it's for the tx-generator. fundsDefault :: Types.Profile -> Types.Profile -fundsDefault = P.poolBalance 1000000000000000 . P.funds 10000000000000 . P.utxoKeys (6*500*3) +fundsDefault = P.poolBalance 1000000000000000 . P.funds 10000000000000 . 
P.utxoKeys (52*500*3) -- Some profiles have a higher `funds_balance` in `Genesis`. Needed? Fix it? fundsDouble :: Types.Profile -> Types.Profile -fundsDouble = P.poolBalance 1000000000000000 . P.funds 20000000000000 . P.utxoKeys 1 +fundsDouble = P.poolBalance 1000000000000000 . P.funds 20000000000000 . P.utxoKeys (52*500*3) fundsVoting :: Types.Profile -> Types.Profile fundsVoting = P.poolBalance 1000000000000000 . P.funds 40000000000000 . P.utxoKeys 2 diff --git a/bench/tx-centrifuge/app/Main.hs b/bench/tx-centrifuge/app/Main.hs index b3ceb12ce96..26af1d44c3a 100644 --- a/bench/tx-centrifuge/app/Main.hs +++ b/bench/tx-centrifuge/app/Main.hs @@ -95,10 +95,6 @@ import Control.Concurrent.STM qualified as STM ------------------ import Control.Monad.Trans.Except (runExceptT) ------------------ --- tx-generator -- ------------------- -import Cardano.TxGenerator.ProtocolParameters qualified as PP ------------------- -- pull-fiction -- ------------------ import Cardano.Benchmarking.PullFiction.Config.Raw qualified as Raw @@ -123,7 +119,7 @@ main = do -- Config. ---------- - (runtime, codecConfig, networkId, networkMagic, ledgerPP, tracers) <- loadConfig + (runtime, codecConfig, networkId, networkMagic, tracers) <- loadConfig -- Launch. ---------- @@ -148,7 +144,7 @@ main = do replicateM (fromIntegral (inputsPerTx vb)) (STM.readTQueue fundQueue) - case Tx.buildTx ledgerPP signingAddr signingKey + case Tx.buildTx signingAddr signingKey inputFunds (outputsPerTx vb) (L.Coin (fee vb)) of Left err -> die $ "Tx.buildTx: " ++ err Right ans@(tx, outputFunds) -> do @@ -374,6 +370,23 @@ createSigningKeyAndAddress networkId n Api.NoStakeAddress in (signingKey, signingAddr) +-------------------------------------------------------------------------------- +-- Cardano parameters +-------------------------------------------------------------------------------- + +{-- TODO: Construct a minimal protocol parameters, see Tx.hs last line. 
+data ProtocolParameters = ProtocolParameters + { epochLength :: Integer + , minFeeA :: Integer + , minFeeB :: Integer + } + +instance Aeson.FromJSON ProtocolParameters where + parseJSON = Aeson.withObject "ProtocolParameters" $ \o -> do + pp <- o .: "params" + ProtocolParameters <$> pp .: "epoch_length" <*> pp .: "min_fee_a" <*> pp .: "min_fee_b" +--} + -------------------------------------------------------------------------------- -- Initialization -------------------------------------------------------------------------------- @@ -392,8 +405,6 @@ loadConfig , Api.NetworkId -- | Network magic for the handshake with cardano-node. , Api.NetworkMagic - -- | Ledger protocol parameters for transaction building. - , Api.LedgerProtocolParameters Api.ConwayEra -- | Logging / metrics tracers. , Tracing.Tracers ) @@ -417,7 +428,6 @@ loadConfig = do Left err -> die $ "Config: " ++ err Right v -> pure v nodeConfigPath <- parseField "nodeConfig" - ppPath <- parseField "protocolParametersFile" raw <- case Aeson.fromJSON rawValue of Aeson.Error err -> die $ "JSON: " ++ err Aeson.Success cfg -> pure cfg @@ -450,19 +460,10 @@ loadConfig = do networkId = protocolToNetworkId protocol networkMagic = protocolToNetworkMagic protocol - -- Load protocol parameters. - hPutStrLn stderr $ "Loading protocol parameters from: " ++ ppPath - protocolParameters <- - Aeson.eitherDecodeFileStrict' ppPath >>= either die pure - ledgerPP <- case PP.convertToLedgerProtocolParameters - Api.ShelleyBasedEraConway protocolParameters of - Left err -> die $ "convertToLedgerProtocolParameters: " ++ show err - Right pp -> pure pp - -- Tracers. 
tracers <- Tracing.setupTracers configFile - pure ( runtime, codecConfig, networkId, networkMagic, ledgerPP, tracers ) + pure ( runtime, codecConfig, networkId, networkMagic, tracers ) -------------------------------------------------------------------------------- -- Protocol helpers (inlined from NodeConfig.hs and OuroborosImports.hs) diff --git a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tx.hs b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tx.hs index a958bab1732..581512bb4d1 100644 --- a/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tx.hs +++ b/bench/tx-centrifuge/lib/tx-centrifuge/Cardano/Benchmarking/TxCentrifuge/Tx.hs @@ -39,10 +39,8 @@ import Cardano.Benchmarking.TxCentrifuge.Fund ( Fund(..) ) -- -- Fixed to ConwayEra. No Plutus, no metadata, fixed fee. buildTx - -- | Ledger protocol parameters. - :: Api.LedgerProtocolParameters Api.ConwayEra -- | Destination address for outputs (embeds the network identifier). - -> Api.AddressInEra Api.ConwayEra + :: Api.AddressInEra Api.ConwayEra -- | Signing key for recycled output funds. -> Api.SigningKey Api.PaymentKey -- | Input funds. @@ -52,7 +50,7 @@ buildTx -- | Fee. -> L.Coin -> Either String (Api.Tx Api.ConwayEra, [Fund]) -buildTx ledgerPP destAddr outKey inFunds numOutputs fee +buildTx destAddr outKey inFunds numOutputs fee | null inFunds = Left "buildTx: no input funds" | numOutputs == 0 = Left "buildTx: outputs_per_tx must be >= 1" | feeLovelace < 0 = Left "buildTx: fee must be >= 0" @@ -165,4 +163,7 @@ buildTx ledgerPP destAddr outKey inFunds numOutputs fee Api.ShelleyBasedEraConway ) & Api.setTxMetadata Api.TxMetadataNone - & Api.setTxProtocolParams (Api.BuildTxWith (Just ledgerPP)) + -- We are using an explicit fee! + -- Using `Nothing` instead of `ledgerPP :: Api.LedgerProtocolParameters Api.ConwayEra`. + -- TODO: Will need something else for plutus scripts! 
+ & Api.setTxProtocolParams (Api.BuildTxWith Nothing) diff --git a/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs b/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs index fe89b48500a..9786e56f2e6 100644 --- a/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs +++ b/bench/tx-centrifuge/test/tx-centrifuge/Test/TxCentrifuge/TxTest.hs @@ -58,10 +58,10 @@ import Paths_tx_centrifuge qualified as Paths txTests :: Tasty.TestTree txTests = Tasty.testGroup "node" [ HUnit.testCase "buildTx: simple 1-in-1-out transaction" $ do - (ledgerPP, signKey, addr) <- testSetup + (_ledgerPP, signKey, addr) <- testSetup let fund = mkDummyFund signKey 0 10_000_000 fee = L.Coin 200_000 - case Tx.buildTx ledgerPP addr signKey [fund] 1 fee of + case Tx.buildTx {-- ledgerPP --} addr signKey [fund] 1 fee of Left err -> HUnit.assertFailure $ "buildTx failed: " ++ err Right (tx, outFunds) -> do @@ -75,11 +75,11 @@ txTests = Tasty.testGroup "node" @?= Api.TxIn txId (Api.TxIx 0) , HUnit.testCase "buildTx: 2-in-3-out transaction" $ do - (ledgerPP, signKey, addr) <- testSetup + (_ledgerPP, signKey, addr) <- testSetup let fund1 = mkDummyFund signKey 0 5_000_000 fund2 = mkDummyFund signKey 1 5_000_000 fee = L.Coin 200_000 - case Tx.buildTx ledgerPP addr signKey + case Tx.buildTx {-- ledgerPP --} addr signKey [fund1, fund2] 3 fee of Left err -> HUnit.assertFailure $ "buildTx failed: " ++ err @@ -91,29 +91,29 @@ txTests = Tasty.testGroup "node" totalOut @?= (10_000_000 - 200_000) , HUnit.testCase "buildTx: insufficient funds" $ do - (ledgerPP, signKey, addr) <- testSetup + (_ledgerPP, signKey, addr) <- testSetup let fund = mkDummyFund signKey 0 100_000 fee = L.Coin 200_000 - case Tx.buildTx ledgerPP addr signKey [fund] 1 fee of + case Tx.buildTx {-- ledgerPP --} addr signKey [fund] 1 fee of Left _ -> pure () -- expected Right _ -> HUnit.assertFailure "buildTx should fail when funds < fee" , HUnit.testCase "buildTx: no input funds" $ do - (ledgerPP, 
signKey, addr) <- testSetup + (_ledgerPP, signKey, addr) <- testSetup let fee = L.Coin 200_000 - case Tx.buildTx ledgerPP addr signKey [] 1 fee of + case Tx.buildTx {-- ledgerPP --} addr signKey [] 1 fee of Left _ -> pure () -- expected Right _ -> HUnit.assertFailure "buildTx should fail with no inputs" , HUnit.testCase "buildTx: zero outputs" $ do - (ledgerPP, signKey, addr) <- testSetup + (_ledgerPP, signKey, addr) <- testSetup let fund = mkDummyFund signKey 0 10_000_000 fee = L.Coin 200_000 - case Tx.buildTx ledgerPP addr signKey [fund] 0 fee of + case Tx.buildTx {-- ledgerPP --} addr signKey [fund] 0 fee of Left _ -> pure () -- expected Right _ -> HUnit.assertFailure @@ -162,7 +162,7 @@ txTests = Tasty.testGroup "node" -> Api.SigningKey Api.PaymentKey -> L.Coin -> IO () go 0 _ _ _ _ _ = pure () go remaining fund ledgerPP addr signKey fee = - case Tx.buildTx ledgerPP addr signKey [fund] 1 fee of + case Tx.buildTx {-- ledgerPP --} addr signKey [fund] 1 fee of Left err -> error $ "throughput test: buildTx failed at iteration " ++ show remaining ++ ": " ++ err diff --git a/nix/workbench/backend/nomad-job.nix b/nix/workbench/backend/nomad-job.nix index f012eeafb07..8b18f0127c6 100644 --- a/nix/workbench/backend/nomad-job.nix +++ b/nix/workbench/backend/nomad-job.nix @@ -895,7 +895,7 @@ let in # Recreate the "run-script.json" with IPs and ports that are # nomad template variables. - (runScriptToGoTemplate + (runScriptToGoTemplate2 runScript # Just the node names. 
(lib.attrsets.mapAttrsToList @@ -1394,6 +1394,32 @@ let '' ; + runScriptToGoTemplate2 = runScript: _: builtins.replaceStrings + ( + (builtins.genList + (i: ''__addr_${toString i}__'') + 100 + ) + ++ + (builtins.genList + (i: ''"__port_${toString i}__"'') + 100 + ) + ) + ( + (builtins.genList + (i: ''{{range nomadService "${(nodeNameToServicePortName "node-${toString i}")}"}}{{.Address}}{{end}}'') + 100 + ) + ++ + (builtins.genList + (i: ''{{range nomadService "${(nodeNameToServicePortName "node-${toString i}")}"}}{{.Port}}{{end}}'') + 100 + ) + ) + (lib.generators.toJSON {} runScript) + ; + # Convert from generator's "run-script.json" with all addresses being # "127.0.0.01" to one with all addresses being a placeholder like # "{{NOMAD_IP_node-X}}". diff --git a/nix/workbench/service/generator.nix b/nix/workbench/service/generator.nix index fe8e0d346a8..6945f818c3c 100644 --- a/nix/workbench/service/generator.nix +++ b/nix/workbench/service/generator.nix @@ -191,7 +191,7 @@ __toJSON rate_limit = { scope = "shared"; type = "token_bucket"; - params = { tps = 15; }; + params = { tps = 12; }; } ; max_batch_size = 500; @@ -205,8 +205,11 @@ __toJSON value = { targets = { "${toString i}" = - { addr = "127.0.0.1"; - port = (30000 + i); + # { addr = "127.0.0.1"; + # port = (30000 + i); + # } + { addr = "__addr_${toString i}__"; + port = "__port_${toString i}__"; } ; } @@ -221,7 +224,12 @@ __toJSON # tx-centrifuge parameters. ########################### nodeConfig = "../${runningNode}/config.json"; - protocolParametersFile = "/tmp/protocol-parameters-queried.json"; + protocol_parameters = + { epoch_length = profile.genesis.shelley.epochLength; + min_fee_a = profile.genesis.shelley.protocolParams.minFeeA; + min_fee_b = profile.genesis.shelley.protocolParams.minFeeB; + } + ; # Tracing parameters. ##################### TraceOptions =